path: root/contrib/llvm/lib/ExecutionEngine/Orc
Diffstat (limited to 'contrib/llvm/lib/ExecutionEngine/Orc')
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp       | 102
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp     | 254
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp         |  26
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp        | 542
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp         | 119
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h      | 304
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcError.cpp             |  72
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp  | 133
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h    | 411
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp             |  55
10 files changed, 2018 insertions, 0 deletions
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
new file mode 100644
index 000000000000..b7220dba88e9
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -0,0 +1,102 @@
+//===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+namespace orc {
+
+CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
+ : InitList(
+ GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
+ I((InitList && End) ? InitList->getNumOperands() : 0) {
+}
+
+bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
+ assert(InitList == Other.InitList && "Incomparable iterators.");
+ return I == Other.I;
+}
+
+bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
+ return !(*this == Other);
+}
+
+CtorDtorIterator& CtorDtorIterator::operator++() {
+ ++I;
+ return *this;
+}
+
+CtorDtorIterator CtorDtorIterator::operator++(int) {
+ CtorDtorIterator Temp = *this;
+ ++I;
+ return Temp;
+}
+
+CtorDtorIterator::Element CtorDtorIterator::operator*() const {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+ assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
+
+ Constant *FuncC = CS->getOperand(1);
+ Function *Func = nullptr;
+
+ // Extract function pointer, pulling off any casts.
+ while (FuncC) {
+ if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+ Func = F;
+ break;
+ } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+ if (CE->isCast())
+ FuncC = dyn_cast_or_null<ConstantExpr>(CE->getOperand(0));
+ else
+ break;
+ } else {
+ // This isn't anything we recognize. Bail out with Func left set to null.
+ break;
+ }
+ }
+
+ ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
+ Value *Data = CS->getOperand(2);
+ return Element(Priority->getZExtValue(), Func, Data);
+}
+
+iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
+ const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
+ return make_range(CtorDtorIterator(CtorsList, false),
+ CtorDtorIterator(CtorsList, true));
+}
+
+iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
+ const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
+ return make_range(CtorDtorIterator(DtorsList, false),
+ CtorDtorIterator(DtorsList, true));
+}
+
+void LocalCXXRuntimeOverrides::runDestructors() {
+ auto& CXXDestructorDataPairs = DSOHandleOverride;
+ for (auto &P : CXXDestructorDataPairs)
+ P.first(P.second);
+ CXXDestructorDataPairs.clear();
+}
+
+int LocalCXXRuntimeOverrides::CXAAtExitOverride(DestructorPtr Destructor,
+ void *Arg, void *DSOHandle) {
+ auto& CXXDestructorDataPairs =
+ *reinterpret_cast<CXXDestructorDataPairList*>(DSOHandle);
+ CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
+ return 0;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
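
A minimal usage sketch (not part of the imported sources) of the constructor iterators defined above: getConstructors() walks the llvm.global_ctors array and exposes each entry's Priority, Func, and Data. M is assumed to be a fully populated llvm::Module; names here are illustrative.

  #include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
  #include "llvm/IR/Module.h"
  #include <string>
  #include <vector>

  // Collect the names of all static constructor functions in M.
  std::vector<std::string> collectCtorNames(const llvm::Module &M) {
    std::vector<std::string> Names;
    for (auto Ctor : llvm::orc::getConstructors(M))
      if (Ctor.Func) // Func may be null if the entry wasn't a recognizable function.
        Names.push_back(Ctor.Func->getName().str());
    return Names;
  }

The same pattern (mangled and fed to a CtorDtorRunner) is used by OrcCBindingsStack.h further down in this diff.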
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
new file mode 100644
index 000000000000..e3a456849f90
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -0,0 +1,254 @@
+//===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+
+void JITCompileCallbackManager::anchor() {}
+void IndirectStubsManager::anchor() {}
+
+std::unique_ptr<JITCompileCallbackManager>
+createLocalCompileCallbackManager(const Triple &T,
+ JITTargetAddress ErrorHandlerAddress) {
+ switch (T.getArch()) {
+ default: return nullptr;
+
+ case Triple::x86: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcI386> CCMgrT;
+ return llvm::make_unique<CCMgrT>(ErrorHandlerAddress);
+ }
+
+ case Triple::x86_64: {
+    if (T.getOS() == Triple::OSType::Win32) {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
+ return llvm::make_unique<CCMgrT>(ErrorHandlerAddress);
+ } else {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_SysV> CCMgrT;
+ return llvm::make_unique<CCMgrT>(ErrorHandlerAddress);
+ }
+ }
+ }
+}
+
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T) {
+ switch (T.getArch()) {
+ default: return nullptr;
+
+ case Triple::x86:
+ return [](){
+ return llvm::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcI386>>();
+ };
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32) {
+ return [](){
+ return llvm::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_Win32>>();
+ };
+ } else {
+ return [](){
+ return llvm::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_SysV>>();
+ };
+ }
+ }
+}
+
+Constant* createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr) {
+ Constant *AddrIntVal =
+ ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr);
+ Constant *AddrPtrVal =
+ ConstantExpr::getCast(Instruction::IntToPtr, AddrIntVal,
+ PointerType::get(&FT, 0));
+ return AddrPtrVal;
+}
+
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer) {
+ auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
+ Initializer, Name, nullptr,
+ GlobalValue::NotThreadLocal, 0, true);
+ IP->setVisibility(GlobalValue::HiddenVisibility);
+ return IP;
+}
+
+void makeStub(Function &F, Value &ImplPointer) {
+ assert(F.isDeclaration() && "Can't turn a definition into a stub.");
+ assert(F.getParent() && "Function isn't in a module.");
+ Module &M = *F.getParent();
+ BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
+ IRBuilder<> Builder(EntryBlock);
+ LoadInst *ImplAddr = Builder.CreateLoad(&ImplPointer);
+ std::vector<Value*> CallArgs;
+ for (auto &A : F.args())
+ CallArgs.push_back(&A);
+ CallInst *Call = Builder.CreateCall(ImplAddr, CallArgs);
+ Call->setTailCall();
+ Call->setAttributes(F.getAttributes());
+ if (F.getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+}
+
+// Utility class for renaming global values and functions during partitioning.
+class GlobalRenamer {
+public:
+
+ static bool needsRenaming(const Value &New) {
+ return !New.hasName() || New.getName().startswith("\01L");
+ }
+
+ const std::string& getRename(const Value &Orig) {
+ // See if we have a name for this global.
+ {
+ auto I = Names.find(&Orig);
+ if (I != Names.end())
+ return I->second;
+ }
+
+ // Nope. Create a new one.
+ // FIXME: Use a more robust uniquing scheme. (This may blow up if the user
+ // writes a "__orc_anon[[:digit:]]* method).
+ unsigned ID = Names.size();
+ std::ostringstream NameStream;
+ NameStream << "__orc_anon" << ID++;
+ auto I = Names.insert(std::make_pair(&Orig, NameStream.str()));
+ return I.first->second;
+ }
+private:
+ DenseMap<const Value*, std::string> Names;
+};
+
+static void raiseVisibilityOnValue(GlobalValue &V, GlobalRenamer &R) {
+ if (V.hasLocalLinkage()) {
+ if (R.needsRenaming(V))
+ V.setName(R.getRename(V));
+ V.setLinkage(GlobalValue::ExternalLinkage);
+ V.setVisibility(GlobalValue::HiddenVisibility);
+ }
+ V.setUnnamedAddr(GlobalValue::UnnamedAddr::None);
+ assert(!R.needsRenaming(V) && "Invalid global name.");
+}
+
+void makeAllSymbolsExternallyAccessible(Module &M) {
+ GlobalRenamer Renamer;
+
+ for (auto &F : M)
+ raiseVisibilityOnValue(F, Renamer);
+
+ for (auto &GV : M.globals())
+ raiseVisibilityOnValue(GV, Renamer);
+
+ for (auto &A : M.aliases())
+ raiseVisibilityOnValue(A, Renamer);
+}
+
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap) {
+ assert(F.getParent() != &Dst && "Can't copy decl over existing function.");
+ Function *NewF =
+ Function::Create(cast<FunctionType>(F.getValueType()),
+ F.getLinkage(), F.getName(), &Dst);
+ NewF->copyAttributesFrom(&F);
+
+ if (VMap) {
+ (*VMap)[&F] = NewF;
+ auto NewArgI = NewF->arg_begin();
+ for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
+ ++ArgI, ++NewArgI)
+ (*VMap)[&*ArgI] = &*NewArgI;
+ }
+
+ return NewF;
+}
+
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ Function *NewF) {
+ assert(!OrigF.isDeclaration() && "Nothing to move");
+ if (!NewF)
+ NewF = cast<Function>(VMap[&OrigF]);
+ else
+ assert(VMap[&OrigF] == NewF && "Incorrect function mapping in VMap.");
+ assert(NewF && "Function mapping missing from VMap.");
+ assert(NewF->getParent() != OrigF.getParent() &&
+ "moveFunctionBody should only be used to move bodies between "
+ "modules.");
+
+ SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
+ CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns,
+ "", nullptr, nullptr, Materializer);
+ OrigF.deleteBody();
+}
+
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap) {
+ assert(GV.getParent() != &Dst && "Can't copy decl over existing global var.");
+ GlobalVariable *NewGV = new GlobalVariable(
+ Dst, GV.getValueType(), GV.isConstant(),
+ GV.getLinkage(), nullptr, GV.getName(), nullptr,
+ GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
+ NewGV->copyAttributesFrom(&GV);
+ if (VMap)
+ (*VMap)[&GV] = NewGV;
+ return NewGV;
+}
+
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ GlobalVariable *NewGV) {
+ assert(OrigGV.hasInitializer() && "Nothing to move");
+ if (!NewGV)
+ NewGV = cast<GlobalVariable>(VMap[&OrigGV]);
+ else
+ assert(VMap[&OrigGV] == NewGV &&
+ "Incorrect global variable mapping in VMap.");
+ assert(NewGV->getParent() != OrigGV.getParent() &&
+ "moveGlobalVariable should only be used to move initializers between "
+ "modules");
+
+ NewGV->setInitializer(MapValue(OrigGV.getInitializer(), VMap, RF_None,
+ nullptr, Materializer));
+}
+
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap) {
+ assert(OrigA.getAliasee() && "Original alias doesn't have an aliasee?");
+ auto *NewA = GlobalAlias::create(OrigA.getValueType(),
+ OrigA.getType()->getPointerAddressSpace(),
+ OrigA.getLinkage(), OrigA.getName(), &Dst);
+ NewA->copyAttributesFrom(&OrigA);
+ VMap[&OrigA] = NewA;
+ return NewA;
+}
+
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap) {
+ auto *MFs = Src.getModuleFlagsMetadata();
+ if (!MFs)
+ return;
+ for (auto *MF : MFs->operands())
+ Dst.addModuleFlag(MapMetadata(MF, VMap));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
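
A hedged sketch (not part of the imported sources) of combining createIRTypedAddress, createImplPointer, and makeStub from the file above: turn a function declaration into a stub that tail-calls through a hidden global pointer. ImplAddr would normally come from a compile callback or already-compiled code; here it is simply a parameter, and the "$orc_impl" suffix is illustrative.

  #include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
  #include <cassert>

  void emitStubFor(llvm::Module &M, llvm::Function &F,
                   llvm::JITTargetAddress ImplAddr) {
    using namespace llvm;
    assert(F.isDeclaration() && "makeStub requires a declaration");
    // Constant representing ImplAddr, typed as a pointer to F's prototype.
    Constant *Init = orc::createIRTypedAddress(*F.getFunctionType(), ImplAddr);
    // Hidden global holding the current implementation address.
    GlobalVariable *ImplPtr = orc::createImplPointer(
        *F.getType(), M, F.getName() + "$orc_impl", Init);
    // Fill in F's body with a tail call through the pointer.
    orc::makeStub(F, *ImplPtr);
  }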
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp
new file mode 100644
index 000000000000..8f2d6fd6c32b
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp
@@ -0,0 +1,26 @@
+//===---------- NullResolver.cpp - Reject symbol lookup requests ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/NullResolver.h"
+
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace orc {
+
+JITSymbol NullResolver::findSymbol(const std::string &Name) {
+ llvm_unreachable("Unexpected cross-object symbol reference");
+}
+
+JITSymbol NullResolver::findSymbolInLogicalDylib(const std::string &Name) {
+ llvm_unreachable("Unexpected cross-object symbol reference");
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
new file mode 100644
index 000000000000..9869b6c7050c
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -0,0 +1,542 @@
+//===------------- OrcABISupport.cpp - ABI specific support code ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+namespace orc {
+
+void OrcAArch64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
+ 0x910003fd, // 0x004: mov x29, sp
+ 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
+ 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
+ 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
+ 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
+ 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
+ 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
+ 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
+ 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
+ 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
+ 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
+ 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
+ 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
+ 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
+ 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
+ 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
+ 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
+ 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
+ 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
+ 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
+ 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
+ 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
+ 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
+ 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
+ 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
+ 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
+ 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
+ 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
+ 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
+ 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
+ 0x580004e0, // 0x07c: ldr x0, Lcallbackmgr
+ 0xaa1e03e1, // 0x080: mov x1, x30
+ 0xd1003021, // 0x084: sub x1, x1, #12
+ 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
+ 0xd63f0040, // 0x08c: blr x2
+ 0xaa0003f1, // 0x090: mov x17, x0
+ 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
+ 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
+ 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
+ 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
+ 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
+ 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
+ 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
+ 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
+ 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
+ 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
+ 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
+ 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
+ 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
+ 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
+ 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
+ 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
+ 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
+ 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
+ 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
+ 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
+ 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
+ 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
+ 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
+ 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
+ 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
+ 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
+ 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
+ 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
+ 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
+ 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
+ 0xd65f0220, // 0x10c: ret x17
+ 0x01234567, // 0x110: Lreentry_fn_ptr:
+ 0xdeadbeef, // 0x114: .quad 0
+ 0x98765432, // 0x118: Lcallbackmgr:
+ 0xcafef00d // 0x11c: .quad 0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x110;
+ const unsigned CallbackMgrAddrOffset = 0x118;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcAArch64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+ memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
+
+  // OffsetToPtr is actually the offset from the PC of the second instruction,
+  // so subtract the size of one 32-bit instruction (4 bytes).
+ OffsetToPtr -= 4;
+
+ uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+ Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
+    Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // ldr x16, Lptr
+ Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
+ }
+
+}
+
+Error OrcAArch64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // ldr x0, ptr1 ; PC-rel load of ptr1
+ // br x0 ; Jump to resolver
+ // stub2:
+ // ldr x0, ptr2 ; PC-rel load of ptr2
+ // br x0 ; Jump to resolver
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ unsigned PageSize = sys::Process::getPageSize();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrOffsetField = static_cast<uint64_t>(NumPages * PageSize)
+ << 3;
+
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xd61f020058000010 | PtrOffsetField;
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+ // Initialize all pointers to point at FailureAddress.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcX86_64_Base::writeTrampolines(uint8_t *TrampolineMem,
+ void *ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
+
+ memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineMem);
+ uint64_t CallIndirPCRel = 0xf1c40000000015ff;
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
+ Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
+}
+
+Error OrcX86_64_Base::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1(%rip)
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2(%rip)
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ unsigned PageSize = sys::Process::getPageSize();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrOffsetField = static_cast<uint64_t>(NumPages * PageSize - 6)
+ << 16;
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+ // Initialize all pointers to point at FailureAddress.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcX86_64_SysV::writeResolverCode(uint8_t *ResolverMem,
+ JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+ 0x48, 0xbf, // 0x26: movabsq <CBMgr>, %rdi
+
+ // 0x28: Callback manager addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
+ 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xff, 0xd0, // 0x42: callq *%rax
+ 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x54: popq %r15
+ 0x41, 0x5e, // 0x56: popq %r14
+ 0x41, 0x5d, // 0x58: popq %r13
+ 0x41, 0x5c, // 0x5a: popq %r12
+ 0x41, 0x5b, // 0x5c: popq %r11
+ 0x41, 0x5a, // 0x5e: popq %r10
+ 0x41, 0x59, // 0x60: popq %r9
+ 0x41, 0x58, // 0x62: popq %r8
+ 0x5f, // 0x64: popq %rdi
+ 0x5e, // 0x65: popq %rsi
+ 0x5a, // 0x66: popq %rdx
+ 0x59, // 0x67: popq %rcx
+ 0x5b, // 0x68: popq %rbx
+ 0x58, // 0x69: popq %rax
+ 0x5d, // 0x6a: popq %rbp
+ 0xc3, // 0x6b: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned CallbackMgrAddrOffset = 0x28;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcX86_64_Win32::writeResolverCode(uint8_t *ResolverMem,
+ JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+  // ResolverCode is similar to the OrcX86_64_SysV version, with differences
+  // specific to the Windows x64 calling convention: the arguments are passed
+  // in %rcx and %rdx rather than %rdi and %rsi, and shadow space is allocated
+  // on the stack around the call.
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+
+ 0x48, 0xb9, // 0x26: movabsq <CBMgr>, %rcx
+ // 0x28: Callback manager addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8B, 0x55, 0x08, // 0x30: mov rdx, [rbp+0x8]
+ 0x48, 0x83, 0xea, 0x06, // 0x34: sub rdx, 0x6
+
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ // 0x42: sub rsp, 0x20 (Allocate shadow space)
+ 0x48, 0x83, 0xEC, 0x20,
+ 0xff, 0xd0, // 0x46: callq *%rax
+
+ // 0x48: add rsp, 0x20 (Free shadow space)
+ 0x48, 0x83, 0xC4, 0x20,
+
+ 0x48, 0x89, 0x45, 0x08, // 0x4C: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x5C: popq %r15
+ 0x41, 0x5e, // 0x5E: popq %r14
+ 0x41, 0x5d, // 0x60: popq %r13
+ 0x41, 0x5c, // 0x62: popq %r12
+ 0x41, 0x5b, // 0x64: popq %r11
+ 0x41, 0x5a, // 0x66: popq %r10
+ 0x41, 0x59, // 0x68: popq %r9
+ 0x41, 0x58, // 0x6a: popq %r8
+ 0x5f, // 0x6c: popq %rdi
+ 0x5e, // 0x6d: popq %rsi
+ 0x5a, // 0x6e: popq %rdx
+ 0x59, // 0x6f: popq %rcx
+ 0x5b, // 0x70: popq %rbx
+ 0x58, // 0x71: popq %rax
+ 0x5d, // 0x72: popq %rbp
+ 0xc3, // 0x73: retq
+ };
+
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned CallbackMgrAddrOffset = 0x28;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcI386::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushl %ebp
+ 0x89, 0xe5, // 0x01: movl %esp, %ebp
+ 0x54, // 0x03: pushl %esp
+ 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
+ 0x50, // 0x07: pushl %eax
+ 0x53, // 0x08: pushl %ebx
+ 0x51, // 0x09: pushl %ecx
+ 0x52, // 0x0a: pushl %edx
+ 0x56, // 0x0b: pushl %esi
+ 0x57, // 0x0c: pushl %edi
+ 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
+ 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
+ 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
+ 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
+ 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
+ 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
+ 0x00, // 0x22: movl <cbmgr>, (%esp)
+ 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl <reentry>, %eax
+ 0xff, 0xd0, // 0x2e: calll *%eax
+ 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
+ 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
+ 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
+ 0x5f, // 0x3e: popl %edi
+ 0x5e, // 0x3f: popl %esi
+ 0x5a, // 0x40: popl %edx
+ 0x59, // 0x41: popl %ecx
+ 0x5b, // 0x42: popl %ebx
+ 0x58, // 0x43: popl %eax
+ 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
+      0x5d, // 0x47: popl %ebp
+      0xc3 // 0x48: retl
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x2a;
+ const unsigned CallbackMgrAddrOffset = 0x25;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcI386::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+
+ uint64_t CallRelImm = 0xF1C4C400000000e8;
+ uint64_t Resolver = reinterpret_cast<uint64_t>(ResolverAddr);
+ uint64_t ResolverRel =
+ Resolver - reinterpret_cast<uint64_t>(TrampolineMem) - 5;
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineMem);
+ for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
+ Trampolines[I] = CallRelImm | (ResolverRel << 8);
+}
+
+Error OrcI386::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ unsigned PageSize = sys::Process::getPageSize();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrAddr = reinterpret_cast<uint64_t>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
+ Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+ // Initialize all pointers to point at FailureAddress.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
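
A minimal sketch (not part of the imported sources) of how resolver code emitted by the file above is typically installed: allocate writable memory, write the code, then remap it read+execute, mirroring the allocate/write/protect pattern used for the stub blocks. ResolverCodeSize is assumed to be the constant declared for OrcX86_64_SysV in OrcABISupport.h; error handling is abbreviated.

  #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/Memory.h"

  llvm::Error installResolver(llvm::orc::OrcX86_64_SysV::JITReentryFn ReentryFn,
                              void *CallbackMgr,
                              llvm::sys::OwningMemoryBlock &ResolverBlock) {
    using namespace llvm;
    std::error_code EC;
    // Allocate a writable block large enough for the resolver.
    ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
        orc::OrcX86_64_SysV::ResolverCodeSize, nullptr,
        sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
    if (EC)
      return errorCodeToError(EC);

    // Emit the resolver, patching in the re-entry function and callback manager.
    orc::OrcX86_64_SysV::writeResolverCode(
        static_cast<uint8_t *>(ResolverBlock.base()), ReentryFn, CallbackMgr);

    // Flip the block to read+execute before it is ever jumped to.
    sys::MemoryBlock Block(ResolverBlock.base(), ResolverBlock.size());
    if (auto ProtEC = sys::Memory::protectMappedMemory(
            Block, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
      return errorCodeToError(ProtEC);
    return Error::success();
  }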
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp
new file mode 100644
index 000000000000..5fe259f80b6f
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp
@@ -0,0 +1,119 @@
+//===----------- OrcCBindings.cpp - C bindings for the Orc APIs -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcCBindingsStack.h"
+#include "llvm-c/OrcBindings.h"
+
+using namespace llvm;
+
+LLVMSharedModuleRef LLVMOrcMakeSharedModule(LLVMModuleRef Mod) {
+ return wrap(new std::shared_ptr<Module>(unwrap(Mod)));
+}
+
+void LLVMOrcDisposeSharedModuleRef(LLVMSharedModuleRef SharedMod) {
+ delete unwrap(SharedMod);
+}
+
+LLVMSharedObjectBufferRef
+LLVMOrcMakeSharedObjectBuffer(LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(new std::shared_ptr<MemoryBuffer>(unwrap(ObjBuffer)));
+}
+
+void
+LLVMOrcDisposeSharedObjectBufferRef(LLVMSharedObjectBufferRef SharedObjBuffer) {
+ delete unwrap(SharedObjBuffer);
+}
+
+LLVMOrcJITStackRef LLVMOrcCreateInstance(LLVMTargetMachineRef TM) {
+ TargetMachine *TM2(unwrap(TM));
+
+ Triple T(TM2->getTargetTriple());
+
+ auto CompileCallbackMgr = orc::createLocalCompileCallbackManager(T, 0);
+ auto IndirectStubsMgrBuilder =
+ orc::createLocalIndirectStubsManagerBuilder(T);
+
+ OrcCBindingsStack *JITStack = new OrcCBindingsStack(
+ *TM2, std::move(CompileCallbackMgr), IndirectStubsMgrBuilder);
+
+ return wrap(JITStack);
+}
+
+const char *LLVMOrcGetErrorMsg(LLVMOrcJITStackRef JITStack) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return J.getErrorMessage().c_str();
+}
+
+void LLVMOrcGetMangledSymbol(LLVMOrcJITStackRef JITStack, char **MangledName,
+ const char *SymbolName) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::string Mangled = J.mangle(SymbolName);
+ *MangledName = new char[Mangled.size() + 1];
+ strcpy(*MangledName, Mangled.c_str());
+}
+
+void LLVMOrcDisposeMangledSymbol(char *MangledName) { delete[] MangledName; }
+
+LLVMOrcTargetAddress
+LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
+ LLVMOrcLazyCompileCallbackFn Callback,
+ void *CallbackCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return J.createLazyCompileCallback(Callback, CallbackCtx);
+}
+
+LLVMOrcErrorCode LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress InitAddr) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return J.createIndirectStub(StubName, InitAddr);
+}
+
+LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress NewAddr) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return J.setIndirectStubPointer(StubName, NewAddr);
+}
+
+LLVMOrcModuleHandle
+LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMSharedModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::shared_ptr<Module> *M(unwrap(Mod));
+ return J.addIRModuleEager(*M, SymbolResolver, SymbolResolverCtx);
+}
+
+LLVMOrcModuleHandle
+LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMSharedModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::shared_ptr<Module> *M(unwrap(Mod));
+ return J.addIRModuleLazy(*M, SymbolResolver, SymbolResolverCtx);
+}
+
+void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ J.removeModule(H);
+}
+
+LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ const char *SymbolName) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ auto Sym = J.findSymbol(SymbolName, true);
+ return Sym.getAddress();
+}
+
+void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack) {
+ delete unwrap(JITStack);
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h b/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
new file mode 100644
index 000000000000..931d0a9eb2ad
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
@@ -0,0 +1,304 @@
+//===- OrcCBindingsStack.h - Orc JIT stack for C bindings -----*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
+#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
+
+#include "llvm-c/OrcBindings.h"
+#include "llvm-c/TargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class OrcCBindingsStack;
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(std::shared_ptr<Module>,
+ LLVMSharedModuleRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(std::shared_ptr<MemoryBuffer>,
+ LLVMSharedObjectBufferRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OrcCBindingsStack, LLVMOrcJITStackRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+class OrcCBindingsStack {
+public:
+
+ using CompileCallbackMgr = orc::JITCompileCallbackManager;
+ using ObjLayerT = orc::RTDyldObjectLinkingLayer;
+ using CompileLayerT = orc::IRCompileLayer<ObjLayerT, orc::SimpleCompiler>;
+ using CODLayerT =
+ orc::CompileOnDemandLayer<CompileLayerT, CompileCallbackMgr>;
+
+ using CallbackManagerBuilder =
+ std::function<std::unique_ptr<CompileCallbackMgr>()>;
+
+ using IndirectStubsManagerBuilder = CODLayerT::IndirectStubsManagerBuilderT;
+
+private:
+ class GenericHandle {
+ public:
+ virtual ~GenericHandle() = default;
+
+ virtual JITSymbol findSymbolIn(const std::string &Name,
+ bool ExportedSymbolsOnly) = 0;
+ virtual void removeModule() = 0;
+ };
+
+ template <typename LayerT> class GenericHandleImpl : public GenericHandle {
+ public:
+ GenericHandleImpl(LayerT &Layer, typename LayerT::ModuleHandleT Handle)
+ : Layer(Layer), Handle(std::move(Handle)) {}
+
+ JITSymbol findSymbolIn(const std::string &Name,
+ bool ExportedSymbolsOnly) override {
+ return Layer.findSymbolIn(Handle, Name, ExportedSymbolsOnly);
+ }
+
+ void removeModule() override { return Layer.removeModule(Handle); }
+
+ private:
+ LayerT &Layer;
+ typename LayerT::ModuleHandleT Handle;
+ };
+
+ template <typename LayerT>
+ std::unique_ptr<GenericHandleImpl<LayerT>>
+ createGenericHandle(LayerT &Layer, typename LayerT::ModuleHandleT Handle) {
+ return llvm::make_unique<GenericHandleImpl<LayerT>>(Layer,
+ std::move(Handle));
+ }
+
+public:
+ using ModuleHandleT = unsigned;
+
+ OrcCBindingsStack(TargetMachine &TM,
+ std::unique_ptr<CompileCallbackMgr> CCMgr,
+ IndirectStubsManagerBuilder IndirectStubsMgrBuilder)
+ : DL(TM.createDataLayout()), IndirectStubsMgr(IndirectStubsMgrBuilder()),
+ CCMgr(std::move(CCMgr)),
+ CompileLayer(ObjectLayer, orc::SimpleCompiler(TM)),
+ CODLayer(CompileLayer,
+ [](Function &F) { return std::set<Function *>({&F}); },
+ *this->CCMgr, std::move(IndirectStubsMgrBuilder), false),
+ CXXRuntimeOverrides(
+ [this](const std::string &S) { return mangle(S); }) {}
+
+ ~OrcCBindingsStack() {
+ // Run any destructors registered with __cxa_atexit.
+ CXXRuntimeOverrides.runDestructors();
+ // Run any IR destructors.
+ for (auto &DtorRunner : IRStaticDestructorRunners)
+ DtorRunner.runViaLayer(*this);
+ }
+
+ std::string mangle(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return MangledName;
+ }
+
+ template <typename PtrTy>
+ static PtrTy fromTargetAddress(JITTargetAddress Addr) {
+ return reinterpret_cast<PtrTy>(static_cast<uintptr_t>(Addr));
+ }
+
+ JITTargetAddress
+ createLazyCompileCallback(LLVMOrcLazyCompileCallbackFn Callback,
+ void *CallbackCtx) {
+ auto CCInfo = CCMgr->getCompileCallback();
+ CCInfo.setCompileAction([=]() -> JITTargetAddress {
+ return Callback(wrap(this), CallbackCtx);
+ });
+ return CCInfo.getAddress();
+ }
+
+ LLVMOrcErrorCode createIndirectStub(StringRef StubName,
+ JITTargetAddress Addr) {
+ return mapError(
+ IndirectStubsMgr->createStub(StubName, Addr, JITSymbolFlags::Exported));
+ }
+
+ LLVMOrcErrorCode setIndirectStubPointer(StringRef Name,
+ JITTargetAddress Addr) {
+ return mapError(IndirectStubsMgr->updatePointer(Name, Addr));
+ }
+
+ std::unique_ptr<JITSymbolResolver>
+ createResolver(LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return orc::createLambdaResolver(
+ [this, ExternalResolver, ExternalResolverCtx](const std::string &Name)
+ -> JITSymbol {
+ // Search order:
+ // 1. JIT'd symbols.
+ // 2. Runtime overrides.
+ // 3. External resolver (if present).
+
+ if (auto Sym = CODLayer.findSymbol(Name, true))
+ return Sym;
+ if (auto Sym = CXXRuntimeOverrides.searchOverrides(Name))
+ return Sym;
+
+ if (ExternalResolver)
+ return JITSymbol(
+ ExternalResolver(Name.c_str(), ExternalResolverCtx),
+ JITSymbolFlags::Exported);
+
+ return JITSymbol(nullptr);
+ },
+ [](const std::string &Name) {
+ return JITSymbol(nullptr);
+ });
+ }
+
+ template <typename LayerT>
+ ModuleHandleT addIRModule(LayerT &Layer, std::shared_ptr<Module> M,
+ std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ // Attach a data-layout if one isn't already present.
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(DL);
+
+ // Record the static constructors and destructors. We have to do this before
+ // we hand over ownership of the module to the JIT.
+ std::vector<std::string> CtorNames, DtorNames;
+ for (auto Ctor : orc::getConstructors(*M))
+ CtorNames.push_back(mangle(Ctor.Func->getName()));
+ for (auto Dtor : orc::getDestructors(*M))
+ DtorNames.push_back(mangle(Dtor.Func->getName()));
+
+ // Create the resolver.
+ auto Resolver = createResolver(ExternalResolver, ExternalResolverCtx);
+
+ // Add the module to the JIT.
+ auto LH = Layer.addModule(std::move(M), std::move(MemMgr),
+ std::move(Resolver));
+ ModuleHandleT H = createHandle(Layer, LH);
+
+ // Run the static constructors, and save the static destructor runner for
+ // execution when the JIT is torn down.
+ orc::CtorDtorRunner<OrcCBindingsStack> CtorRunner(std::move(CtorNames), H);
+ CtorRunner.runViaLayer(*this);
+
+ IRStaticDestructorRunners.emplace_back(std::move(DtorNames), H);
+
+ return H;
+ }
+
+ ModuleHandleT addIRModuleEager(std::shared_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return addIRModule(CompileLayer, std::move(M),
+ llvm::make_unique<SectionMemoryManager>(),
+ std::move(ExternalResolver), ExternalResolverCtx);
+ }
+
+ ModuleHandleT addIRModuleLazy(std::shared_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return addIRModule(CODLayer, std::move(M),
+ llvm::make_unique<SectionMemoryManager>(),
+ std::move(ExternalResolver), ExternalResolverCtx);
+ }
+
+ void removeModule(ModuleHandleT H) {
+ GenericHandles[H]->removeModule();
+ GenericHandles[H] = nullptr;
+ FreeHandleIndexes.push_back(H);
+ }
+
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ if (auto Sym = IndirectStubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ return CODLayer.findSymbol(mangle(Name), ExportedSymbolsOnly);
+ }
+
+ JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return GenericHandles[H]->findSymbolIn(Name, ExportedSymbolsOnly);
+ }
+
+ const std::string &getErrorMessage() const { return ErrMsg; }
+
+private:
+ template <typename LayerT>
+ unsigned createHandle(LayerT &Layer, typename LayerT::ModuleHandleT Handle) {
+ unsigned NewHandle;
+ if (!FreeHandleIndexes.empty()) {
+ NewHandle = FreeHandleIndexes.back();
+ FreeHandleIndexes.pop_back();
+ GenericHandles[NewHandle] = createGenericHandle(Layer, std::move(Handle));
+ return NewHandle;
+ } else {
+ NewHandle = GenericHandles.size();
+ GenericHandles.push_back(createGenericHandle(Layer, std::move(Handle)));
+ }
+ return NewHandle;
+ }
+
+ LLVMOrcErrorCode mapError(Error Err) {
+ LLVMOrcErrorCode Result = LLVMOrcErrSuccess;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ // Handler of last resort.
+ Result = LLVMOrcErrGeneric;
+ ErrMsg = "";
+ raw_string_ostream ErrStream(ErrMsg);
+ EIB.log(ErrStream);
+ });
+ return Result;
+ }
+
+ DataLayout DL;
+ SectionMemoryManager CCMgrMemMgr;
+
+ std::unique_ptr<orc::IndirectStubsManager> IndirectStubsMgr;
+
+ std::unique_ptr<CompileCallbackMgr> CCMgr;
+ ObjLayerT ObjectLayer;
+ CompileLayerT CompileLayer;
+ CODLayerT CODLayer;
+
+ std::vector<std::unique_ptr<GenericHandle>> GenericHandles;
+ std::vector<unsigned> FreeHandleIndexes;
+
+ orc::LocalCXXRuntimeOverrides CXXRuntimeOverrides;
+ std::vector<orc::CtorDtorRunner<OrcCBindingsStack>> IRStaticDestructorRunners;
+ std::string ErrMsg;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
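
A hedged sketch (not part of the imported sources) of wiring together the lazy-compilation pieces implemented above and exposed through the C entry points in OrcCBindings.cpp: a compile callback that, on first call through a stub, repoints the stub at an already-JIT'd implementation. The symbol names "real_fn" and "lazy_fn" and the callback signature shown are illustrative assumptions.

  static LLVMOrcTargetAddress lazyCompile(LLVMOrcJITStackRef JIT, void *Ctx) {
    (void)Ctx;
    // Resolve the real implementation and redirect the stub so that later
    // calls bypass the callback entirely.
    LLVMOrcTargetAddress Impl = LLVMOrcGetSymbolAddress(JIT, "real_fn");
    LLVMOrcSetIndirectStubPointer(JIT, "lazy_fn", Impl);
    return Impl; // The trampoline resumes execution at this address.
  }

  static void makeLazyStub(LLVMOrcJITStackRef JIT) {
    LLVMOrcTargetAddress CallbackAddr =
        LLVMOrcCreateLazyCompileCallback(JIT, lazyCompile, /*CallbackCtx=*/nullptr);
    LLVMOrcCreateIndirectStub(JIT, "lazy_fn", CallbackAddr);
  }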
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcError.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/OrcError.cpp
new file mode 100644
index 000000000000..9e70c4ac1dbf
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcError.cpp
@@ -0,0 +1,72 @@
+//===---------------- OrcError.cpp - Error codes for ORC ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Error codes for ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class OrcErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "orc"; }
+
+ std::string message(int condition) const override {
+ switch (static_cast<OrcErrorCode>(condition)) {
+ case OrcErrorCode::RemoteAllocatorDoesNotExist:
+ return "Remote allocator does not exist";
+ case OrcErrorCode::RemoteAllocatorIdAlreadyInUse:
+ return "Remote allocator Id already in use";
+ case OrcErrorCode::RemoteMProtectAddrUnrecognized:
+ return "Remote mprotect call references unallocated memory";
+ case OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist:
+ return "Remote indirect stubs owner does not exist";
+ case OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse:
+ return "Remote indirect stubs owner Id already in use";
+ case OrcErrorCode::RPCConnectionClosed:
+ return "RPC connection closed";
+ case OrcErrorCode::RPCCouldNotNegotiateFunction:
+ return "Could not negotiate RPC function";
+ case OrcErrorCode::RPCResponseAbandoned:
+ return "RPC response abandoned";
+ case OrcErrorCode::UnexpectedRPCCall:
+ return "Unexpected RPC call";
+ case OrcErrorCode::UnexpectedRPCResponse:
+ return "Unexpected RPC response";
+ case OrcErrorCode::UnknownErrorCodeFromRemote:
+ return "Unknown error returned from remote RPC function "
+ "(Use StringError to get error message)";
+ }
+ llvm_unreachable("Unhandled error code");
+ }
+};
+
+static ManagedStatic<OrcErrorCategory> OrcErrCat;
+}
+
+namespace llvm {
+namespace orc {
+
+std::error_code orcError(OrcErrorCode ErrCode) {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(ErrCode), *OrcErrCat);
+}
+
+}
+}
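
A minimal sketch (not part of the imported sources) of how orcError() is consumed: it yields a std::error_code in the "orc" category, which call sites typically wrap into an llvm::Error. The failing condition here is illustrative.

  #include "llvm/ExecutionEngine/Orc/OrcError.h"
  #include "llvm/Support/Error.h"

  llvm::Error checkConnection(bool ConnectionClosed) {
    if (ConnectionClosed)
      return llvm::errorCodeToError(
          llvm::orc::orcError(llvm::orc::OrcErrorCode::RPCConnectionClosed));
    return llvm::Error::success();
  }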
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
new file mode 100644
index 000000000000..f89f21adff41
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
@@ -0,0 +1,133 @@
+//===-------- OrcMCJITReplacement.cpp - Orc-based MCJIT replacement -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcMCJITReplacement.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { llvm::orc::OrcMCJITReplacement::Register(); }
+} JITRegistrator;
+
+}
+
+extern "C" void LLVMLinkInOrcMCJITReplacement() {}
+
+namespace llvm {
+namespace orc {
+
+GenericValue
+OrcMCJITReplacement::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy() &&
+ FTy->getParamType(2)->isPointerTy()) {
+ int (*PF)(int, char **, const char **) =
+ (int (*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy()) {
+ int (*PF)(int, char **) = (int (*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 && FTy->getParamType(0)->isIntegerTy(32)) {
+ GenericValue rv;
+ int (*PF)(int) = (int (*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+ // Handle cases where no arguments are passed first.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default:
+ llvm_unreachable("Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t (*)())(intptr_t)FPtr)());
+ else
+ llvm_unreachable("Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float (*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double (*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ llvm_unreachable("long double not supported yet");
+ case Type::PointerTyID:
+ return PTOGV(((void *(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ llvm_unreachable("Full-featured argument passing not supported yet!");
+}
+
+void OrcMCJITReplacement::runStaticConstructorsDestructors(bool isDtors) {
+ for (auto &M : LocalModules)
+ ExecutionEngine::runStaticConstructorsDestructors(*M, isDtors);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
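
A hedged sketch (not part of the imported sources) of driving the runFunction() implementation above for a main-like prototype int(int, char**), one of the common cases it special-cases. EE is assumed to be an OrcMCJITReplacement created through EngineBuilder, F its JIT'd "main", and Argv a caller-owned argv array.

  #include "llvm/ExecutionEngine/ExecutionEngine.h"
  #include "llvm/ExecutionEngine/GenericValue.h"

  int runMainLike(llvm::ExecutionEngine &EE, llvm::Function *F, int Argc,
                  char **Argv) {
    using namespace llvm;
    GenericValue Args[2];
    Args[0].IntVal = APInt(32, Argc); // First parameter: i32 argc.
    Args[1] = PTOGV(Argv);            // Second parameter: char** argv.
    GenericValue Result = EE.runFunction(F, Args);
    return static_cast<int>(Result.IntVal.getSExtValue());
  }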
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h b/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
new file mode 100644
index 000000000000..b20690c7caaf
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
@@ -0,0 +1,411 @@
+//===- OrcMCJITReplacement.h - Orc based MCJIT replacement ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Orc based MCJIT replacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
+#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/LazyEmittingLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class ObjectCache;
+
+namespace orc {
+
+class OrcMCJITReplacement : public ExecutionEngine {
+ // OrcMCJITReplacement needs to do a little extra book-keeping to ensure that
+ // Orc's automatic finalization doesn't kick in earlier than MCJIT clients are
+ // expecting - see finalizeMemory.
+ class MCJITReplacementMemMgr : public MCJITMemoryManager {
+ public:
+ MCJITReplacementMemMgr(OrcMCJITReplacement &M,
+ std::shared_ptr<MCJITMemoryManager> ClientMM)
+ : M(M), ClientMM(std::move(ClientMM)) {}
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override {
+ uint8_t *Addr =
+ ClientMM->allocateCodeSection(Size, Alignment, SectionID,
+ SectionName);
+ M.SectionsAllocatedSinceLastLoad.insert(Addr);
+ return Addr;
+ }
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool IsReadOnly) override {
+ uint8_t *Addr = ClientMM->allocateDataSection(Size, Alignment, SectionID,
+ SectionName, IsReadOnly);
+ M.SectionsAllocatedSinceLastLoad.insert(Addr);
+ return Addr;
+ }
+
+ void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+ uintptr_t RODataSize, uint32_t RODataAlign,
+ uintptr_t RWDataSize,
+ uint32_t RWDataAlign) override {
+ return ClientMM->reserveAllocationSpace(CodeSize, CodeAlign,
+ RODataSize, RODataAlign,
+ RWDataSize, RWDataAlign);
+ }
+
+ bool needsToReserveAllocationSpace() override {
+ return ClientMM->needsToReserveAllocationSpace();
+ }
+
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
+ return ClientMM->registerEHFrames(Addr, LoadAddr, Size);
+ }
+
+ void deregisterEHFrames() override {
+ return ClientMM->deregisterEHFrames();
+ }
+
+ void notifyObjectLoaded(RuntimeDyld &RTDyld,
+ const object::ObjectFile &O) override {
+ return ClientMM->notifyObjectLoaded(RTDyld, O);
+ }
+
+ void notifyObjectLoaded(ExecutionEngine *EE,
+ const object::ObjectFile &O) override {
+ return ClientMM->notifyObjectLoaded(EE, O);
+ }
+
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override {
+ // Each set of objects loaded will be finalized exactly once, but since
+ // symbol lookup during relocation may recursively trigger the
+ // loading/relocation of other modules, and since we're forwarding all
+ // finalizeMemory calls to a single underlying memory manager, we need to
+ // defer forwarding the call on until all necessary objects have been
+ // loaded. Otherwise, during the relocation of a leaf object, we will end
+ // up finalizing memory, causing a crash further up the stack when we
+ // attempt to apply relocations to finalized memory.
+ // To avoid finalizing too early, look at how many objects have been
+ // loaded but not yet finalized. This is a bit of a hack that relies on
+ // the fact that we're lazily emitting object files: The only way you can
+ // get more than one set of objects loaded but not yet finalized is if
+ // they were loaded during relocation of another set.
+ if (M.UnfinalizedSections.size() == 1)
+ return ClientMM->finalizeMemory(ErrMsg);
+ return false;
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ std::shared_ptr<MCJITMemoryManager> ClientMM;
+ };
+
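+  // Resolver handed to the JIT layers: lookups within the logical dylib are
+  // answered from the JIT's own symbols first, and anything not found there
+  // (as well as all external lookups) is forwarded to the client resolver.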
+ class LinkingResolver : public JITSymbolResolver {
+ public:
+ LinkingResolver(OrcMCJITReplacement &M) : M(M) {}
+
+ JITSymbol findSymbol(const std::string &Name) override {
+ return M.ClientResolver->findSymbol(Name);
+ }
+
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
+ if (auto Sym = M.findMangledSymbol(Name))
+ return Sym;
+ return M.ClientResolver->findSymbolInLogicalDylib(Name);
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ };
+
+private:
+ static ExecutionEngine *
+ createOrcMCJITReplacement(std::string *ErrorMsg,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<JITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) {
+ return new OrcMCJITReplacement(std::move(MemMgr), std::move(Resolver),
+ std::move(TM));
+ }
+
+public:
+ OrcMCJITReplacement(
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<JITSymbolResolver> ClientResolver,
+ std::unique_ptr<TargetMachine> TM)
+ : ExecutionEngine(TM->createDataLayout()), TM(std::move(TM)),
+ MemMgr(*this, std::move(MemMgr)), Resolver(*this),
+ ClientResolver(std::move(ClientResolver)), NotifyObjectLoaded(*this),
+ NotifyFinalized(*this),
+ ObjectLayer(NotifyObjectLoaded, NotifyFinalized),
+ CompileLayer(ObjectLayer, SimpleCompiler(*this->TM)),
+ LazyEmitLayer(CompileLayer) {}
+
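+  // Installs createOrcMCJITReplacement as the factory that EngineBuilder
+  // invokes when the Orc-based MCJIT replacement is requested (typically via
+  // EngineBuilder::setUseOrcMCJITReplacement(true)).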
+ static void Register() {
+ OrcMCJITReplacementCtor = createOrcMCJITReplacement;
+ }
+
+ void addModule(std::unique_ptr<Module> M) override {
+ // If this module doesn't have a DataLayout attached then attach the
+ // default.
+ if (M->getDataLayout().isDefault()) {
+ M->setDataLayout(getDataLayout());
+ } else {
+ assert(M->getDataLayout() == getDataLayout() && "DataLayout Mismatch");
+ }
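+    // Ownership stays with the JIT by default, but is recorded in ShouldDelete
+    // so that removeModule can hand the module back to the caller without the
+    // custom deleter freeing it.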
+ auto *MPtr = M.release();
+ ShouldDelete[MPtr] = true;
+ auto Deleter =
+ [this](Module *Mod) {
+ if (ShouldDelete[Mod])
+ delete Mod;
+ };
+ LocalModules.push_back(std::shared_ptr<Module>(MPtr, std::move(Deleter)));
+ LazyEmitLayer.addModule(LocalModules.back(), &MemMgr, &Resolver);
+ }
+
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override {
+ auto Obj =
+ std::make_shared<object::OwningBinary<object::ObjectFile>>(std::move(O),
+ nullptr);
+ ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ }
+
+ void addObjectFile(object::OwningBinary<object::ObjectFile> O) override {
+ auto Obj =
+ std::make_shared<object::OwningBinary<object::ObjectFile>>(std::move(O));
+ ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ }
+
+ void addArchive(object::OwningBinary<object::Archive> A) override {
+ Archives.push_back(std::move(A));
+ }
+
+ bool removeModule(Module *M) override {
+ for (auto I = LocalModules.begin(), E = LocalModules.end(); I != E; ++I) {
+ if (I->get() == M) {
+ ShouldDelete[M] = false;
+ LocalModules.erase(I);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ uint64_t getSymbolAddress(StringRef Name) {
+ return findSymbol(Name).getAddress();
+ }
+
+ JITSymbol findSymbol(StringRef Name) {
+ return findMangledSymbol(Mangle(Name));
+ }
+
+ void finalizeObject() override {
+    // Deprecated in ExecutionEngine and slated for removal there; it has no
+    // meaning for the Orc-based JIT, so this override is intentionally a no-op.
+ }
+
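+  // Forward mapSectionAddress to whichever not-yet-finalized object set owns
+  // LocalAddress (see UnfinalizedSections below).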
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
+ for (auto &P : UnfinalizedSections)
+ if (P.second.count(LocalAddress))
+ ObjectLayer.mapSectionAddress(P.first, LocalAddress, TargetAddress);
+ }
+
+ uint64_t getGlobalValueAddress(const std::string &Name) override {
+ return getSymbolAddress(Name);
+ }
+
+ uint64_t getFunctionAddress(const std::string &Name) override {
+ return getSymbolAddress(Name);
+ }
+
+ void *getPointerToFunction(Function *F) override {
+ uint64_t FAddr = getSymbolAddress(F->getName());
+ return reinterpret_cast<void *>(static_cast<uintptr_t>(FAddr));
+ }
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ uint64_t Addr = getSymbolAddress(Name);
+ if (!Addr && AbortOnFailure)
+ llvm_unreachable("Missing symbol!");
+ return reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
+ }
+
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void setObjectCache(ObjectCache *NewCache) override {
+ CompileLayer.getCompiler().setObjectCache(NewCache);
+ }
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ ObjectLayer.setProcessAllSections(ProcessAllSections);
+ }
+
+ void runStaticConstructorsDestructors(bool isDtors) override;
+
+private:
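+  // Symbol lookup order: modules added to the JIT (via the lazy-emitting
+  // layer), then the client resolver, then any registered archives.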
+ JITSymbol findMangledSymbol(StringRef Name) {
+ if (auto Sym = LazyEmitLayer.findSymbol(Name, false))
+ return Sym;
+ if (auto Sym = ClientResolver->findSymbol(Name))
+ return Sym;
+ if (auto Sym = scanArchives(Name))
+ return Sym;
+
+ return nullptr;
+ }
+
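+  // Search the registered archives for a member that defines Name; if one is
+  // found, add that object to the JIT and return the resulting symbol.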
+ JITSymbol scanArchives(StringRef Name) {
+ for (object::OwningBinary<object::Archive> &OB : Archives) {
+ object::Archive *A = OB.getBinary();
+      // Look for our symbol in each archive.
+ auto OptionalChildOrErr = A->findSym(Name);
+ if (!OptionalChildOrErr)
+ report_fatal_error(OptionalChildOrErr.takeError());
+ auto &OptionalChild = *OptionalChildOrErr;
+ if (OptionalChild) {
+ // FIXME: Support nested archives?
+ Expected<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ OptionalChild->getAsBinary();
+ if (!ChildBinOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(ChildBinOrErr.takeError());
+ continue;
+ }
+ std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
+ if (ChildBin->isObject()) {
+ std::unique_ptr<object::ObjectFile> ChildObj(
+ static_cast<object::ObjectFile*>(ChildBinOrErr->release()));
+ auto Obj =
+ std::make_shared<object::OwningBinary<object::ObjectFile>>(
+ std::move(ChildObj), nullptr);
+ ObjectLayer.addObject(std::move(Obj), &MemMgr, &Resolver);
+ if (auto Sym = ObjectLayer.findSymbol(Name, true))
+ return Sym;
+ }
+ }
+ }
+ return nullptr;
+ }
+
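+  // Called by the object layer when a set of objects has been loaded: records
+  // the sections allocated since the last load against the object handle (for
+  // mapSectionAddress forwarding) and notifies the client memory manager.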
+ class NotifyObjectLoadedT {
+ public:
+ using LoadedObjInfoListT =
+ std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>;
+
+ NotifyObjectLoadedT(OrcMCJITReplacement &M) : M(M) {}
+
+ void operator()(RTDyldObjectLinkingLayerBase::ObjHandleT H,
+ const RTDyldObjectLinkingLayer::ObjectPtr &Obj,
+ const LoadedObjectInfo &Info) const {
+ M.UnfinalizedSections[H] = std::move(M.SectionsAllocatedSinceLastLoad);
+ M.SectionsAllocatedSinceLastLoad = SectionAddrSet();
+ M.MemMgr.notifyObjectLoaded(&M, *Obj->getBinary());
+ }
+ private:
+ OrcMCJITReplacement &M;
+ };
+
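+  // Called by the object layer once a set of objects has been finalized; its
+  // sections no longer need mapSectionAddress bookkeeping.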
+ class NotifyFinalizedT {
+ public:
+ NotifyFinalizedT(OrcMCJITReplacement &M) : M(M) {}
+
+ void operator()(RTDyldObjectLinkingLayerBase::ObjHandleT H) {
+ M.UnfinalizedSections.erase(H);
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ };
+
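+  // Apply the data layout's global name mangling (e.g. a leading '_' on
+  // Darwin) so lookups match the symbols emitted by the compile layer.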
+ std::string Mangle(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mang.getNameWithPrefix(MangledNameStream, Name, getDataLayout());
+ }
+ return MangledName;
+ }
+
+ using ObjectLayerT = RTDyldObjectLinkingLayer;
+ using CompileLayerT = IRCompileLayer<ObjectLayerT, orc::SimpleCompiler>;
+ using LazyEmitLayerT = LazyEmittingLayer<CompileLayerT>;
+
+ std::unique_ptr<TargetMachine> TM;
+ MCJITReplacementMemMgr MemMgr;
+ LinkingResolver Resolver;
+ std::shared_ptr<JITSymbolResolver> ClientResolver;
+ Mangler Mang;
+
+ NotifyObjectLoadedT NotifyObjectLoaded;
+ NotifyFinalizedT NotifyFinalized;
+
+ ObjectLayerT ObjectLayer;
+ CompileLayerT CompileLayer;
+ LazyEmitLayerT LazyEmitLayer;
+
+  // We need to remember the sections allocated for each set of objects that
+  // has been emitted but not yet finalized (keyed by ObjectLayerT::ObjHandleT)
+  // so that we can forward mapSectionAddress calls to the right object set.
+ using SectionAddrSet = std::set<const void *>;
+ struct ObjHandleCompare {
+ bool operator()(ObjectLayerT::ObjHandleT H1,
+ ObjectLayerT::ObjHandleT H2) const {
+ return &*H1 < &*H2;
+ }
+ };
+ SectionAddrSet SectionsAllocatedSinceLastLoad;
+ std::map<ObjectLayerT::ObjHandleT, SectionAddrSet, ObjHandleCompare>
+ UnfinalizedSections;
+
+ std::map<Module*, bool> ShouldDelete;
+ std::vector<std::shared_ptr<Module>> LocalModules;
+ std::vector<object::OwningBinary<object::Archive>> Archives;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
diff --git a/contrib/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp b/contrib/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp
new file mode 100644
index 000000000000..2a7ab5ca8180
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp
@@ -0,0 +1,55 @@
+//===--------------- RPCUtils.cpp - RPCUtils implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// RPCUtils implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RPCUtils.h"
+
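+// Static type-id anchors for the RPC error classes, used by the ErrorInfo
+// dynamic type machinery.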
+char llvm::orc::rpc::RPCFatalError::ID = 0;
+char llvm::orc::rpc::ConnectionClosed::ID = 0;
+char llvm::orc::rpc::ResponseAbandoned::ID = 0;
+char llvm::orc::rpc::CouldNotNegotiate::ID = 0;
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+std::error_code ConnectionClosed::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCConnectionClosed);
+}
+
+void ConnectionClosed::log(raw_ostream &OS) const {
+ OS << "RPC connection already closed";
+}
+
+std::error_code ResponseAbandoned::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCResponseAbandoned);
+}
+
+void ResponseAbandoned::log(raw_ostream &OS) const {
+ OS << "RPC response abandoned";
+}
+
+CouldNotNegotiate::CouldNotNegotiate(std::string Signature)
+ : Signature(std::move(Signature)) {}
+
+std::error_code CouldNotNegotiate::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCCouldNotNegotiateFunction);
+}
+
+void CouldNotNegotiate::log(raw_ostream &OS) const {
+ OS << "Could not negotiate RPC function " << Signature;
+}
+
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm