llvm.org GIT mirror llvm / c14b8e9
[Orc] Rename OrcArchitectureSupport to OrcABISupport and add Win32 ABI support. This enables lazy JITing on Windows x86-64. Patch by David. Thanks David! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268845 91177308-0d34-0410-b5e6-96231b3b80d8 Lang Hames 4 years ago
9 changed file(s) with 818 addition(s) and 695 deletion(s). Raw diff Collapse all Expand all
33 #include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
44 #include "llvm/ExecutionEngine/Orc/LazyEmittingLayer.h"
55 #include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
6 #include "llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h"
6 #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
77 #include "llvm/IR/DataLayout.h"
88 #include "llvm/IR/DerivedTypes.h"
99 #include "llvm/IR/IRBuilder.h"
13081308
13091309 std::map> FunctionDefs;
13101310
1311 LocalJITCompileCallbackManager> CompileCallbacks;
1311 LocalJITCompileCallbackManager_SysV> CompileCallbacks;
13121312 };
13131313
13141314 static void HandleDefinition(SessionContext &S, KaleidoscopeJIT &J) {
0 //===-------------- OrcABISupport.h - ABI support code ---------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // ABI specific code for Orc, e.g. callback assembly.
10 //
11 // ABI classes should be part of the JIT *target* process, not the host
12 // process (except where you're doing hosted JITing and the two are one and the
13 // same).
14 //
15 //===----------------------------------------------------------------------===//
16
17 #ifndef LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
18 #define LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
19
20 #include "IndirectionUtils.h"
21 #include "llvm/Support/Memory.h"
22 #include "llvm/Support/Process.h"
23
24 namespace llvm {
25 namespace orc {
26
27 /// Generic ORC ABI support.
28 ///
29 /// This class can be substituted as the target architecure support class for
30 /// ORC templates that require one (e.g. IndirectStubsManagers). It does not
31 /// support lazy JITing however, and any attempt to use that functionality
32 /// will result in execution of an llvm_unreachable.
33 class OrcGenericABI {
34 public:
35 static const unsigned PointerSize = sizeof(uintptr_t);
36 static const unsigned TrampolineSize = 1;
37 static const unsigned ResolverCodeSize = 1;
38
39 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
40
41 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
42 void *CallbackMgr) {
43 llvm_unreachable("writeResolverCode is not supported by the generic host "
44 "support class");
45 }
46
47 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
48 unsigned NumTrampolines) {
49 llvm_unreachable("writeTrampolines is not supported by the generic host "
50 "support class");
51 }
52
53 class IndirectStubsInfo {
54 public:
55 const static unsigned StubSize = 1;
56 unsigned getNumStubs() const { llvm_unreachable("Not supported"); }
57 void *getStub(unsigned Idx) const { llvm_unreachable("Not supported"); }
58 void **getPtr(unsigned Idx) const { llvm_unreachable("Not supported"); }
59 };
60
61 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
62 unsigned MinStubs, void *InitialPtrVal) {
63 llvm_unreachable("emitIndirectStubsBlock is not supported by the generic "
64 "host support class");
65 }
66 };
67
68 /// @brief Provide information about stub blocks generated by the
69 /// makeIndirectStubsBlock function.
70 template class GenericIndirectStubsInfo {
71 public:
72 const static unsigned StubSize = StubSizeVal;
73
74 GenericIndirectStubsInfo() : NumStubs(0) {}
75 GenericIndirectStubsInfo(unsigned NumStubs, sys::OwningMemoryBlock StubsMem)
76 : NumStubs(NumStubs), StubsMem(std::move(StubsMem)) {}
77 GenericIndirectStubsInfo(GenericIndirectStubsInfo &&Other)
78 : NumStubs(Other.NumStubs), StubsMem(std::move(Other.StubsMem)) {
79 Other.NumStubs = 0;
80 }
81 GenericIndirectStubsInfo &operator=(GenericIndirectStubsInfo &&Other) {
82 NumStubs = Other.NumStubs;
83 Other.NumStubs = 0;
84 StubsMem = std::move(Other.StubsMem);
85 return *this;
86 }
87
88 /// @brief Number of stubs in this block.
89 unsigned getNumStubs() const { return NumStubs; }
90
91 /// @brief Get a pointer to the stub at the given index, which must be in
92 /// the range 0 .. getNumStubs() - 1.
93 void *getStub(unsigned Idx) const {
94 return static_cast(StubsMem.base()) + Idx * StubSize;
95 }
96
97 /// @brief Get a pointer to the implementation-pointer at the given index,
98 /// which must be in the range 0 .. getNumStubs() - 1.
99 void **getPtr(unsigned Idx) const {
100 char *PtrsBase = static_cast(StubsMem.base()) + NumStubs * StubSize;
101 return reinterpret_cast(PtrsBase) + Idx;
102 }
103
104 private:
105 unsigned NumStubs;
106 sys::OwningMemoryBlock StubsMem;
107 };
108
109 class OrcAArch64 {
110 public:
111 static const unsigned PointerSize = 8;
112 static const unsigned TrampolineSize = 12;
113 static const unsigned ResolverCodeSize = 0x120;
114
115 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
116
117 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
118
119 /// @brief Write the resolver code into the given memory. The user is be
120 /// responsible for allocating the memory and setting permissions.
121 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
122 void *CallbackMgr);
123
124 /// @brief Write the requsted number of trampolines into the given memory,
125 /// which must be big enough to hold 1 pointer, plus NumTrampolines
126 /// trampolines.
127 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
128 unsigned NumTrampolines);
129
130 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
131 /// the nearest page size.
132 ///
133 /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
134 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
135 /// will return a block of 1024 (2-pages worth).
136 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
137 unsigned MinStubs, void *InitialPtrVal);
138 };
139
140 /// @brief X86_64 code that's common to all ABIs.
141 ///
142 /// X86_64 supports lazy JITing.
143 class OrcX86_64_Base {
144 public:
145 static const unsigned PointerSize = 8;
146 static const unsigned TrampolineSize = 8;
147
148 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
149
150 /// @brief Write the requsted number of trampolines into the given memory,
151 /// which must be big enough to hold 1 pointer, plus NumTrampolines
152 /// trampolines.
153 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
154 unsigned NumTrampolines);
155
156 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
157 /// the nearest page size.
158 ///
159 /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
160 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
161 /// will return a block of 1024 (2-pages worth).
162 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
163 unsigned MinStubs, void *InitialPtrVal);
164 };
165
166 /// @brief X86_64 support for SysV ABI (Linux, MacOSX).
167 ///
168 /// X86_64_SysV supports lazy JITing.
169 class OrcX86_64_SysV : public OrcX86_64_Base {
170 public:
171 static const unsigned ResolverCodeSize = 0x6C;
172 typedef TargetAddress(*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
173
174 /// @brief Write the resolver code into the given memory. The user is be
175 /// responsible for allocating the memory and setting permissions.
176 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
177 void *CallbackMgr);
178 };
179
180 /// @brief X86_64 support for Win32.
181 ///
182 /// X86_64_Win32 supports lazy JITing.
183 class OrcX86_64_Win32 : public OrcX86_64_Base {
184 public:
185 static const unsigned ResolverCodeSize = 0x74;
186 typedef TargetAddress(*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
187
188 /// @brief Write the resolver code into the given memory. The user is be
189 /// responsible for allocating the memory and setting permissions.
190 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
191 void *CallbackMgr);
192 };
193
194 /// @brief I386 support.
195 ///
196 /// I386 supports lazy JITing.
197 class OrcI386 {
198 public:
199 static const unsigned PointerSize = 4;
200 static const unsigned TrampolineSize = 8;
201 static const unsigned ResolverCodeSize = 0x4a;
202
203 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
204
205 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
206
207 /// @brief Write the resolver code into the given memory. The user is be
208 /// responsible for allocating the memory and setting permissions.
209 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
210 void *CallbackMgr);
211
212 /// @brief Write the requsted number of trampolines into the given memory,
213 /// which must be big enough to hold 1 pointer, plus NumTrampolines
214 /// trampolines.
215 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
216 unsigned NumTrampolines);
217
218 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
219 /// the nearest page size.
220 ///
221 /// E.g. Asking for 4 stubs on i386, where stubs are 8-bytes, with 4k
222 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
223 /// will return a block of 1024 (2-pages worth).
224 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
225 unsigned MinStubs, void *InitialPtrVal);
226 };
227
228 } // End namespace orc.
229 } // End namespace llvm.
230
231 #endif // LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
+0
-212
include/llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h (deleted by this commit)
None //===-- OrcArchitectureSupport.h - Architecture support code ---*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Architecture specific code for Orc, e.g. callback assembly.
10 //
11 // Architecture classes should be part of the JIT *target* process, not the host
12 // process (except where you're doing hosted JITing and the two are one and the
13 // same).
14 //
15 //===----------------------------------------------------------------------===//
16
17 #ifndef LLVM_EXECUTIONENGINE_ORC_ORCARCHITECTURESUPPORT_H
18 #define LLVM_EXECUTIONENGINE_ORC_ORCARCHITECTURESUPPORT_H
19
20 #include "IndirectionUtils.h"
21 #include "llvm/Support/Memory.h"
22 #include "llvm/Support/Process.h"
23
24 namespace llvm {
25 namespace orc {
26
27 /// Generic ORC Architecture support.
28 ///
29 /// This class can be substituted as the target architecure support class for
30 /// ORC templates that require one (e.g. IndirectStubsManagers). It does not
31 /// support lazy JITing however, and any attempt to use that functionality
32 /// will result in execution of an llvm_unreachable.
33 class OrcGenericArchitecture {
34 public:
35 static const unsigned PointerSize = sizeof(uintptr_t);
36 static const unsigned TrampolineSize = 1;
37 static const unsigned ResolverCodeSize = 1;
38
39 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
40
41 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
42 void *CallbackMgr) {
43 llvm_unreachable("writeResolverCode is not supported by the generic host "
44 "support class");
45 }
46
47 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
48 unsigned NumTrampolines) {
49 llvm_unreachable("writeTrampolines is not supported by the generic host "
50 "support class");
51 }
52
53 class IndirectStubsInfo {
54 public:
55 const static unsigned StubSize = 1;
56 unsigned getNumStubs() const { llvm_unreachable("Not supported"); }
57 void *getStub(unsigned Idx) const { llvm_unreachable("Not supported"); }
58 void **getPtr(unsigned Idx) const { llvm_unreachable("Not supported"); }
59 };
60
61 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
62 unsigned MinStubs, void *InitialPtrVal) {
63 llvm_unreachable("emitIndirectStubsBlock is not supported by the generic "
64 "host support class");
65 }
66 };
67
68 /// @brief Provide information about stub blocks generated by the
69 /// makeIndirectStubsBlock function.
70 template class GenericIndirectStubsInfo {
71 public:
72 const static unsigned StubSize = StubSizeVal;
73
74 GenericIndirectStubsInfo() : NumStubs(0) {}
75 GenericIndirectStubsInfo(unsigned NumStubs, sys::OwningMemoryBlock StubsMem)
76 : NumStubs(NumStubs), StubsMem(std::move(StubsMem)) {}
77 GenericIndirectStubsInfo(GenericIndirectStubsInfo &&Other)
78 : NumStubs(Other.NumStubs), StubsMem(std::move(Other.StubsMem)) {
79 Other.NumStubs = 0;
80 }
81 GenericIndirectStubsInfo &operator=(GenericIndirectStubsInfo &&Other) {
82 NumStubs = Other.NumStubs;
83 Other.NumStubs = 0;
84 StubsMem = std::move(Other.StubsMem);
85 return *this;
86 }
87
88 /// @brief Number of stubs in this block.
89 unsigned getNumStubs() const { return NumStubs; }
90
91 /// @brief Get a pointer to the stub at the given index, which must be in
92 /// the range 0 .. getNumStubs() - 1.
93 void *getStub(unsigned Idx) const {
94 return static_cast(StubsMem.base()) + Idx * StubSize;
95 }
96
97 /// @brief Get a pointer to the implementation-pointer at the given index,
98 /// which must be in the range 0 .. getNumStubs() - 1.
99 void **getPtr(unsigned Idx) const {
100 char *PtrsBase = static_cast(StubsMem.base()) + NumStubs * StubSize;
101 return reinterpret_cast(PtrsBase) + Idx;
102 }
103
104 private:
105 unsigned NumStubs;
106 sys::OwningMemoryBlock StubsMem;
107 };
108
109 class OrcAArch64 {
110 public:
111 static const unsigned PointerSize = 8;
112 static const unsigned TrampolineSize = 12;
113 static const unsigned ResolverCodeSize = 0x120;
114
115 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
116
117 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
118
119 /// @brief Write the resolver code into the given memory. The user is be
120 /// responsible for allocating the memory and setting permissions.
121 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
122 void *CallbackMgr);
123
124 /// @brief Write the requsted number of trampolines into the given memory,
125 /// which must be big enough to hold 1 pointer, plus NumTrampolines
126 /// trampolines.
127 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
128 unsigned NumTrampolines);
129
130 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
131 /// the nearest page size.
132 ///
133 /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
134 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
135 /// will return a block of 1024 (2-pages worth).
136 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
137 unsigned MinStubs, void *InitialPtrVal);
138 };
139
140 /// @brief X86_64 support.
141 ///
142 /// X86_64 supports lazy JITing.
143 class OrcX86_64 {
144 public:
145 static const unsigned PointerSize = 8;
146 static const unsigned TrampolineSize = 8;
147 static const unsigned ResolverCodeSize = 0x6C;
148
149 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
150
151 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
152
153 /// @brief Write the resolver code into the given memory. The user is be
154 /// responsible for allocating the memory and setting permissions.
155 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
156 void *CallbackMgr);
157
158 /// @brief Write the requsted number of trampolines into the given memory,
159 /// which must be big enough to hold 1 pointer, plus NumTrampolines
160 /// trampolines.
161 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
162 unsigned NumTrampolines);
163
164 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
165 /// the nearest page size.
166 ///
167 /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
168 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
169 /// will return a block of 1024 (2-pages worth).
170 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
171 unsigned MinStubs, void *InitialPtrVal);
172 };
173
174 /// @brief I386 support.
175 ///
176 /// I386 supports lazy JITing.
177 class OrcI386 {
178 public:
179 static const unsigned PointerSize = 4;
180 static const unsigned TrampolineSize = 8;
181 static const unsigned ResolverCodeSize = 0x4a;
182
183 typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
184
185 typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
186
187 /// @brief Write the resolver code into the given memory. The user is be
188 /// responsible for allocating the memory and setting permissions.
189 static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
190 void *CallbackMgr);
191
192 /// @brief Write the requsted number of trampolines into the given memory,
193 /// which must be big enough to hold 1 pointer, plus NumTrampolines
194 /// trampolines.
195 static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
196 unsigned NumTrampolines);
197
198 /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
199 /// the nearest page size.
200 ///
201 /// E.g. Asking for 4 stubs on i386, where stubs are 8-bytes, with 4k
202 /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
203 /// will return a block of 1024 (2-pages worth).
204 static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
205 unsigned MinStubs, void *InitialPtrVal);
206 };
207
208 } // End namespace orc.
209 } // End namespace llvm.
210
211 #endif // LLVM_EXECUTIONENGINE_ORC_ORCARCHITECTURESUPPORT_H
11 ExecutionUtils.cpp
22 IndirectionUtils.cpp
33 NullResolver.cpp
4 OrcArchitectureSupport.cpp
4 OrcABISupport.cpp
55 OrcCBindings.cpp
66 OrcCBindingsStack.cpp
77 OrcError.cpp
0 //===------------- OrcABISupport.cpp - ABI specific support code ----------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
10 #include "llvm/ADT/Triple.h"
11 #include "llvm/Support/Process.h"
12
13 namespace llvm {
14 namespace orc {
15
16 void OrcAArch64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
17 void *CallbackMgr) {
18
19 const uint32_t ResolverCode[] = {
20 // resolver_entry:
21 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
22 0x910003fd, // 0x004: mov x29, sp
23 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
24 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
25 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
26 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
27 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
28 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
29 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
30 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
31 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
32 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
33 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
34 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
35 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
36 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
37 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
38 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
39 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
40 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
41 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
42 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
43 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
44 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
45 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
46 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
47 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
48 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
49 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
50 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
51 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
52 0x580004e0, // 0x07c: ldr x0, Lcallbackmgr
53 0xaa1e03e1, // 0x080: mov x1, x30
54 0xd1003021, // 0x084: sub x1, x1, #12
55 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
56 0xd63f0040, // 0x08c: blr x2
57 0xaa0003f1, // 0x090: mov x17, x0
58 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
59 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
60 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
61 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
62 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
63 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
64 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
65 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
66 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
67 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
68 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
69 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
70 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
71 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
72 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
73 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
74 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
75 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
76 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
77 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
78 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
79 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
80 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
81 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
82 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
83 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
84 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
85 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
86 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
87 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
88 0xd65f0220, // 0x10c: ret x17
89 0x01234567, // 0x110: Lreentry_fn_ptr:
90 0xdeadbeef, // 0x114: .quad 0
91 0x98765432, // 0x118: Lcallbackmgr:
92 0xcafef00d // 0x11c: .quad 0
93 };
94
95 const unsigned ReentryFnAddrOffset = 0x110;
96 const unsigned CallbackMgrAddrOffset = 0x118;
97
98 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
99 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
100 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
101 sizeof(CallbackMgr));
102 }
103
104 void OrcAArch64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
105 unsigned NumTrampolines) {
106
107 unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
108
109 memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
110
111 // OffsetToPtr is actually the offset from the PC for the 2nd instruction, so
112 // subtract 32-bits.
113 OffsetToPtr -= 4;
114
115 uint32_t *Trampolines = reinterpret_cast(TrampolineMem);
116
117 for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
118 Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
119 Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // mov x16, Lptr
120 Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
121 }
122
123 }
124
125 Error OrcAArch64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
126 unsigned MinStubs,
127 void *InitialPtrVal) {
128 // Stub format is:
129 //
130 // .section __orc_stubs
131 // stub1:
132 // ldr x0, ptr1 ; PC-rel load of ptr1
133 // br x0 ; Jump to resolver
134 // stub2:
135 // ldr x0, ptr2 ; PC-rel load of ptr2
136 // br x0 ; Jump to resolver
137 //
138 // ...
139 //
140 // .section __orc_ptrs
141 // ptr1:
142 // .quad 0x0
143 // ptr2:
144 // .quad 0x0
145 //
146 // ...
147
148 const unsigned StubSize = IndirectStubsInfo::StubSize;
149
150 // Emit at least MinStubs, rounded up to fill the pages allocated.
151 unsigned PageSize = sys::Process::getPageSize();
152 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
153 unsigned NumStubs = (NumPages * PageSize) / StubSize;
154
155 // Allocate memory for stubs and pointers in one call.
156 std::error_code EC;
157 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
158 2 * NumPages * PageSize, nullptr,
159 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
160
161 if (EC)
162 return errorCodeToError(EC);
163
164 // Create separate MemoryBlocks representing the stubs and pointers.
165 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
166 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
167 NumPages * PageSize,
168 NumPages * PageSize);
169
170 // Populate the stubs page stubs and mark it executable.
171 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
172 uint64_t PtrOffsetField = static_cast(NumPages * PageSize)
173 << 3;
174
175 for (unsigned I = 0; I < NumStubs; ++I)
176 Stub[I] = 0xd61f020058000010 | PtrOffsetField;
177
178 if (auto EC = sys::Memory::protectMappedMemory(
179 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
180 return errorCodeToError(EC);
181
182 // Initialize all pointers to point at FailureAddress.
183 void **Ptr = reinterpret_cast(PtrsBlock.base());
184 for (unsigned I = 0; I < NumStubs; ++I)
185 Ptr[I] = InitialPtrVal;
186
187 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
188
189 return Error::success();
190 }
191
192 void OrcX86_64_Base::writeTrampolines(uint8_t *TrampolineMem,
193 void *ResolverAddr,
194 unsigned NumTrampolines) {
195
196 unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
197
198 memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
199
200 uint64_t *Trampolines = reinterpret_cast(TrampolineMem);
201 uint64_t CallIndirPCRel = 0xf1c40000000015ff;
202
203 for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
204 Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
205 }
206
207 Error OrcX86_64_Base::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
208 unsigned MinStubs,
209 void *InitialPtrVal) {
210 // Stub format is:
211 //
212 // .section __orc_stubs
213 // stub1:
214 // jmpq *ptr1(%rip)
215 // .byte 0xC4 ; <- Invalid opcode padding.
216 // .byte 0xF1
217 // stub2:
218 // jmpq *ptr2(%rip)
219 //
220 // ...
221 //
222 // .section __orc_ptrs
223 // ptr1:
224 // .quad 0x0
225 // ptr2:
226 // .quad 0x0
227 //
228 // ...
229
230 const unsigned StubSize = IndirectStubsInfo::StubSize;
231
232 // Emit at least MinStubs, rounded up to fill the pages allocated.
233 unsigned PageSize = sys::Process::getPageSize();
234 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
235 unsigned NumStubs = (NumPages * PageSize) / StubSize;
236
237 // Allocate memory for stubs and pointers in one call.
238 std::error_code EC;
239 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
240 2 * NumPages * PageSize, nullptr,
241 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
242
243 if (EC)
244 return errorCodeToError(EC);
245
246 // Create separate MemoryBlocks representing the stubs and pointers.
247 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
248 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
249 NumPages * PageSize,
250 NumPages * PageSize);
251
252 // Populate the stubs page stubs and mark it executable.
253 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
254 uint64_t PtrOffsetField = static_cast(NumPages * PageSize - 6)
255 << 16;
256 for (unsigned I = 0; I < NumStubs; ++I)
257 Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
258
259 if (auto EC = sys::Memory::protectMappedMemory(
260 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
261 return errorCodeToError(EC);
262
263 // Initialize all pointers to point at FailureAddress.
264 void **Ptr = reinterpret_cast(PtrsBlock.base());
265 for (unsigned I = 0; I < NumStubs; ++I)
266 Ptr[I] = InitialPtrVal;
267
268 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
269
270 return Error::success();
271 }
272
273 void OrcX86_64_SysV::writeResolverCode(uint8_t *ResolverMem,
274 JITReentryFn ReentryFn,
275 void *CallbackMgr) {
276
277 const uint8_t ResolverCode[] = {
278 // resolver_entry:
279 0x55, // 0x00: pushq %rbp
280 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
281 0x50, // 0x04: pushq %rax
282 0x53, // 0x05: pushq %rbx
283 0x51, // 0x06: pushq %rcx
284 0x52, // 0x07: pushq %rdx
285 0x56, // 0x08: pushq %rsi
286 0x57, // 0x09: pushq %rdi
287 0x41, 0x50, // 0x0a: pushq %r8
288 0x41, 0x51, // 0x0c: pushq %r9
289 0x41, 0x52, // 0x0e: pushq %r10
290 0x41, 0x53, // 0x10: pushq %r11
291 0x41, 0x54, // 0x12: pushq %r12
292 0x41, 0x55, // 0x14: pushq %r13
293 0x41, 0x56, // 0x16: pushq %r14
294 0x41, 0x57, // 0x18: pushq %r15
295 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
296 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
297 0x48, 0xbf, // 0x26: movabsq , %rdi
298
299 // 0x28: Callback manager addr.
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301
302 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
303 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
304 0x48, 0xb8, // 0x38: movabsq , %rax
305
306 // 0x3a: JIT re-entry fn addr:
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
308
309 0xff, 0xd0, // 0x42: callq *%rax
310 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
311 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
312 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
313 0x41, 0x5f, // 0x54: popq %r15
314 0x41, 0x5e, // 0x56: popq %r14
315 0x41, 0x5d, // 0x58: popq %r13
316 0x41, 0x5c, // 0x5a: popq %r12
317 0x41, 0x5b, // 0x5c: popq %r11
318 0x41, 0x5a, // 0x5e: popq %r10
319 0x41, 0x59, // 0x60: popq %r9
320 0x41, 0x58, // 0x62: popq %r8
321 0x5f, // 0x64: popq %rdi
322 0x5e, // 0x65: popq %rsi
323 0x5a, // 0x66: popq %rdx
324 0x59, // 0x67: popq %rcx
325 0x5b, // 0x68: popq %rbx
326 0x58, // 0x69: popq %rax
327 0x5d, // 0x6a: popq %rbp
328 0xc3, // 0x6b: retq
329 };
330
331 const unsigned ReentryFnAddrOffset = 0x3a;
332 const unsigned CallbackMgrAddrOffset = 0x28;
333
334 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
335 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
336 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
337 sizeof(CallbackMgr));
338 }
339
340 void OrcX86_64_Win32::writeResolverCode(uint8_t *ResolverMem,
341 JITReentryFn ReentryFn,
342 void *CallbackMgr) {
343
344 // resolverCode is similar to OrcX86_64 with differences specific to windows x64 calling convention:
345 // arguments go into rcx, rdx and come in reverse order, shadow space allocation on stack
346 const uint8_t ResolverCode[] = {
347 // resolver_entry:
348 0x55, // 0x00: pushq %rbp
349 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
350 0x50, // 0x04: pushq %rax
351 0x53, // 0x05: pushq %rbx
352 0x51, // 0x06: pushq %rcx
353 0x52, // 0x07: pushq %rdx
354 0x56, // 0x08: pushq %rsi
355 0x57, // 0x09: pushq %rdi
356 0x41, 0x50, // 0x0a: pushq %r8
357 0x41, 0x51, // 0x0c: pushq %r9
358 0x41, 0x52, // 0x0e: pushq %r10
359 0x41, 0x53, // 0x10: pushq %r11
360 0x41, 0x54, // 0x12: pushq %r12
361 0x41, 0x55, // 0x14: pushq %r13
362 0x41, 0x56, // 0x16: pushq %r14
363 0x41, 0x57, // 0x18: pushq %r15
364 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
365 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
366
367 0x48, 0xb9, // 0x26: movabsq , %rcx
368 // 0x28: Callback manager addr.
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370
371 0x48, 0x8B, 0x55, 0x08, // 0x30: mov rdx, [rbp+0x8]
372 0x48, 0x83, 0xea, 0x06, // 0x34: sub rdx, 0x6
373
374 0x48, 0xb8, // 0x38: movabsq , %rax
375 // 0x3a: JIT re-entry fn addr:
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377
378 // 0x42: sub rsp, 0x20 (Allocate shadow space)
379 0x48, 0x83, 0xEC, 0x20,
380 0xff, 0xd0, // 0x46: callq *%rax
381
382 // 0x48: add rsp, 0x20 (Free shadow space)
383 0x48, 0x83, 0xC4, 0x20,
384
385 0x48, 0x89, 0x45, 0x08, // 0x4C: movq %rax, 8(%rbp)
386 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
387 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq 0x208, %rsp
388 0x41, 0x5f, // 0x5C: popq %r15
389 0x41, 0x5e, // 0x5E: popq %r14
390 0x41, 0x5d, // 0x60: popq %r13
391 0x41, 0x5c, // 0x62: popq %r12
392 0x41, 0x5b, // 0x64: popq %r11
393 0x41, 0x5a, // 0x66: popq %r10
394 0x41, 0x59, // 0x68: popq %r9
395 0x41, 0x58, // 0x6a: popq %r8
396 0x5f, // 0x6c: popq %rdi
397 0x5e, // 0x6d: popq %rsi
398 0x5a, // 0x6e: popq %rdx
399 0x59, // 0x6f: popq %rcx
400 0x5b, // 0x70: popq %rbx
401 0x58, // 0x71: popq %rax
402 0x5d, // 0x72: popq %rbp
403 0xc3, // 0x73: retq
404 };
405
406
407 const unsigned ReentryFnAddrOffset = 0x3a;
408 const unsigned CallbackMgrAddrOffset = 0x28;
409
410 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
411 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
412 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
413 sizeof(CallbackMgr));
414 }
415
416 void OrcI386::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
417 void *CallbackMgr) {
418
419 const uint8_t ResolverCode[] = {
420 // resolver_entry:
421 0x55, // 0x00: pushl %ebp
422 0x89, 0xe5, // 0x01: movl %esp, %ebp
423 0x54, // 0x03: pushl %esp
424 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
425 0x50, // 0x07: pushl %eax
426 0x53, // 0x08: pushl %ebx
427 0x51, // 0x09: pushl %ecx
428 0x52, // 0x0a: pushl %edx
429 0x56, // 0x0b: pushl %esi
430 0x57, // 0x0c: pushl %edi
431 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
432 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
433 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
434 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
435 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
436 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
437 0x00, // 0x22: movl , (%esp)
438 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl , %eax
439 0xff, 0xd0, // 0x2e: calll *%eax
440 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
441 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
442 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
443 0x5f, // 0x3e: popl %edi
444 0x5e, // 0x3f: popl %esi
445 0x5a, // 0x40: popl %edx
446 0x59, // 0x41: popl %ecx
447 0x5b, // 0x42: popl %ebx
448 0x58, // 0x43: popl %eax
449 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
450 0x5d, // 0x48: popl %ebp
451 0xc3 // 0x49: retl
452 };
453
454 const unsigned ReentryFnAddrOffset = 0x2a;
455 const unsigned CallbackMgrAddrOffset = 0x25;
456
457 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
458 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
459 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
460 sizeof(CallbackMgr));
461 }
462
463 void OrcI386::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
464 unsigned NumTrampolines) {
465
466 uint64_t CallRelImm = 0xF1C4C400000000e8;
467 uint64_t Resolver = reinterpret_cast(ResolverAddr);
468 uint64_t ResolverRel =
469 Resolver - reinterpret_cast(TrampolineMem) - 5;
470
471 uint64_t *Trampolines = reinterpret_cast(TrampolineMem);
472 for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
473 Trampolines[I] = CallRelImm | (ResolverRel << 8);
474 }
475
476 Error OrcI386::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
477 unsigned MinStubs, void *InitialPtrVal) {
478 // Stub format is:
479 //
480 // .section __orc_stubs
481 // stub1:
482 // jmpq *ptr1
483 // .byte 0xC4 ; <- Invalid opcode padding.
484 // .byte 0xF1
485 // stub2:
486 // jmpq *ptr2
487 //
488 // ...
489 //
490 // .section __orc_ptrs
491 // ptr1:
492 // .quad 0x0
493 // ptr2:
494 // .quad 0x0
495 //
496 // ...
497
498 const unsigned StubSize = IndirectStubsInfo::StubSize;
499
500 // Emit at least MinStubs, rounded up to fill the pages allocated.
501 unsigned PageSize = sys::Process::getPageSize();
502 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
503 unsigned NumStubs = (NumPages * PageSize) / StubSize;
504
505 // Allocate memory for stubs and pointers in one call.
506 std::error_code EC;
507 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
508 2 * NumPages * PageSize, nullptr,
509 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
510
511 if (EC)
512 return errorCodeToError(EC);
513
514 // Create separate MemoryBlocks representing the stubs and pointers.
515 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
516 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
517 NumPages * PageSize,
518 NumPages * PageSize);
519
520 // Populate the stubs page stubs and mark it executable.
521 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
522 uint64_t PtrAddr = reinterpret_cast(PtrsBlock.base());
523 for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
524 Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
525
526 if (auto EC = sys::Memory::protectMappedMemory(
527 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
528 return errorCodeToError(EC);
529
530 // Initialize all pointers to point at FailureAddress.
531 void **Ptr = reinterpret_cast(PtrsBlock.base());
532 for (unsigned I = 0; I < NumStubs; ++I)
533 Ptr[I] = InitialPtrVal;
534
535 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
536
537 return Error::success();
538 }
539
540 } // End namespace orc.
541 } // End namespace llvm.
+0
-464
lib/ExecutionEngine/Orc/OrcArchitectureSupport.cpp less more
None //===------ OrcArchSupport.cpp - Architecture specific support code -------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h"
10 #include "llvm/ADT/Triple.h"
11 #include "llvm/Support/Process.h"
12
13 namespace llvm {
14 namespace orc {
15
16 void OrcAArch64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
17 void *CallbackMgr) {
18
19 const uint32_t ResolverCode[] = {
20 // resolver_entry:
21 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
22 0x910003fd, // 0x004: mov x29, sp
23 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
24 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
25 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
26 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
27 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
28 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
29 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
30 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
31 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
32 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
33 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
34 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
35 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
36 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
37 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
38 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
39 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
40 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
41 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
42 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
43 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
44 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
45 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
46 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
47 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
48 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
49 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
50 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
51 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
52 0x580004e0, // 0x07c: ldr x0, Lcallbackmgr
53 0xaa1e03e1, // 0x080: mov x1, x30
54 0xd1003021, // 0x084: sub x1, x1, #12
55 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
56 0xd63f0040, // 0x08c: blr x2
57 0xaa0003f1, // 0x090: mov x17, x0
58 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
59 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
60 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
61 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
62 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
63 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
64 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
65 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
66 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
67 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
68 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
69 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
70 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
71 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
72 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
73 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
74 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
75 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
76 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
77 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
78 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
79 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
80 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
81 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
82 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
83 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
84 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
85 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
86 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
87 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
88 0xd65f0220, // 0x10c: ret x17
89 0x01234567, // 0x110: Lreentry_fn_ptr:
90 0xdeadbeef, // 0x114: .quad 0
91 0x98765432, // 0x118: Lcallbackmgr:
92 0xcafef00d // 0x11c: .quad 0
93 };
94
95 const unsigned ReentryFnAddrOffset = 0x110;
96 const unsigned CallbackMgrAddrOffset = 0x118;
97
98 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
99 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
100 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
101 sizeof(CallbackMgr));
102 }
103
104 void OrcAArch64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
105 unsigned NumTrampolines) {
106
107 unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
108
109 memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
110
111 // OffsetToPtr is actually the offset from the PC for the 2nd instruction, so
112 // subtract 32-bits.
113 OffsetToPtr -= 4;
114
115 uint32_t *Trampolines = reinterpret_cast(TrampolineMem);
116
117 for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
118 Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
119 Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // mov x16, Lptr
120 Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
121 }
122
123 }
124
125 Error OrcAArch64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
126 unsigned MinStubs,
127 void *InitialPtrVal) {
128 // Stub format is:
129 //
130 // .section __orc_stubs
131 // stub1:
132 // ldr x0, ptr1 ; PC-rel load of ptr1
133 // br x0 ; Jump to resolver
134 // stub2:
135 // ldr x0, ptr2 ; PC-rel load of ptr2
136 // br x0 ; Jump to resolver
137 //
138 // ...
139 //
140 // .section __orc_ptrs
141 // ptr1:
142 // .quad 0x0
143 // ptr2:
144 // .quad 0x0
145 //
146 // ...
147
148 const unsigned StubSize = IndirectStubsInfo::StubSize;
149
150 // Emit at least MinStubs, rounded up to fill the pages allocated.
151 unsigned PageSize = sys::Process::getPageSize();
152 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
153 unsigned NumStubs = (NumPages * PageSize) / StubSize;
154
155 // Allocate memory for stubs and pointers in one call.
156 std::error_code EC;
157 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
158 2 * NumPages * PageSize, nullptr,
159 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
160
161 if (EC)
162 return errorCodeToError(EC);
163
164 // Create separate MemoryBlocks representing the stubs and pointers.
165 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
166 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
167 NumPages * PageSize,
168 NumPages * PageSize);
169
170 // Populate the stubs page stubs and mark it executable.
171 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
172 uint64_t PtrOffsetField = static_cast(NumPages * PageSize)
173 << 3;
174
175 for (unsigned I = 0; I < NumStubs; ++I)
176 Stub[I] = 0xd61f020058000010 | PtrOffsetField;
177
178 if (auto EC = sys::Memory::protectMappedMemory(
179 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
180 return errorCodeToError(EC);
181
182 // Initialize all pointers to point at FailureAddress.
183 void **Ptr = reinterpret_cast(PtrsBlock.base());
184 for (unsigned I = 0; I < NumStubs; ++I)
185 Ptr[I] = InitialPtrVal;
186
187 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
188
189 return Error::success();
190 }
191
192 void OrcX86_64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
193 void *CallbackMgr) {
194
195 const uint8_t ResolverCode[] = {
196 // resolver_entry:
197 0x55, // 0x00: pushq %rbp
198 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
199 0x50, // 0x04: pushq %rax
200 0x53, // 0x05: pushq %rbx
201 0x51, // 0x06: pushq %rcx
202 0x52, // 0x07: pushq %rdx
203 0x56, // 0x08: pushq %rsi
204 0x57, // 0x09: pushq %rdi
205 0x41, 0x50, // 0x0a: pushq %r8
206 0x41, 0x51, // 0x0c: pushq %r9
207 0x41, 0x52, // 0x0e: pushq %r10
208 0x41, 0x53, // 0x10: pushq %r11
209 0x41, 0x54, // 0x12: pushq %r12
210 0x41, 0x55, // 0x14: pushq %r13
211 0x41, 0x56, // 0x16: pushq %r14
212 0x41, 0x57, // 0x18: pushq %r15
213 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
214 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
215 0x48, 0xbf, // 0x26: movabsq , %rdi
216
217 // 0x28: Callback manager addr.
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
219
220 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
221 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
222 0x48, 0xb8, // 0x38: movabsq , %rax
223
224 // 0x3a: JIT re-entry fn addr:
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
226
227 0xff, 0xd0, // 0x42: callq *%rax
228 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
229 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
230 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
231 0x41, 0x5f, // 0x54: popq %r15
232 0x41, 0x5e, // 0x56: popq %r14
233 0x41, 0x5d, // 0x58: popq %r13
234 0x41, 0x5c, // 0x5a: popq %r12
235 0x41, 0x5b, // 0x5c: popq %r11
236 0x41, 0x5a, // 0x5e: popq %r10
237 0x41, 0x59, // 0x60: popq %r9
238 0x41, 0x58, // 0x62: popq %r8
239 0x5f, // 0x64: popq %rdi
240 0x5e, // 0x65: popq %rsi
241 0x5a, // 0x66: popq %rdx
242 0x59, // 0x67: popq %rcx
243 0x5b, // 0x68: popq %rbx
244 0x58, // 0x69: popq %rax
245 0x5d, // 0x6a: popq %rbp
246 0xc3, // 0x6b: retq
247 };
248
249 const unsigned ReentryFnAddrOffset = 0x3a;
250 const unsigned CallbackMgrAddrOffset = 0x28;
251
252 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
253 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
254 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
255 sizeof(CallbackMgr));
256 }
257
258 void OrcX86_64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
259 unsigned NumTrampolines) {
260
261 unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
262
263 memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
264
265 uint64_t *Trampolines = reinterpret_cast(TrampolineMem);
266 uint64_t CallIndirPCRel = 0xf1c40000000015ff;
267
268 for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
269 Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
270 }
271
272 Error OrcX86_64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
273 unsigned MinStubs,
274 void *InitialPtrVal) {
275 // Stub format is:
276 //
277 // .section __orc_stubs
278 // stub1:
279 // jmpq *ptr1(%rip)
280 // .byte 0xC4 ; <- Invalid opcode padding.
281 // .byte 0xF1
282 // stub2:
283 // jmpq *ptr2(%rip)
284 //
285 // ...
286 //
287 // .section __orc_ptrs
288 // ptr1:
289 // .quad 0x0
290 // ptr2:
291 // .quad 0x0
292 //
293 // ...
294
295 const unsigned StubSize = IndirectStubsInfo::StubSize;
296
297 // Emit at least MinStubs, rounded up to fill the pages allocated.
298 unsigned PageSize = sys::Process::getPageSize();
299 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
300 unsigned NumStubs = (NumPages * PageSize) / StubSize;
301
302 // Allocate memory for stubs and pointers in one call.
303 std::error_code EC;
304 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
305 2 * NumPages * PageSize, nullptr,
306 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
307
308 if (EC)
309 return errorCodeToError(EC);
310
311 // Create separate MemoryBlocks representing the stubs and pointers.
312 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
313 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
314 NumPages * PageSize,
315 NumPages * PageSize);
316
317 // Populate the stubs page stubs and mark it executable.
318 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
319 uint64_t PtrOffsetField = static_cast(NumPages * PageSize - 6)
320 << 16;
321 for (unsigned I = 0; I < NumStubs; ++I)
322 Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
323
324 if (auto EC = sys::Memory::protectMappedMemory(
325 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
326 return errorCodeToError(EC);
327
328 // Initialize all pointers to point at FailureAddress.
329 void **Ptr = reinterpret_cast(PtrsBlock.base());
330 for (unsigned I = 0; I < NumStubs; ++I)
331 Ptr[I] = InitialPtrVal;
332
333 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
334
335 return Error::success();
336 }
337
338 void OrcI386::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
339 void *CallbackMgr) {
340
341 const uint8_t ResolverCode[] = {
342 // resolver_entry:
343 0x55, // 0x00: pushl %ebp
344 0x89, 0xe5, // 0x01: movl %esp, %ebp
345 0x54, // 0x03: pushl %esp
346 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
347 0x50, // 0x07: pushl %eax
348 0x53, // 0x08: pushl %ebx
349 0x51, // 0x09: pushl %ecx
350 0x52, // 0x0a: pushl %edx
351 0x56, // 0x0b: pushl %esi
352 0x57, // 0x0c: pushl %edi
353 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
354 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
355 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
356 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
357 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
358 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
359 0x00, // 0x22: movl , (%esp)
360 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl , %eax
361 0xff, 0xd0, // 0x2e: calll *%eax
362 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
363 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
364 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
365 0x5f, // 0x3e: popl %edi
366 0x5e, // 0x3f: popl %esi
367 0x5a, // 0x40: popl %edx
368 0x59, // 0x41: popl %ecx
369 0x5b, // 0x42: popl %ebx
370 0x58, // 0x43: popl %eax
371 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
372 0x5d, // 0x48: popl %ebp
373 0xc3 // 0x49: retl
374 };
375
376 const unsigned ReentryFnAddrOffset = 0x2a;
377 const unsigned CallbackMgrAddrOffset = 0x25;
378
379 memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
380 memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
381 memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
382 sizeof(CallbackMgr));
383 }
384
385 void OrcI386::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
386 unsigned NumTrampolines) {
387
388 uint64_t CallRelImm = 0xF1C4C400000000e8;
389 uint64_t Resolver = reinterpret_cast(ResolverAddr);
390 uint64_t ResolverRel =
391 Resolver - reinterpret_cast(TrampolineMem) - 5;
392
393 uint64_t *Trampolines = reinterpret_cast(TrampolineMem);
394 for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
395 Trampolines[I] = CallRelImm | (ResolverRel << 8);
396 }
397
398 Error OrcI386::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
399 unsigned MinStubs, void *InitialPtrVal) {
400 // Stub format is:
401 //
402 // .section __orc_stubs
403 // stub1:
404 // jmpq *ptr1
405 // .byte 0xC4 ; <- Invalid opcode padding.
406 // .byte 0xF1
407 // stub2:
408 // jmpq *ptr2
409 //
410 // ...
411 //
412 // .section __orc_ptrs
413 // ptr1:
414 // .quad 0x0
415 // ptr2:
416 // .quad 0x0
417 //
418 // ...
419
420 const unsigned StubSize = IndirectStubsInfo::StubSize;
421
422 // Emit at least MinStubs, rounded up to fill the pages allocated.
423 unsigned PageSize = sys::Process::getPageSize();
424 unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
425 unsigned NumStubs = (NumPages * PageSize) / StubSize;
426
427 // Allocate memory for stubs and pointers in one call.
428 std::error_code EC;
429 auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
430 2 * NumPages * PageSize, nullptr,
431 sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
432
433 if (EC)
434 return errorCodeToError(EC);
435
436 // Create separate MemoryBlocks representing the stubs and pointers.
437 sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
438 sys::MemoryBlock PtrsBlock(static_cast(StubsMem.base()) +
439 NumPages * PageSize,
440 NumPages * PageSize);
441
442 // Populate the stubs page stubs and mark it executable.
443 uint64_t *Stub = reinterpret_cast(StubsBlock.base());
444 uint64_t PtrAddr = reinterpret_cast(PtrsBlock.base());
445 for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
446 Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
447
448 if (auto EC = sys::Memory::protectMappedMemory(
449 StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
450 return errorCodeToError(EC);
451
452 // Initialize all pointers to point at FailureAddress.
453 void **Ptr = reinterpret_cast(PtrsBlock.base());
454 for (unsigned I = 0; I < NumStubs; ++I)
455 Ptr[I] = InitialPtrVal;
456
457 StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
458
459 return Error::success();
460 }
461
462 } // End namespace orc.
463 } // End namespace llvm.
88
99 #include "OrcCBindingsStack.h"
1010
11 #include "llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h"
11 #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
1212 #include "llvm/Support/Debug.h"
1313 #include "llvm/Support/DynamicLibrary.h"
1414 #include
2828 };
2929
3030 case Triple::x86_64: {
31 typedef orc::LocalJITCompileCallbackManager CCMgrT;
32 return llvm::make_unique(0);
31 if ( T.getOS() == Triple::OSType::Win32 ) {
32 typedef orc::LocalJITCompileCallbackManager CCMgrT;
33 return llvm::make_unique(0);
34 } else {
35 typedef orc::LocalJITCompileCallbackManager CCMgrT;
36 return llvm::make_unique(0);
37 }
3338 }
3439 }
3540 }
4651 };
4752
4853 case Triple::x86_64:
49 return []() {
50 return llvm::make_unique<
51 orc::LocalIndirectStubsManager>();
52 };
54 if (T.getOS() == Triple::OSType::Win32) {
55 return [](){
56 return llvm::make_unique<
57 orc::LocalIndirectStubsManager>();
58 };
59 } else {
60 return [](){
61 return llvm::make_unique<
62 orc::LocalIndirectStubsManager>();
63 };
64 }
5365 }
5466 }
None #include "llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h"
0 #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
11 #include "llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h"
22 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/DynamicLibrary.h"
1111 using namespace llvm::sys;
1212
1313 #ifdef __x86_64__
14 typedef OrcX86_64 HostOrcArch;
14 typedef OrcX86_64_SysV HostOrcArch;
1515 #else
1616 typedef OrcGenericArchitecture HostOrcArch;
1717 #endif
77 //===----------------------------------------------------------------------===//
88
99 #include "OrcLazyJIT.h"
10 #include "llvm/ExecutionEngine/Orc/OrcArchitectureSupport.h"
10 #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
1111 #include "llvm/Support/Debug.h"
1212 #include "llvm/Support/DynamicLibrary.h"
1313 #include
5656 }
5757
5858 case Triple::x86_64: {
59 typedef orc::LocalJITCompileCallbackManager CCMgrT;
60 return llvm::make_unique(0);
59 if ( T.getOS() == Triple::OSType::Win32 ) {
60 typedef orc::LocalJITCompileCallbackManager CCMgrT;
61 return llvm::make_unique(0);
62 } else {
63 typedef orc::LocalJITCompileCallbackManager CCMgrT;
64 return llvm::make_unique(0);
65 }
6166 }
6267 }
6368 }
7479 };
7580
7681 case Triple::x86_64:
77 return [](){
78 return llvm::make_unique<
79 orc::LocalIndirectStubsManager>();
80 };
82 if (T.getOS() == Triple::OSType::Win32) {
83 return [](){
84 return llvm::make_unique<
85 orc::LocalIndirectStubsManager>();
86 };
87 } else {
88 return [](){
89 return llvm::make_unique<
90 orc::LocalIndirectStubsManager>();
91 };
92 }
8193 }
8294 }
8395
191203 auto Main = fromTargetAddress(MainSym.getAddress());
192204 return Main(ArgC, ArgV);
193205 }
206