llvm.org GIT mirror llvm / 4c711cc
[JITLink] Add an initial implementation of JITLink for MachO/AArch64. This implementation has support for all relocation types except TLV. Compact unwind sections are not yet supported, so exceptions/unwinding will not work.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@374476 91177308-0d34-0410-b5e6-96231b3b80d8
Lang Hames, 1 year, 1 month ago
6 changed file(s) with 1138 addition(s) and 0 deletion(s).
0 //===---- MachO_arm64.h - JIT link functions for MachO/arm64 ----*- C++ -*-===//
1 //
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // jit-link functions for MachO/arm64.
9 //
10 //===----------------------------------------------------------------------===//
11
12 #ifndef LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
13 #define LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
14
15 #include "llvm/ExecutionEngine/JITLink/JITLink.h"
16
17 namespace llvm {
18 namespace jitlink {
19
20 namespace MachO_arm64_Edges {
21
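/// Edge kinds for MachO/arm64. Branch26, Page21 and PageOffset12 correspond
/// to the ARM64_RELOC_* relocations of the same names; GOTPage21,
/// GOTPageOffset12 and PointerToGOT are the GOT-indirect forms; the Pointer*
/// kinds cover ARM64_RELOC_UNSIGNED; PairedAddend carries the value of an
/// ARM64_RELOC_ADDEND prefix; the Delta/NegDelta kinds represent the two
/// directions of a SUBTRACTOR/UNSIGNED pair (and are also used for eh-frame
/// deltas); and LDRLiteral19 is used for the literal load in
/// linker-generated stubs.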
22 enum MachOARM64RelocationKind : Edge::Kind {
23 Branch26 = Edge::FirstRelocation,
24 Pointer32,
25 Pointer64,
26 Pointer64Anon,
27 Page21,
28 PageOffset12,
29 GOTPage21,
30 GOTPageOffset12,
31 PointerToGOT,
32 PairedAddend,
33 LDRLiteral19,
34 Delta32,
35 Delta64,
36 NegDelta32,
37 NegDelta64,
38 };
39
40 } // namespace MachO_arm64_Edges
41
42 /// jit-link the given object buffer, which must be a MachO arm64 object file.
43 ///
44 /// If PrePrunePasses is empty then a default mark-live pass will be inserted
45 /// that will mark all exported atoms live. If PrePrunePasses is not empty, the
46 /// caller is responsible for including a pass to mark atoms as live.
47 ///
48 /// If PostPrunePasses is empty then a default GOT-and-stubs insertion pass will
49 /// be inserted. If PostPrunePasses is not empty then the caller is responsible
50 /// for including a pass to insert GOT and stub edges.
51 void jitLink_MachO_arm64(std::unique_ptr<JITLinkContext> Ctx);
52
53 /// Return the string name of the given MachO arm64 edge kind.
54 StringRef getMachOARM64RelocationKindName(Edge::Kind R);
55
56 } // end namespace jitlink
57 } // end namespace llvm
58
59 #endif // LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
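A minimal usage sketch (editorial illustration, not part of this patch): JITLink passes have the shape Error(LinkGraph &), as the PostPrunePasses lambda in MachO_arm64.cpp below shows, so a client that registers its own PrePrunePasses can keep the default liveness behaviour by also pushing the markAllSymbolsLive pass used there. The pass name myExtraPass is hypothetical.

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

// Hypothetical extra pre-prune pass: inspects the graph and leaves it
// unchanged. It could be registered alongside the default liveness pass, e.g.:
//   Config.PrePrunePasses.push_back(llvm::jitlink::markAllSymbolsLive);
//   Config.PrePrunePasses.push_back(myExtraPass);
llvm::Error myExtraPass(llvm::jitlink::LinkGraph &G) {
  // A real pass might mark additional symbols live or record statistics here.
  (void)G;
  return llvm::Error::success();
}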
33 JITLinkMemoryManager.cpp
44 EHFrameSupport.cpp
55 MachO.cpp
6 MachO_arm64.cpp
67 MachO_x86_64.cpp
78 MachOLinkGraphBuilder.cpp
89
1313 #include "llvm/ExecutionEngine/JITLink/MachO.h"
1414
1515 #include "llvm/BinaryFormat/MachO.h"
16 #include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
1617 #include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
1718 #include "llvm/Support/Endian.h"
1819 #include "llvm/Support/Format.h"
6364 });
6465
6566 switch (Header.cputype) {
67 case MachO::CPU_TYPE_ARM64:
68 return jitLink_MachO_arm64(std::move(Ctx));
6669 case MachO::CPU_TYPE_X86_64:
6770 return jitLink_MachO_x86_64(std::move(Ctx));
6871 }
0 //===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
1 //
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // MachO/arm64 jit-link implementation.
9 //
10 //===----------------------------------------------------------------------===//
11
12 #include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
13
14 #include "BasicGOTAndStubsBuilder.h"
15 #include "MachOLinkGraphBuilder.h"
16
17 #define DEBUG_TYPE "jitlink"
18
19 using namespace llvm;
20 using namespace llvm::jitlink;
21 using namespace llvm::jitlink::MachO_arm64_Edges;
22
23 namespace {
24
25 class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
26 public:
27 MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj)
28 : MachOLinkGraphBuilder(Obj),
29 NumSymbols(Obj.getSymtabLoadCommand().nsyms) {
30 addCustomSectionParser(
31 "__eh_frame", [this](NormalizedSection &EHFrameSection) {
32 if (!EHFrameSection.Data)
33 return make_error<JITLinkError>(
34 "__eh_frame section is marked zero-fill");
35 return MachOEHFrameBinaryParser(
36 *this, EHFrameSection.Address,
37 StringRef(EHFrameSection.Data, EHFrameSection.Size),
38 *EHFrameSection.GraphSection, 8, 4, NegDelta32, Delta64)
39 .addToGraph();
40 });
41 }
42
43 private:
44 static Expected<MachOARM64RelocationKind>
45 getRelocationKind(const MachO::relocation_info &RI) {
46 switch (RI.r_type) {
47 case MachO::ARM64_RELOC_UNSIGNED:
48 if (!RI.r_pcrel) {
49 if (RI.r_length == 3)
50 return RI.r_extern ? Pointer64 : Pointer64Anon;
51 else if (RI.r_length == 2)
52 return Pointer32;
53 }
54 break;
55 case MachO::ARM64_RELOC_SUBTRACTOR:
56 // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
57 // Initially represent SUBTRACTOR relocations with 'Delta'.
58 // They may be turned into NegDelta by parsePairRelocation.
59 if (!RI.r_pcrel && RI.r_extern) {
60 if (RI.r_length == 2)
61 return Delta32;
62 else if (RI.r_length == 3)
63 return Delta64;
64 }
65 break;
66 case MachO::ARM64_RELOC_BRANCH26:
67 if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
68 return Branch26;
69 break;
70 case MachO::ARM64_RELOC_PAGE21:
71 if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
72 return Page21;
73 break;
74 case MachO::ARM64_RELOC_PAGEOFF12:
75 if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
76 return PageOffset12;
77 break;
78 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
79 if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
80 return GOTPage21;
81 break;
82 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
83 if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
84 return GOTPageOffset12;
85 break;
86 case MachO::ARM64_RELOC_POINTER_TO_GOT:
87 if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
88 return PointerToGOT;
89 break;
90 case MachO::ARM64_RELOC_ADDEND:
91 if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
92 return PairedAddend;
93 break;
94 }
95
96 return make_error<JITLinkError>(
97 "Unsupported arm64 relocation: address=" +
98 formatv("{0:x8}", RI.r_address) +
99 ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
100 ", kind=" + formatv("{0:x1}", RI.r_type) +
101 ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
102 ", extern=" + (RI.r_extern ? "true" : "false") +
103 ", length=" + formatv("{0:d}", RI.r_length));
104 }
105
106 MachO::relocation_info
107 getRelocationInfo(const object::relocation_iterator RelItr) {
108 MachO::any_relocation_info ARI =
109 getObject().getRelocation(RelItr->getRawDataRefImpl());
110 MachO::relocation_info RI;
111 memcpy(&RI, &ARI, sizeof(MachO::relocation_info));
112 return RI;
113 }
114
115 using PairRelocInfo =
116 std::tuple<MachOARM64RelocationKind, Symbol *, uint64_t>;
117
118 // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
119 // returns the edge kind and addend to be used.
120 Expected<PairRelocInfo>
121 parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
122 const MachO::relocation_info &SubRI,
123 JITTargetAddress FixupAddress, const char *FixupContent,
124 object::relocation_iterator &UnsignedRelItr,
125 object::relocation_iterator &RelEnd) {
126 using namespace support;
127
128 assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
129 (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
130 "Subtractor kind should match length");
131 assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
132 assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
133
134 if (UnsignedRelItr == RelEnd)
135 return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
136 "UNSIGNED relocation");
137
138 auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
139
140 if (SubRI.r_address != UnsignedRI.r_address)
141 return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
142 "point to different addresses");
143
144 if (SubRI.r_length != UnsignedRI.r_length)
145 return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
146 "UNSIGNED reloc must match");
147
148 Symbol *FromSymbol;
149 if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
150 FromSymbol = FromSymbolOrErr->GraphSymbol;
151 else
152 return FromSymbolOrErr.takeError();
153
154 // Read the current fixup value.
155 uint64_t FixupValue = 0;
156 if (SubRI.r_length == 3)
157 FixupValue = *(const little64_t *)FixupContent;
158 else
159 FixupValue = *(const little32_t *)FixupContent;
160
161 // Find 'ToSymbol' using symbol number or address, depending on whether the
162 // paired UNSIGNED relocation is extern.
163 Symbol *ToSymbol = nullptr;
164 if (UnsignedRI.r_extern) {
165 // Find target symbol by symbol index.
166 if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
167 ToSymbol = ToSymbolOrErr->GraphSymbol;
168 else
169 return ToSymbolOrErr.takeError();
170 } else {
171 if (auto ToSymbolOrErr = findSymbolByAddress(FixupValue))
172 ToSymbol = &*ToSymbolOrErr;
173 else
174 return ToSymbolOrErr.takeError();
175 FixupValue -= ToSymbol->getAddress();
176 }
177
178 MachOARM64RelocationKind DeltaKind;
179 Symbol *TargetSymbol;
180 uint64_t Addend;
181 if (&BlockToFix == &FromSymbol->getAddressable()) {
182 TargetSymbol = ToSymbol;
183 DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
184 Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
185 // FIXME: handle extern 'from'.
186 } else if (&BlockToFix == &ToSymbol->getAddressable()) {
187 TargetSymbol = &*FromSymbol;
188 DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
189 Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
190 } else {
191 // BlockToFix was neither FromSymbol nor ToSymbol.
192 return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
193 "either 'A' or 'B' (or a symbol in one "
194 "of their alt-entry groups)");
195 }
196
197 return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
198 }
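// Worked example, using two of the cases exercised by the regression test in
// this patch: for "subtrahend_quad2: .quad named_data - subtrahend_quad2 - 2"
// the assembler emits SUBTRACTOR(subtrahend_quad2) followed by
// UNSIGNED(named_data) at subtrahend_quad2, with -2 stored in the quad. The
// fixup lies in FromSymbol's block, so we build a Delta64 edge targeting
// named_data with addend -2 (named_data - FixupAddress - 2). For
// "minuend_quad3: .quad minuend_quad3 - named_data - 2" the fixup lies in
// ToSymbol's block instead, so we build a NegDelta64 edge targeting
// named_data with addend -2 (FixupAddress - named_data - 2).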
199
200 Error addRelocations() override {
201 using namespace support;
202 auto &Obj = getObject();
203
204 for (auto &S : Obj.sections()) {
205
206 JITTargetAddress SectionAddress = S.getAddress();
207
208 for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
209 RelItr != RelEnd; ++RelItr) {
210
211 MachO::relocation_info RI = getRelocationInfo(RelItr);
212
213 // Sanity check the relocation kind.
214 auto Kind = getRelocationKind(RI);
215 if (!Kind)
216 return Kind.takeError();
217
218 // Find the address of the value to fix up.
219 JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
220
221 LLVM_DEBUG({
222 dbgs() << "Processing " << getMachOARM64RelocationKindName(*Kind)
223 << " relocation at " << format("0x%016" PRIx64, FixupAddress)
224 << "\n";
225 });
226
227 // Find the block that the fixup points to.
228 Block *BlockToFix = nullptr;
229 {
230 auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
231 if (!SymbolToFixOrErr)
232 return SymbolToFixOrErr.takeError();
233 BlockToFix = &SymbolToFixOrErr->getBlock();
234 }
235
236 if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
237 BlockToFix->getAddress() + BlockToFix->getContent().size())
238 return make_error<JITLinkError>(
239 "Relocation content extends past end of fixup block");
240
241 // Get a pointer to the fixup content.
242 const char *FixupContent = BlockToFix->getContent().data() +
243 (FixupAddress - BlockToFix->getAddress());
244
245 // The target symbol and addend will be populated by the switch below.
246 Symbol *TargetSymbol = nullptr;
247 uint64_t Addend = 0;
248
249 if (*Kind == PairedAddend) {
250 // If this is an Addend relocation then process it and move to the
251 // paired reloc.
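// For example, a PAGE21 reference to "foo + 16" is emitted as an
// ARM64_RELOC_ADDEND whose r_symbolnum field holds the value 16, immediately
// followed by the ARM64_RELOC_PAGE21 for foo at the same r_address.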
252
253 Addend = RI.r_symbolnum;
254
255 if (RelItr == RelEnd)
256 return make_error<JITLinkError>("Unpaired Addend reloc at " +
257 formatv("{0:x16}", FixupAddress));
258 ++RelItr;
259 RI = getRelocationInfo(RelItr);
260
261 Kind = getRelocationKind(RI);
262 if (!Kind)
263 return Kind.takeError();
264
265 if (*Kind != Branch26 && *Kind != Page21 && *Kind != PageOffset12)
266 return make_error<JITLinkError>(
267 "Invalid relocation pair: Addend + " +
268 getMachOARM64RelocationKindName(*Kind));
269 else
270 LLVM_DEBUG({
271 dbgs() << " pair is " << getMachOARM64RelocationKindName(*Kind)
272 << "\n";
273 });
274
275 // Find the address of the value to fix up.
276 JITTargetAddress PairedFixupAddress =
277 SectionAddress + (uint32_t)RI.r_address;
278 if (PairedFixupAddress != FixupAddress)
279 return make_error<JITLinkError>("Paired relocation points at "
280 "different target");
281 }
282
283 switch (*Kind) {
284 case Branch26: {
285 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
286 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
287 else
288 return TargetSymbolOrErr.takeError();
289 uint32_t Instr = *(const ulittle32_t *)FixupContent;
290 if ((Instr & 0x7fffffff) != 0x14000000)
291 return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
292 "instruction with a zero addend");
293 break;
294 }
295 case Pointer32:
296 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
297 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
298 else
299 return TargetSymbolOrErr.takeError();
300 Addend = *(const ulittle32_t *)FixupContent;
301 break;
302 case Pointer64:
303 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
304 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
305 else
306 return TargetSymbolOrErr.takeError();
307 Addend = *(const ulittle64_t *)FixupContent;
308 break;
309 case Pointer64Anon: {
310 JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
311 if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
312 TargetSymbol = &*TargetSymbolOrErr;
313 else
314 return TargetSymbolOrErr.takeError();
315 Addend = TargetAddress - TargetSymbol->getAddress();
316 break;
317 }
318 case Page21:
319 case GOTPage21: {
320 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
321 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
322 else
323 return TargetSymbolOrErr.takeError();
324 uint32_t Instr = *(const ulittle32_t *)FixupContent;
325 if ((Instr & 0xffffffe0) != 0x90000000)
326 return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
327 "ADRP instruction with a zero "
328 "addend");
329 break;
330 }
331 case PageOffset12: {
332 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
333 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
334 else
335 return TargetSymbolOrErr.takeError();
336 break;
337 }
338 case GOTPageOffset12: {
339 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
340 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
341 else
342 return TargetSymbolOrErr.takeError();
343 uint32_t Instr = *(const ulittle32_t *)FixupContent;
344 if ((Instr & 0xfffffc00) != 0xf9400000)
345 return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
346 "immediate instruction with a zero "
347 "addend");
348 break;
349 }
350 case PointerToGOT:
351 if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
352 TargetSymbol = TargetSymbolOrErr->GraphSymbol;
353 else
354 return TargetSymbolOrErr.takeError();
355 break;
356 case Delta32:
357 case Delta64: {
358 // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
359 // parsePairRelocation handles the paired reloc, and returns the
360 // edge kind to be used (either Delta32/Delta64, or
361 // NegDelta32/NegDelta64, depending on the direction of the
362 // subtraction) along with the addend.
363 auto PairInfo =
364 parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
365 FixupContent, ++RelItr, RelEnd);
366 if (!PairInfo)
367 return PairInfo.takeError();
368 std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
369 assert(TargetSymbol && "No target symbol from parsePairRelocation?");
370 break;
371 }
372 default:
373 llvm_unreachable("Special relocation kind should not appear in "
374 "mach-o file");
375 }
376
377 LLVM_DEBUG({
378 Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
379 Addend);
380 printEdge(dbgs(), *BlockToFix, GE,
381 getMachOARM64RelocationKindName(*Kind));
382 dbgs() << "\n";
383 });
384 BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
385 *TargetSymbol, Addend);
386 }
387 }
388 return Error::success();
389 }
390
391 unsigned NumSymbols = 0;
392 };
393
394 class MachO_arm64_GOTAndStubsBuilder
395 : public BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder> {
396 public:
397 MachO_arm64_GOTAndStubsBuilder(LinkGraph &G)
398 : BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder>(G) {}
399
400 bool isGOTEdge(Edge &E) const {
401 return E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
402 E.getKind() == PointerToGOT;
403 }
404
405 Symbol &createGOTEntry(Symbol &Target) {
406 auto &GOTEntryBlock = G.createContentBlock(
407 getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
408 GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
409 return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
410 }
411
412 void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
413 if (E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12) {
414 // Update the target, but leave the edge addend as-is.
415 E.setTarget(GOTEntry);
416 } else if (E.getKind() == PointerToGOT) {
417 E.setTarget(GOTEntry);
418 E.setKind(Delta32);
419 } else
420 llvm_unreachable("Not a GOT edge?");
421 }
422
423 bool isExternalBranchEdge(Edge &E) {
424 return E.getKind() == Branch26 && !E.getTarget().isDefined();
425 }
426
427 Symbol &createStub(Symbol &Target) {
428 auto &StubContentBlock =
429 G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
430 // Re-use GOT entries for stub targets.
431 auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
432 StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
433 return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
434 }
435
436 void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
437 assert(E.getKind() == Branch26 && "Not a Branch26 edge?");
438 assert(E.getAddend() == 0 && "Branch26 edge has non-zero addend?");
439 E.setTarget(Stub);
440 }
441
442 private:
443 Section &getGOTSection() {
444 if (!GOTSection)
445 GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
446 return *GOTSection;
447 }
448
449 Section &getStubsSection() {
450 if (!StubsSection) {
451 auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
452 sys::Memory::MF_READ | sys::Memory::MF_EXEC);
453 StubsSection = &G.createSection("$__STUBS", StubsProt);
454 }
455 return *StubsSection;
456 }
457
458 StringRef getGOTEntryBlockContent() {
459 return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
460 sizeof(NullGOTEntryContent));
461 }
462
463 StringRef getStubBlockContent() {
464 return StringRef(reinterpret_cast<const char *>(StubContent),
465 sizeof(StubContent));
466 }
467
468 static const uint8_t NullGOTEntryContent[8];
469 static const uint8_t StubContent[8];
470 Section *GOTSection = nullptr;
471 Section *StubsSection = nullptr;
472 };
473
474 const uint8_t MachO_arm64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
476 const uint8_t MachO_arm64_GOTAndStubsBuilder::StubContent[8] = {
477 0x10, 0x00, 0x00, 0x58, // LDR x16, <literal>
478 0x00, 0x02, 0x1f, 0xd6 // BR x16
479 };
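// The stub loads its target address from the associated GOT entry:
// createStub above adds an LDRLiteral19 edge at offset zero, so the LDR x16
// literal field is fixed up to point at the (shared) GOT entry, and BR x16
// then jumps to the loaded address.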
480
481 } // namespace
482
483 namespace llvm {
484 namespace jitlink {
485
486 class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
487 friend class JITLinker<MachOJITLinker_arm64>;
488
489 public:
490 MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
491 PassConfiguration PassConfig)
492 : JITLinker(std::move(Ctx), std::move(PassConfig)) {}
493
494 private:
495 StringRef getEdgeKindName(Edge::Kind R) const override {
496 return getMachOARM64RelocationKindName(R);
497 }
498
499 Expected<std::unique_ptr<LinkGraph>>
500 buildGraph(MemoryBufferRef ObjBuffer) override {
501 auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjBuffer);
502 if (!MachOObj)
503 return MachOObj.takeError();
504 return MachOLinkGraphBuilder_arm64(**MachOObj).buildGraph();
505 }
506
507 static Error targetOutOfRangeError(const Block &B, const Edge &E) {
508 std::string ErrMsg;
509 {
510 raw_string_ostream ErrStream(ErrMsg);
511 ErrStream << "Relocation target out of range: ";
512 printEdge(ErrStream, B, E, getMachOARM64RelocationKindName(E.getKind()));
513 ErrStream << "\n";
514 }
515 return make_error<JITLinkError>(std::move(ErrMsg));
516 }
517
518 static unsigned getPageOffset12Shift(uint32_t Instr) {
519 constexpr uint32_t LDRLiteralMask = 0x3ffffc00;
520
521 // Check for a GPR LDR immediate with a zero embedded literal.
522 // If found, the top two bits contain the shift.
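// For example, a 64-bit GPR load such as "ldr x0, [x0, #off]" encodes as
// 0xf94xxxxx: its size bits are 0b11, so the shift is 3 and the imm12 field
// holds off >> 3.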
523 if ((Instr & LDRLiteralMask) == 0x39400000)
524 return Instr >> 30;
525
526 // Check for a Neon LDR immediate of size 64-bit or less with a zero
527 // embedded literal. If found, the top two bits contain the shift.
528 if ((Instr & LDRLiteralMask) == 0x3d400000)
529 return Instr >> 30;
530
531 // Check for a Neon LDR immediate of size 128-bit with a zero embedded
532 // literal.
533 constexpr uint32_t SizeBitsMask = 0xc0000000;
534 if ((Instr & (LDRLiteralMask | SizeBitsMask)) == 0x3dc00000)
535 return 4;
536
537 return 0;
538 }
539
540 Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
541 using namespace support;
542
543 char *FixupPtr = BlockWorkingMem + E.getOffset();
544 JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
545
546 switch (E.getKind()) {
547 case Branch26: {
548 assert((FixupAddress & 0x3) == 0 && "Branch-inst is not 32-bit aligned");
549
550 int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
551
552 if (static_cast<uint64_t>(Value) & 0x3)
553 return make_error<JITLinkError>("Branch26 target is not 32-bit "
554 "aligned");
555
556 if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
557 return targetOutOfRangeError(B, E);
558
559 uint32_t RawInstr = *(little32_t *)FixupPtr;
560 assert((RawInstr & 0x7fffffff) == 0x14000000 &&
561 "RawInstr isn't a B or BR immediate instruction");
562 uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
563 uint32_t FixedInstr = RawInstr | Imm;
564 *(little32_t *)FixupPtr = FixedInstr;
565 break;
566 }
567 case Pointer32: {
568 uint64_t Value = E.getTarget().getAddress() + E.getAddend();
569 if (Value > std::numeric_limits<uint32_t>::max())
570 return targetOutOfRangeError(B, E);
571 *(ulittle32_t *)FixupPtr = Value;
572 break;
573 }
574 case Pointer64: {
575 uint64_t Value = E.getTarget().getAddress() + E.getAddend();
576 *(ulittle64_t *)FixupPtr = Value;
577 break;
578 }
579 case Page21:
580 case GOTPage21: {
581 assert(E.getAddend() == 0 && "PAGE21/GOTPAGE21 with non-zero addend");
582 uint64_t TargetPage =
583 E.getTarget().getAddress() & ~static_cast<uint64_t>(4096 - 1);
584 uint64_t PCPage = B.getAddress() & ~static_cast<uint64_t>(4096 - 1);
585
586 int64_t PageDelta = TargetPage - PCPage;
587 if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
588 return targetOutOfRangeError(B, E);
589
590 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
591 assert((RawInstr & 0xffffffe0) == 0x90000000 &&
592 "RawInstr isn't an ADRP instruction");
593 uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
594 uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
595 uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
596 *(ulittle32_t *)FixupPtr = FixedInstr;
597 break;
598 }
599 case PageOffset12: {
600 assert(E.getAddend() == 0 && "PAGEOFF12 with non-zero addend");
601 uint64_t TargetOffset = E.getTarget().getAddress() & 0xfff;
602
603 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
604 unsigned ImmShift = getPageOffset12Shift(RawInstr);
605
606 if (TargetOffset & ((1 << ImmShift) - 1))
607 return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
608
609 uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
610 uint32_t FixedInstr = RawInstr | EncodedImm;
611 *(ulittle32_t *)FixupPtr = FixedInstr;
612 break;
613 }
614 case GOTPageOffset12: {
615 assert(E.getAddend() == 0 && "GOTPAGEOF12 with non-zero addend");
616 uint64_t TargetOffset = E.getTarget().getAddress() & 0xfff;
617
618 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
619 assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
620 "RawInstr isn't a 64-bit LDR immediate");
621 uint32_t FixedInstr = RawInstr | (TargetOffset << 10);
622 *(ulittle32_t *)FixupPtr = FixedInstr;
623 break;
624 }
625 case LDRLiteral19: {
626 assert((FixupAddress & 0x3) == 0 && "LDR is not 32-bit aligned");
627 assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
628 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
629 assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
630 int64_t Delta = E.getTarget().getAddress() - FixupAddress;
631 if (Delta & 0x3)
632 return make_error<JITLinkError>("LDR literal target is not 32-bit "
633 "aligned");
634 if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
635 return targetOutOfRangeError(B, E);
636
637 uint32_t EncodedImm = (static_cast<uint32_t>(Delta) >> 2) << 5;
638 uint32_t FixedInstr = RawInstr | EncodedImm;
639 *(ulittle32_t *)FixupPtr = FixedInstr;
640 break;
641 }
642 case Delta32:
643 case Delta64:
644 case NegDelta32:
645 case NegDelta64: {
646 int64_t Value;
647 if (E.getKind() == Delta32 || E.getKind() == Delta64)
648 Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
649 else
650 Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
651
652 if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
653 if (Value < std::numeric_limits<int32_t>::min() ||
654 Value > std::numeric_limits<int32_t>::max())
655 return targetOutOfRangeError(B, E);
656 *(little32_t *)FixupPtr = Value;
657 } else
658 *(little64_t *)FixupPtr = Value;
659 break;
660 }
661 default:
662 llvm_unreachable("Unrecognized edge kind");
663 }
664
665 return Error::success();
666 }
667
668 uint64_t NullValue = 0;
669 };
670
671 void jitLink_MachO_arm64(std::unique_ptr<JITLinkContext> Ctx) {
672 PassConfiguration Config;
673 Triple TT("arm64-apple-ios");
674
675 if (Ctx->shouldAddDefaultTargetPasses(TT)) {
676 // Add a mark-live pass.
677 if (auto MarkLive = Ctx->getMarkLivePass(TT))
678 Config.PrePrunePasses.push_back(std::move(MarkLive));
679 else
680 Config.PrePrunePasses.push_back(markAllSymbolsLive);
681
682 // Add an in-place GOT/Stubs pass.
683 Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
684 MachO_arm64_GOTAndStubsBuilder(G).run();
685 return Error::success();
686 });
687 }
688
689 if (auto Err = Ctx->modifyPassConfig(TT, Config))
690 return Ctx->notifyFailed(std::move(Err));
691
692 // Construct a JITLinker and run the link function.
693 MachOJITLinker_arm64::link(std::move(Ctx), std::move(Config));
694 }
695
696 StringRef getMachOARM64RelocationKindName(Edge::Kind R) {
697 switch (R) {
698 case Branch26:
699 return "Branch26";
700 case Pointer64:
701 return "Pointer64";
702 case Pointer64Anon:
703 return "Pointer64Anon";
704 case Page21:
705 return "Page21";
706 case PageOffset12:
707 return "PageOffset12";
708 case GOTPage21:
709 return "GOTPage21";
710 case GOTPageOffset12:
711 return "GOTPageOffset12";
712 case PointerToGOT:
713 return "PointerToGOT";
714 case PairedAddend:
715 return "PairedAddend";
716 case LDRLiteral19:
717 return "LDRLiteral19";
718 case Delta32:
719 return "Delta32";
720 case Delta64:
721 return "Delta64";
722 case NegDelta32:
723 return "NegDelta32";
724 case NegDelta64:
725 return "NegDelta64";
726 default:
727 return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
728 }
729 }
730
731 } // end namespace jitlink
732 } // end namespace llvm
0 # RUN: rm -rf %t && mkdir -p %t
1 # RUN: llvm-mc -triple=arm64-apple-darwin19 -filetype=obj -o %t/macho_reloc.o %s
2 # RUN: llvm-jitlink -noexec -define-abs external_data=0xdeadbeef -define-abs external_func=0xcafef00d -check=%s %t/macho_reloc.o
3
4 .section __TEXT,__text,regular,pure_instructions
5
6 .p2align 2
7 Lanon_func:
8 ret
9
10 .globl named_func
11 .p2align 2
12 named_func:
13 ret
14
15 # Check ARM64_RELOC_BRANCH26 handling with a call to a local function.
16 # The branch instruction only encodes 26 bits of the 28-bit possible branch
17 # range, since the low 2 bits will always be zero.
18 #
19 # jitlink-check: decode_operand(test_local_call, 0)[25:0] = (named_func - test_local_call)[27:2]
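# For example, a forward branch of 0x1000 bytes would encode imm26 = 0x400.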
20 .globl test_local_call
21 .p2align 2
22 test_local_call:
23 bl named_func
24
25 .globl _main
26 .p2align 2
27 _main:
28 ret
29
30 # Check ARM64_RELOC_GOTPAGE21 / ARM64_RELOC_GOTPAGEOFF12 handling with a
31 # reference to an external symbol. Validate both the reference to the GOT entry,
32 # and also the content of the GOT entry.
33 #
34 # For the GOTPAGE21/ADRP instruction we have the 21-bit delta to the 4k page
35 # containing the GOT entry for external_data.
36 #
37 # For the GOTPAGEOFF/LDR instruction we have the 12-bit offset of the entry
38 # within the page.
39 #
40 # jitlink-check: *{8}(got_addr(macho_reloc.o, external_data)) = external_data
41 # jitlink-check: decode_operand(test_gotpage21, 1) = (got_addr(macho_reloc.o, external_data)[32:12] - test_gotpage21[32:12])
42 # jitlink-check: decode_operand(test_gotpageoff12, 2) = got_addr(macho_reloc.o, external_data)[11:3]
43 .globl test_gotpage21
44 .p2align 2
45 test_gotpage21:
46 adrp x0, external_data@GOTPAGE
47 .globl test_gotpageoff12
48 test_gotpageoff12:
49 ldr x0, [x0, external_data@GOTPAGEOFF]
50
51 # Check ARM64_RELOC_PAGE21 / ARM64_RELOC_PAGEOFF12 handling with a reference to
52 # a local symbol.
53 #
54 # For the PAGE21/ADRP instruction we have the 21-bit delta to the 4k page
55 # containing the global.
56 #
57 # For the PAGEOFF12 relocation we test the ADD instruction, all LDR/GPR
58 # variants and all LDR/Neon variants.
59 #
60 # jitlink-check: decode_operand(test_page21, 1) = (named_data[32:12] - test_page21[32:12])
61 # jitlink-check: decode_operand(test_pageoff12add, 2) = named_data[11:0]
62 # jitlink-check: decode_operand(test_pageoff12gpr8, 2) = named_data[11:0]
63 # jitlink-check: decode_operand(test_pageoff12gpr16, 2) = named_data[11:1]
64 # jitlink-check: decode_operand(test_pageoff12gpr32, 2) = named_data[11:2]
65 # jitlink-check: decode_operand(test_pageoff12gpr64, 2) = named_data[11:3]
66 # jitlink-check: decode_operand(test_pageoff12neon8, 2) = named_data[11:0]
67 # jitlink-check: decode_operand(test_pageoff12neon16, 2) = named_data[11:1]
68 # jitlink-check: decode_operand(test_pageoff12neon32, 2) = named_data[11:2]
69 # jitlink-check: decode_operand(test_pageoff12neon64, 2) = named_data[11:3]
70 # jitlink-check: decode_operand(test_pageoff12neon128, 2) = named_data[11:4]
71 .globl test_page21
72 .p2align 2
73 test_page21:
74 adrp x0, named_data@PAGE
75
76 .globl test_pageoff12add
77 test_pageoff12add:
78 add x0, x0, named_data@PAGEOFF
79
80 .globl test_pageoff12gpr8
81 test_pageoff12gpr8:
82 ldrb w0, [x0, named_data@PAGEOFF]
83
84 .globl test_pageoff12gpr16
85 test_pageoff12gpr16:
86 ldrh w0, [x0, named_data@PAGEOFF]
87
88 .globl test_pageoff12gpr32
89 test_pageoff12gpr32:
90 ldr w0, [x0, named_data@PAGEOFF]
91
92 .globl test_pageoff12gpr64
93 test_pageoff12gpr64:
94 ldr x0, [x0, named_data@PAGEOFF]
95
96 .globl test_pageoff12neon8
97 test_pageoff12neon8:
98 ldr b0, [x0, named_data@PAGEOFF]
99
100 .globl test_pageoff12neon16
101 test_pageoff12neon16:
102 ldr h0, [x0, named_data@PAGEOFF]
103
104 .globl test_pageoff12neon32
105 test_pageoff12neon32:
106 ldr s0, [x0, named_data@PAGEOFF]
107
108 .globl test_pageoff12neon64
109 test_pageoff12neon64:
110 ldr d0, [x0, named_data@PAGEOFF]
111
112 .globl test_pageoff12neon128
113 test_pageoff12neon128:
114 ldr q0, [x0, named_data@PAGEOFF]
115
116 # Check that calls to external functions trigger the generation of stubs and GOT
117 # entries.
118 #
119 # jitlink-check: decode_operand(test_external_call, 0) = (stub_addr(macho_reloc.o, external_func) - test_external_call)[27:2]
120 # jitlink-check: *{8}(got_addr(macho_reloc.o, external_func)) = external_func
121 .globl test_external_call
122 .p2align 2
123 test_external_call:
124 bl external_func
125
126 .section __DATA,__data
127
128 # Storage target for non-extern ARM64_RELOC_SUBTRACTOR relocs.
129 .p2align 3
130 Lanon_data:
131 .quad 0x1111111111111111
132
133 # Check ARM64_RELOC_SUBTRACTOR Quad/Long in anonymous storage with anonymous
134 # minuend: "LA: .quad LA - B + C". The anonymous subtrahend form
135 # "LA: .quad B - LA + C" is not tested as subtrahends are not permitted to be
136 # anonymous.
137 #
138 # Note: +8 offset in expression below to account for sizeof(Lanon_data).
139 # jitlink-check: *{8}(section_addr(macho_reloc.o, __data) + 8) = (section_addr(macho_reloc.o, __data) + 8) - named_data + 2
140 .p2align 3
141 Lanon_minuend_quad:
142 .quad Lanon_minuend_quad - named_data + 2
143
144 # Note: +16 offset in expression below to account for sizeof(Lanon_data) + sizeof(Lanon_minuend_long).
145 # jitlink-check: *{4}(section_addr(macho_reloc.o, __data) + 16) = ((section_addr(macho_reloc.o, __data) + 16) - named_data + 2)[31:0]
146 .p2align 2
147 Lanon_minuend_long:
148 .long Lanon_minuend_long - named_data + 2
149
150 # Named quad storage target (first named atom in __data).
151 # Align to 16 for use as 128-bit load target.
152 .globl named_data
153 .p2align 4
154 named_data:
155 .quad 0x2222222222222222
156 .quad 0x3333333333333333
157
158 # An alt-entry point for named_data
159 .globl named_data_alt_entry
160 .p2align 3
161 .alt_entry named_data_alt_entry
162 named_data_alt_entry:
163 .quad 0
164
165 # Check ARM64_RELOC_UNSIGNED / quad / extern handling by putting the address of
166 # a local named function into a quad symbol.
167 #
168 # jitlink-check: *{8}named_func_addr_quad = named_func
169 .globl named_func_addr_quad
170 .p2align 3
171 named_func_addr_quad:
172 .quad named_func
173
174 # Check ARM64_RELOC_UNSIGNED / quad / non-extern handling by putting the
175 # address of a local anonymous function into a quad symbol.
176 #
177 # jitlink-check: *{8}anon_func_addr_quad = section_addr(macho_reloc.o, __text)
178 .globl anon_func_addr_quad
179 .p2align 3
180 anon_func_addr_quad:
181 .quad Lanon_func
182
183 # ARM64_RELOC_SUBTRACTOR Quad/Long in named storage with anonymous minuend
184 #
185 # jitlink-check: *{8}anon_minuend_quad1 = section_addr(macho_reloc.o, __data) - anon_minuend_quad1 + 2
186 # Only the form "B: .quad LA - B + C" is tested. The form "B: .quad B - LA + C" is
187 # invalid because the subtrahend can not be local.
188 .globl anon_minuend_quad1
189 .p2align 3
190 anon_minuend_quad1:
191 .quad Lanon_data - anon_minuend_quad1 + 2
192
193 # jitlink-check: *{4}anon_minuend_long1 = (section_addr(macho_reloc.o, __data) - anon_minuend_long1 + 2)[31:0]
194 .globl anon_minuend_long1
195 .p2align 2
196 anon_minuend_long1:
197 .long Lanon_data - anon_minuend_long1 + 2
198
199 # Check ARM64_RELOC_SUBTRACTOR Quad/Long in named storage with minuend and subtrahend.
200 # Both forms "A: .quad A - B + C" and "A: .quad B - A + C" are tested.
201 #
202 # Check "A: .quad B - A + C".
203 # jitlink-check: *{8}subtrahend_quad2 = (named_data - subtrahend_quad2 - 2)
204 .globl subtrahend_quad2
205 .p2align 3
206 subtrahend_quad2:
207 .quad named_data - subtrahend_quad2 - 2
208
209 # Check "A: .long B - A + C".
210 # jitlink-check: *{4}subtrahend_long2 = (named_data - subtrahend_long2 - 2)[31:0]
211 .globl subtrahend_long2
212 .p2align 2
213 subtrahend_long2:
214 .long named_data - subtrahend_long2 - 2
215
216 # Check "A: .quad A - B + C".
217 # jitlink-check: *{8}minuend_quad3 = (minuend_quad3 - named_data - 2)
218 .globl minuend_quad3
219 .p2align 3
220 minuend_quad3:
221 .quad minuend_quad3 - named_data - 2
222
223 # Check "A: .long B - A + C".
224 # jitlink-check: *{4}minuend_long3 = (minuend_long3 - named_data - 2)[31:0]
225 .globl minuend_long3
226 .p2align 2
227 minuend_long3:
228 .long minuend_long3 - named_data - 2
229
230 # Check ARM64_RELOC_SUBTRACTOR handling for exprs of the form
231 # "A: .quad/long B - C + D", where 'B' or 'C' is at a fixed offset from 'A'
232 # (i.e. is part of an alt_entry chain that includes 'A').
233 #
234 # Check "A: .long B - C + D" where 'B' is an alt_entry for 'A'.
235 # jitlink-check: *{4}subtractor_with_alt_entry_minuend_long = (subtractor_with_alt_entry_minuend_long_B - named_data + 2)[31:0]
236 .globl subtractor_with_alt_entry_minuend_long
237 .p2align 2
238 subtractor_with_alt_entry_minuend_long:
239 .long subtractor_with_alt_entry_minuend_long_B - named_data + 2
240
241 .globl subtractor_with_alt_entry_minuend_long_B
242 .p2align 2
243 .alt_entry subtractor_with_alt_entry_minuend_long_B
244 subtractor_with_alt_entry_minuend_long_B:
245 .long 0
246
247 # Check "A: .quad B - C + D" where 'B' is an alt_entry for 'A'.
248 # jitlink-check: *{8}subtractor_with_alt_entry_minuend_quad = (subtractor_with_alt_entry_minuend_quad_B - named_data + 2)
249 .globl subtractor_with_alt_entry_minuend_quad
250 .p2align 3
251 subtractor_with_alt_entry_minuend_quad:
252 .quad subtractor_with_alt_entry_minuend_quad_B - named_data + 2
253
254 .globl subtractor_with_alt_entry_minuend_quad_B
255 .p2align 3
256 .alt_entry subtractor_with_alt_entry_minuend_quad_B
257 subtractor_with_alt_entry_minuend_quad_B:
258 .quad 0
259
260 # Check "A: .long B - C + D" where 'C' is an alt_entry for 'A'.
261 # jitlink-check: *{4}subtractor_with_alt_entry_subtrahend_long = (named_data - subtractor_with_alt_entry_subtrahend_long_B + 2)[31:0]
262 .globl subtractor_with_alt_entry_subtrahend_long
263 .p2align 2
264 subtractor_with_alt_entry_subtrahend_long:
265 .long named_data - subtractor_with_alt_entry_subtrahend_long_B + 2
266
267 .globl subtractor_with_alt_entry_subtrahend_long_B
268 .p2align 2
269 .alt_entry subtractor_with_alt_entry_subtrahend_long_B
270 subtractor_with_alt_entry_subtrahend_long_B:
271 .long 0
272
273 # Check "A: .quad B - C + D" where 'B' is an alt_entry for 'A'.
274 # jitlink-check: *{8}subtractor_with_alt_entry_subtrahend_quad = (named_data - subtractor_with_alt_entry_subtrahend_quad_B + 2)
275 .globl subtractor_with_alt_entry_subtrahend_quad
276 .p2align 3
277 subtractor_with_alt_entry_subtrahend_quad:
278 .quad named_data - subtractor_with_alt_entry_subtrahend_quad_B + 2
279
280 .globl subtractor_with_alt_entry_subtrahend_quad_B
281 .p2align 3
282 .alt_entry subtractor_with_alt_entry_subtrahend_quad_B
283 subtractor_with_alt_entry_subtrahend_quad_B:
284 .quad 0
285
286 # Check ARM64_POINTER_TO_GOT handling.
287 # ARM64_POINTER_TO_GOT is a delta-32 to a GOT entry.
288 #
289 # jitlink-check: *{4}test_got = (got_addr(macho_reloc.o, external_data) - test_got)[31:0]
290 .globl test_got
291 .p2align 2
292 test_got:
293 .long external_data@got - .
294
295 # Check that unreferenced atoms in no-dead-strip sections are not dead stripped.
296 # We need to use a local symbol for this as any named symbol will end up in the
297 # ORC responsibility set, which is automatically marked live and would cause
298 # spurious passes.
299 #
300 # jitlink-check: *{8}section_addr(macho_reloc.o, __nds_test_sect) = 0
301 .section __DATA,__nds_test_sect,regular,no_dead_strip
302 .quad 0
303
304 # Check that unreferenced local symbols that have been marked no-dead-strip are
305 # not dead-stripped.
306 #
307 # jitlink-check: *{8}section_addr(macho_reloc.o, __nds_test_nlst) = 0
308 .section __DATA,__nds_test_nlst,regular
309 .no_dead_strip no_dead_strip_test_symbol
310 no_dead_strip_test_symbol:
311 .quad 0
312
313 # Check that explicit zero-fill symbols are supported
314 # jitlink-check: *{8}zero_fill_test = 0
315 .globl zero_fill_test
316 .zerofill __DATA,__zero_fill_test,zero_fill_test,8,3
317
318 # Check that section alignments are respected.
319 # We test this by introducing two sections with alignment 8, each containing one
320 # byte of data. We require both symbols to have an aligned address.
321 #
322 # jitlink-check: section_alignment_check1[2:0] = 0
323 # jitlink-check: section_alignment_check2[2:0] = 0
324 .section __DATA,__sec_align_chk1
325 .p2align 3
326
327 .globl section_alignment_check1
328 section_alignment_check1:
329 .byte 0
330
331 .section __DATA,__sec_align_chk2
332 .p2align 3
333
334 .globl section_alignment_check2
335 section_alignment_check2:
336 .byte 0
337
338 .subsections_via_symbols
0 if not 'AArch64' in config.root.targets:
1 config.unsupported = True