commit 75ac35d (trunk r212859)
Author: Lang Hames

[RuntimeDyld] Add GOT support for AArch64 to RuntimeDyldMachO.

Test cases to follow once RuntimeDyldChecker supports introspection of stubs.

Fixes <rdar://problem/17648000>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212859 91177308-0d34-0410-b5e6-96231b3b80d8
1 changed file (RuntimeDyldMachO.cpp) with 175 additions and 25 deletions.
@@ -416 +416 @@
   const SectionEntry &Section = Sections[RE.SectionID];
   uint8_t* LocalAddress = Section.Address + RE.Offset;

-  // If the relocation is PC-relative, the value to be encoded is the
-  // pointer difference.
-  if (RE.IsPCRel) {
-    uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
-    Value -= FinalAddress;
-  }
-
   switch (RE.RelType) {
   default:
     llvm_unreachable("Invalid relocation type!");
-  case MachO::ARM64_RELOC_UNSIGNED:
-    return applyRelocationValue(LocalAddress, Value, 1 << RE.Size);
+  case MachO::ARM64_RELOC_UNSIGNED: {
+    assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+    // Mask in the target value a byte at a time (we don't have an alignment
+    // guarantee for the target address, so this is safest).
+    if (RE.Size < 2)
+      llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+    applyRelocationValue(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+    break;
+  }
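applyRelocationValue itself is not shown in this diff; per the comment above, it writes the value out a byte at a time so that unaligned target addresses are safe. Below is a minimal sketch of that behavior, assuming a plain little-endian store (writeBytesLE is a hypothetical stand-in, not the actual helper). Note that 1 << RE.Size means RE.Size holds the log2 of the width in bytes, so the RE.Size < 2 guard rejects anything narrower than 32 bits.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for applyRelocationValue: store a value into an
// arbitrarily aligned target address one byte at a time, lowest byte first
// (little-endian), so the write never requires natural alignment.
static void writeBytesLE(uint8_t *Addr, uint64_t Value, unsigned Size) {
  for (unsigned i = 0; i < Size; ++i) {
    Addr[i] = (uint8_t)(Value & 0xFF);
    Value >>= 8;
  }
}

int main() {
  uint8_t Buf[8] = {0};
  writeBytesLE(Buf, 0x1122334455667788ULL, 8);
  assert(Buf[0] == 0x88 && Buf[7] == 0x11); // lowest byte lands first
  return 0;
}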
   case MachO::ARM64_RELOC_BRANCH26: {
+    assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
     // Mask the value into the target address. We know instructions are
     // 32-bit aligned, so we can do it all at once.
-    uint32_t *p = (uint32_t *)LocalAddress;
-    // The low two bits of the value are not encoded.
-    Value >>= 2;
-    // Mask the value to 26 bits.
-    uint64_t FinalValue = Value & 0x3ffffff;
-    // Check for overflow.
-    if (FinalValue != Value)
-      return Error("ARM64 BRANCH26 relocation out of range.");
+    uint32_t *p = (uint32_t*)LocalAddress;
+    // Check if the addend is encoded in the instruction.
+    uint32_t EncodedAddend = *p & 0x03FFFFFF;
+    if (EncodedAddend != 0) {
+      if (RE.Addend == 0)
+        llvm_unreachable("branch26 instruction has embedded addend.");
+      else
+        llvm_unreachable("branch26 instruction has embedded addend and "
+                         "ARM64_RELOC_ADDEND.");
+    }
+    // Check if branch is in range.
+    uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+    uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
+    assert(isInt<28>(PCRelVal) && "Branch target out of range!");
     // Insert the value into the instruction.
-    *p = (*p & ~0x3ffffff) | FinalValue;
+    *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
     break;
   }
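As a cross-check of the BRANCH26 math: a B/BL instruction keeps its opcode in the top six bits (hence the 0xFC000000 mask) and encodes the signed byte displacement divided by 4 in the low 26 bits, giving a range of +/-128 MiB, which is why a 28-bit signed bound on the byte displacement is the right check. A self-contained sketch (encodeBranch26 is illustrative, not code from this patch):

#include <cassert>
#include <cstdint>

// Re-encode an AArch64 B/BL displacement: imm26 holds the signed byte
// offset divided by 4; the opcode lives in the top six bits.
static uint32_t encodeBranch26(uint32_t Instr, int64_t ByteOffset) {
  assert((ByteOffset & 0x3) == 0 && "branch target must be 4-byte aligned");
  assert(ByteOffset >= -(1LL << 27) && ByteOffset < (1LL << 27) &&
         "displacement exceeds +/-128 MiB");
  return (Instr & 0xFC000000) | ((uint32_t)(ByteOffset >> 2) & 0x03FFFFFF);
}

int main() {
  uint32_t B = 0x14000000;                      // "b ." with zero immediate
  uint32_t Patched = encodeBranch26(B, 0x1000); // branch 4 KiB forward
  assert(Patched == 0x14000400);                // 0x1000 >> 2 == 0x400
  return 0;
}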
+  case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+  case MachO::ARM64_RELOC_PAGE21: {
+    assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+    // Mask the value into the target address. We know instructions are
+    // 32-bit aligned, so we can do it all at once.
+    uint32_t *p = (uint32_t*)LocalAddress;
+    // Check if the addend is encoded in the instruction.
+    uint32_t EncodedAddend = ((*p & 0x60000000) >> 29) |
+                             ((*p & 0x01FFFFE0) >> 3);
+    if (EncodedAddend != 0) {
+      if (RE.Addend == 0)
+        llvm_unreachable("adrp instruction has embedded addend.");
+      else
+        llvm_unreachable("adrp instruction has embedded addend and "
+                         "ARM64_RELOC_ADDEND.");
+    }
+    // Adjust for PC-relative relocation and offset.
+    uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+    uint64_t PCRelVal = ((Value + RE.Addend) & (-4096)) -
+                        (FinalAddress & (-4096));
+    // Check that the value fits into 21 bits (+ 12 lower bits).
+    assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
+    // Insert the value into the instruction.
+    uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
+    uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
+    *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+    break;
+  }
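The PAGE21 shift constants follow from the ADRP encoding: the page delta (target page minus the ADRP's own page) is a 21-bit signed page count, stored with its low two bits in immlo (instruction bits 30:29) and the remaining 19 bits in immhi (bits 23:5). Since the delta is still in bytes here, immlo is (delta >> 12) << 29, i.e. delta << 17, and immhi is (delta >> 14) << 5, i.e. delta >> 9. A worked sketch under those assumptions (encodePage21 is illustrative):

#include <cassert>
#include <cstdint>

// Illustrative ADRP immediate split, mirroring the PAGE21 case above.
static uint32_t encodePage21(uint32_t Instr, uint64_t Target, uint64_t PC) {
  int64_t Delta = (int64_t)((Target & ~0xFFFULL) - (PC & ~0xFFFULL));
  assert(Delta >= -(1LL << 32) && Delta < (1LL << 32) &&
         "page delta must fit in 33 bits");
  uint32_t ImmLo = (uint32_t)(Delta << 17) & 0x60000000; // page bits 1:0
  uint32_t ImmHi = (uint32_t)(Delta >> 9) & 0x00FFFFE0;  // page bits 20:2
  return (Instr & 0x9F00001F) | ImmHi | ImmLo;
}

int main() {
  uint32_t Adrp = 0x90000000; // "adrp x0, ." with a zero immediate
  // Target three pages (0x3000 bytes) above the ADRP's page: page count 3,
  // so immlo = 0b11 and immhi = 0.
  assert(encodePage21(Adrp, 0x13000, 0x10000) == 0xF0000000);
  return 0;
}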
+  case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+  case MachO::ARM64_RELOC_PAGEOFF12: {
+    assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
+    // Mask the value into the target address. We know instructions are
+    // 32-bit aligned, so we can do it all at once.
+    uint32_t *p = (uint32_t*)LocalAddress;
+    // Check if the addend is encoded in the instruction.
+    uint32_t EncodedAddend = *p & 0x003FFC00;
+    if (EncodedAddend != 0) {
+      if (RE.Addend == 0)
+        llvm_unreachable("ldr/str/add instruction has embedded addend.");
+      else
+        llvm_unreachable("ldr/str/add instruction has embedded addend and "
+                         "ARM64_RELOC_ADDEND.");
+    }
+    // Add the offset from the symbol.
+    Value += RE.Addend;
+    // Mask out the page address and only use the lower 12 bits.
+    Value &= 0xFFF;
+    // Check which instruction we are updating to obtain the implicit shift
+    // factor from LDR/STR instructions.
+    if (*p & 0x08000000) {
+      uint32_t ImplicitShift = ((*p >> 30) & 0x3);
+      switch (ImplicitShift) {
+      case 0:
+        // Check if this is a vector op.
+        if ((*p & 0x04800000) == 0x04800000) {
+          ImplicitShift = 4;
+          assert(((Value & 0xF) == 0) &&
+                 "128-bit LDR/STR not 16-byte aligned.");
+        }
+        break;
+      case 1:
+        assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+        break;
+      case 2:
+        assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+        break;
+      case 3:
+        assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+        break;
+      }
+      // Compensate for implicit shift.
+      Value >>= ImplicitShift;
+    }
+    // Insert the value into the instruction.
+    *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+    break;
+  }
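The implicit shift mirrors how AArch64 scales the unsigned 12-bit load/store offset by the access size: bits 31:30 of the instruction give the log2 of the width (0 through 3), and the extra check recognizes 128-bit vector loads/stores (shift 4). ADD-immediate, which has bit 27 clear, takes the offset unscaled. A standalone sketch of the same decoding (encodePageOff12 is illustrative):

#include <cassert>
#include <cstdint>

// Scale and insert a :lo12: page offset, mirroring the PAGEOFF12 case above.
static uint32_t encodePageOff12(uint32_t Instr, uint64_t PageOffset) {
  uint32_t Shift = 0;
  if (Instr & 0x08000000) {               // load/store, not ADD-immediate
    Shift = (Instr >> 30) & 0x3;          // log2 of the access size
    if (Shift == 0 && (Instr & 0x04800000) == 0x04800000)
      Shift = 4;                          // 128-bit vector LDR/STR
  }
  assert((PageOffset & ((1u << Shift) - 1)) == 0 && "misaligned offset");
  uint32_t Imm12 = (uint32_t)(PageOffset >> Shift) & 0xFFF;
  return (Instr & 0xFFC003FF) | (Imm12 << 10);
}

int main() {
  uint32_t Ldr64 = 0xF9400000;                     // "ldr x0, [x0]"
  uint32_t Patched = encodePageOff12(Ldr64, 0x10); // offset of 16 bytes
  assert(Patched == 0xF9400800);                   // 16 >> 3 == 2, at bit 10
  return 0;
}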
   case MachO::ARM64_RELOC_SUBTRACTOR:
-  case MachO::ARM64_RELOC_PAGE21:
-  case MachO::ARM64_RELOC_PAGEOFF12:
-  case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
-  case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
   case MachO::ARM64_RELOC_POINTER_TO_GOT:
   case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
   case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+    llvm_unreachable("Relocation type not implemented yet!");
+    return Error("Relocation type not implemented yet!");
   case MachO::ARM64_RELOC_ADDEND:
-    return Error("Relocation type not implemented yet!");
+    llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
+                     "processRelocationRef!");
   }
   return false;
 }
@@ -658 +739 @@
   const MachOObjectFile *MachO = static_cast<const MachOObjectFile *>(OF);
   MachO::any_relocation_info RE =
       MachO->getRelocation(RelI->getRawDataRefImpl());
+  int64_t RelocAddendValue = 0;
+  bool HasRelocAddendValue = false;

   uint32_t RelType = MachO->getAnyRelocationType(RE);
+  if (Arch == Triple::arm64) {
+    // ARM64_RELOC_ADDEND provides the offset (addend) that will be used by
+    // the next relocation entry. Save the value and advance to the next
+    // relocation entry.
+    if (RelType == MachO::ARM64_RELOC_ADDEND) {
+      assert(!MachO->getPlainRelocationExternal(RE));
+      assert(!MachO->getAnyRelocationPCRel(RE));
+      assert(MachO->getAnyRelocationLength(RE) == 2);
+      uint64_t RawAddend = MachO->getPlainRelocationSymbolNum(RE);
+      // Sign-extend the 24-bit addend to 64 bits.
+      RelocAddendValue = RawAddend << 40;
+      RelocAddendValue >>= 40;
+      HasRelocAddendValue = true;
+
+      // Get the next entry.
+      RE = MachO->getRelocation((++RelI)->getRawDataRefImpl());
+      RelType = MachO->getAnyRelocationType(RE);
+      assert(RelType == MachO::ARM64_RELOC_BRANCH26 ||
+             RelType == MachO::ARM64_RELOC_PAGE21 ||
+             RelType == MachO::ARM64_RELOC_PAGEOFF12);
+    } else if (RelType == MachO::ARM64_RELOC_BRANCH26 ||
+               RelType == MachO::ARM64_RELOC_PAGE21 ||
+               RelType == MachO::ARM64_RELOC_PAGEOFF12 ||
+               RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+               RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12) {
+      RelocAddendValue = 0;
+      HasRelocAddendValue = true;
+    }
+  }

   // FIXME: Properly handle scattered relocations.
   // Special case the couple of scattered relocations that we know how
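The shift pair is the usual trick for sign-extending a narrow field: shifting the 24-bit payload left by 40 places its sign bit at bit 63, and the arithmetic right shift drags it back down. llvm::SignExtend64<24> from llvm/Support/MathExtras.h expresses the same operation. A compact sketch (signExtend24 is illustrative):

#include <cassert>
#include <cstdint>

// Sign-extend the low 24 bits of Raw to a full int64_t, as the ADDEND
// handling above does with its << 40 / >> 40 pair.
static int64_t signExtend24(uint64_t Raw) {
  int64_t V = (int64_t)(Raw << 40); // bit 23 becomes the sign bit
  return V >> 40;                   // arithmetic shift replicates it down
}

int main() {
  assert(signExtend24(0x000001) == 1);
  assert(signExtend24(0xFFFFFF) == -1);        // all-ones 24-bit value
  assert(signExtend24(0x800000) == -0x800000); // most negative addend
  return 0;
}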
@@ -690 +803 @@
   RelI->getOffset(Offset);
   uint8_t *LocalAddress = Section.Address + Offset;
   unsigned NumBytes = 1 << Size;
-  uint64_t Addend = 0;
-  memcpy(&Addend, LocalAddress, NumBytes);
+  int64_t Addend = 0;
+  if (HasRelocAddendValue)
+    Addend = RelocAddendValue;
+  else
+    memcpy(&Addend, LocalAddress, NumBytes);

   if (IsExtern) {
     // Obtain the symbol name which is referenced in the relocation
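When no ARM64_RELOC_ADDEND entry preceded the relocation, the addend is read directly out of the relocation target. A sketch of that read under the same assumptions the code makes, namely a little-endian host and zero-extension of narrow addends (readAddendLE is illustrative):

#include <cassert>
#include <cstdint>
#include <cstring>

// Copy the low NumBytes of the relocation target into a zeroed 64-bit
// integer. This zero-extends (no sign extension for narrow sizes) and
// assumes a little-endian host.
static int64_t readAddendLE(const uint8_t *LocalAddress, unsigned NumBytes) {
  int64_t Addend = 0;
  std::memcpy(&Addend, LocalAddress, NumBytes);
  return Addend;
}

int main() {
  const uint8_t Word[4] = {0x78, 0x56, 0x34, 0x12};
  assert(readAddendLE(Word, 4) == 0x12345678); // NumBytes == 1 << 2
  return 0;
}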
@@ -792 +908 @@
     RelocationEntry TargetRE(Value.SectionID, Offset, RelType, 0, IsPCRel,
                              Size);
     resolveRelocation(TargetRE, (uint64_t)Addr);
+  } else if (Arch == Triple::arm64 &&
+             (RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+              RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)) {
+    assert(Size == 2);
+    StubMap::const_iterator i = Stubs.find(Value);
+    uint8_t *Addr;
+    if (i != Stubs.end())
+      Addr = Section.Address + i->second;
+    else {
+      // FIXME: There must be a better way to do this than to check and fix
+      // the alignment every time!!!
+      uintptr_t BaseAddress = uintptr_t(Section.Address);
+      uintptr_t StubAlignment = getStubAlignment();
+      uintptr_t StubAddress =
+          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+          -StubAlignment;
+      unsigned StubOffset = StubAddress - BaseAddress;
+      Stubs[Value] = StubOffset;
+      assert(((StubAddress % getStubAlignment()) == 0) &&
+             "GOT entry not aligned");
+      RelocationEntry GOTRE(SectionID, StubOffset, MachO::ARM64_RELOC_UNSIGNED,
+                            Value.Addend, /*IsPCRel=*/false, /*Size=*/3);
+      if (Value.SymbolName)
+        addRelocationForSymbol(GOTRE, Value.SymbolName);
+      else
+        addRelocationForSection(GOTRE, Value.SectionID);
+      Section.StubOffset = StubOffset + getMaxStubSize();
+
+      Addr = (uint8_t *)StubAddress;
+    }
+    RelocationEntry TargetRE(SectionID, Offset, RelType, /*Addend=*/0, IsPCRel,
+                             Size);
+    resolveRelocation(TargetRE, (uint64_t)Addr);
   } else {
     RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, IsPCRel, Size);
     if (Value.SymbolName)
       addRelocationForSymbol(RE, Value.SymbolName);
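The GOT entry created above is an 8-byte slot in the section's stub area (ARM64_RELOC_UNSIGNED with Size = 3, i.e. 1 << 3 bytes) that gets its own relocation so it is filled with the target's final address; the GOT_LOAD_PAGE21/PAGEOFF12 relocations are then resolved against the slot rather than the symbol itself. The address rounding is the standard power-of-two align-up idiom, where -Align is the two's-complement mask ~(Align - 1). A small sketch (alignUp is illustrative):

#include <cassert>
#include <cstdint>

// Round Value up to the next multiple of Align: add (Align - 1), then
// clear the low bits. Only valid for power-of-two alignments.
static uintptr_t alignUp(uintptr_t Value, uintptr_t Align) {
  assert(Align && (Align & (Align - 1)) == 0 && "alignment must be 2^n");
  return (Value + Align - 1) & -Align;
}

int main() {
  assert(alignUp(0x1001, 8) == 0x1008);
  assert(alignUp(0x1008, 8) == 0x1008); // already aligned: unchanged
  return 0;
}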