llvm.org GIT mirror llvm / cce6c29
On Darwin ARM, memory needs special handling to do JIT. This patch expands this handling to work properly for modifying stub functions, relocations back to entry points after JIT compilation, etc.. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57013 91177308-0d34-0410-b5e6-96231b3b80d8 Jim Grosbach 11 years ago
6 changed file(s) with 66 addition(s) and 15 deletion(s). Raw diff Collapse all Expand all
3434 /// JIT Memory Manager if the client does not provide one to the JIT.
3535 static JITMemoryManager *CreateDefaultMemManager();
3636
37 /// setMemoryWritable - When code generation is in progress,
38 /// the code pages may need permissions changed.
39 virtual void setMemoryWritable(void) = 0;
40
41 /// setMemoryExecutable - When code generation is done and we're ready to
42 /// start execution, the code pages may need permissions changed.
43 virtual void setMemoryExecutable(void) = 0;
44
3745 //===--------------------------------------------------------------------===//
3846 // Global Offset Table Management
3947 //===--------------------------------------------------------------------===//
6969 /// platforms.
7070 static void InvalidateInstructionCache(const void *Addr, size_t Len);
7171
72 /// SetRXPrivilege - Before the JIT can run a block of code, it has to be
72 /// setExecutable - Before the JIT can run a block of code, it has to be
7373 /// given read and executable privilege. Return true if it is already r-x
7474 /// or the system is able to change its privilege.
75 static bool SetRXPrivilege(const void *Addr, size_t Size);
75 static bool setExecutable (MemoryBlock &M, std::string *ErrMsg = 0);
76
77 /// setWritable - When adding to a block of code, the JIT may need
78 /// to mark a block of code as RW since the protections are on page
79 /// boundaries, and the JIT internal allocations are not page aligned.
80 static bool setWritable (MemoryBlock &M, std::string *ErrMsg = 0);
7681 };
7782 }
7883 }
559559 if (ExceptionHandling) DE->setModuleInfo(Info);
560560 }
561561
/// setMemoryExecutable - Forward to the JIT memory manager so the code
/// pages it owns can be returned to executable state once emission is done.
562 void setMemoryExecutable(void) {
563 MemMgr->setMemoryExecutable();
564 }
565
562566 private:
563567 void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
564568 void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
790794
791795 void JITEmitter::startFunction(MachineFunction &F) {
792796 uintptr_t ActualSize = 0;
797 // Set the memory writable, if it's not already
798 MemMgr->setMemoryWritable();
793799 if (MemMgr->NeedsExactSize()) {
794800 DOUT << "ExactSize\n";
795801 const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
937943 Relocations.clear();
938944
939945 // Mark code region readable and executable if it's not so already.
940 sys::Memory::SetRXPrivilege(FnStart, FnEnd-FnStart);
946 MemMgr->setMemoryExecutable();
941947
942948 #ifndef NDEBUG
943949 {
10851091
10861092 void *JITEmitter::finishFunctionStub(const GlobalValue* F) {
10871093 NumBytes += getCurrentPCOffset();
1094
1095 // Invalidate the icache if necessary.
1096 sys::Memory::InvalidateInstructionCache(BufferBegin, NumBytes);
1097
10881098 std::swap(SavedBufferBegin, BufferBegin);
10891099 BufferEnd = SavedBufferEnd;
10901100 CurBufferPtr = SavedCurBufferPtr;
363363
364364 // Finally, remove this entry from TableBlocks.
365365 TableBlocks.erase(I);
366 }
367
368 /// setMemoryWritable - When code generation is in progress,
369 /// the code pages may need permissions changed.
/// Walks every block this manager has allocated and asks the OS to make
/// it writable again so the JIT can emit or patch code in it.
/// NOTE(review): the bool result of sys::Memory::setWritable is ignored
/// here, so a failed protection change goes unreported — confirm intended.
370 void setMemoryWritable(void)
371 {
372 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
373 sys::Memory::setWritable(Blocks[i]);
374 }
375 /// setMemoryExecutable - When code generation is done and we're ready to
376 /// start execution, the code pages may need permissions changed.
/// Walks every block this manager has allocated and asks the OS to make
/// it executable before the JIT-compiled code is run.
/// NOTE(review): the bool result of sys::Memory::setExecutable is ignored
/// here, so a failed protection change goes unreported — confirm intended.
377 void setMemoryExecutable(void)
378 {
379 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
380 sys::Memory::setExecutable(Blocks[i]);
381 }
367382 };
368383 }
5757 #endif // end PPC
5858
5959 }
60
61 bool llvm::sys::Memory::SetRXPrivilege(const void *Addr, size_t Size) {
62 #if defined(__APPLE__) && defined(__arm__)
63 kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
64 (vm_size_t)Size, 0,
65 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
66 return KERN_SUCCESS == kr;
67 #else
68 return true;
69 #endif
70 }
7575 (vm_size_t)(pageSize*NumPages), 0,
7676 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
7777 if (KERN_SUCCESS != kr) {
78 MakeErrMsg(ErrMsg, "vm_protect max RWX failed\n");
78 MakeErrMsg(ErrMsg, "vm_protect max RX failed\n");
7979 return sys::MemoryBlock();
8080 }
8181
102102 return false;
103103 }
104104
105 bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
106 #if defined(__APPLE__) && defined(__arm__)
107 if (M.Address == 0 || M.Size == 0) return false;
108 sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
109 kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
110 (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
111 return KERN_SUCCESS == kr;
112 #else
113 return true;
114 #endif
115 }
116
117 bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
118 #if defined(__APPLE__) && defined(__arm__)
119 if (M.Address == 0 || M.Size == 0) return false;
120 sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
121 kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
122 (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
123 return KERN_SUCCESS == kr;
124 #else
125 return false;
126 #endif
127 }
128