llvm.org GIT mirror llvm / bc4707a
Preliminary support for systems which require changing JIT memory regions privilege from read/write to read/executable.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@56303 91177308-0d34-0410-b5e6-96231b3b80d8

Evan Cheng, 12 years ago
6 changed files with 54 additions and 1 deletion.
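What the change prepares for: on systems that refuse memory that is writable and executable at the same time (here, ARM Darwin), the JIT has to emit into read/write pages and flip them to read/executable before running them. As a rough, commit-independent sketch of that write-then-protect pattern using plain POSIX mmap/mprotect rather than the LLVM helpers added below (the 0xC3 byte is only a stand-in for real emitted code):

// Sketch only: the generic write-then-protect flow this commit prepares for.
// Not part of the commit; uses raw POSIX calls instead of llvm::sys::Memory.
#include <sys/mman.h>
#include <unistd.h>
#include <cstring>
#include <cstdio>

int main() {
  long PageSize = ::sysconf(_SC_PAGESIZE);

  // 1. Allocate a read/write page and "emit" some code into it.
  void *Buf = ::mmap(0, PageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, -1, 0);
  if (Buf == MAP_FAILED)
    return 1;
  const unsigned char Code[] = { 0xC3 };   // stand-in byte (x86 'ret'); target-specific in reality
  std::memcpy(Buf, Code, sizeof(Code));

  // 2. Flip the page to read/execute before any call into it.
  if (::mprotect(Buf, PageSize, PROT_READ | PROT_EXEC) != 0)
    return 1;

  std::printf("code page is now r-x\n");
  ::munmap(Buf, PageSize);
  return 0;
}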
     /// that has been emitted it must invalidate the instruction cache on some
     /// platforms.
     static void InvalidateInstructionCache(const void *Addr, size_t Len);
+
+    /// SetRXPrivilege - Before the JIT can run a block of code, it has to be
+    /// given read and executable privilege. Return true if it is already r-x
+    /// or the system is able to change its privilege.
+    static bool SetRXPrivilege(const void *Addr, size_t Size);
   };
 }
 }
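As a usage illustration of the new hook (a sketch, not part of the commit: the helper name finishEmittedFunction is invented here, FnStart/FnEnd mirror the JITEmitter hunk further down, and the include path is assumed from this revision's tree layout):

#include "llvm/System/Memory.h"   // assumed header path for this revision
#include <cstddef>

// Sketch only: after the JIT has written a function into [FnStart, FnEnd),
// flip the region to r-x and flush the i-cache before handing out FnStart.
static bool finishEmittedFunction(void *FnStart, void *FnEnd) {
  size_t Size = (char*)FnEnd - (char*)FnStart;
  if (!llvm::sys::Memory::SetRXPrivilege(FnStart, Size))
    return false;   // the system refused to make the region executable
  llvm::sys::Memory::InvalidateInstructionCache(FnStart, Size);
  return true;
}

The JITEmitter change below performs essentially the SetRXPrivilege step; flushing the instruction cache remains the caller's job, as the comment above InvalidateInstructionCache notes.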
 /// existing data in memory.
 void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
   MutexGuard locked(lock);
-
+
+  DOUT << "Map " << *GV << " to " << Addr << "\n";
   void *&CurVal = state.getGlobalAddressMap(locked)[GV];
   assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
   CurVal = Addr;
        << Relocations.size() << " relocations\n";
   Relocations.clear();
 
+  // Mark code region readable and executable if it's not so already.
+  sys::Memory::SetRXPrivilege(FnStart, FnEnd-FnStart);
+
 #ifndef NDEBUG
   {
     DOUT << std::hex;
 
 DefaultJITMemoryManager::DefaultJITMemoryManager() {
   // Allocate a 16M block of memory for functions.
+#if defined(__APPLE__) && defined(__arm__)
+  sys::MemoryBlock MemBlock = getNewMemoryBlock(4 << 20);
+#else
   sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
+#endif
 
   unsigned char *MemBase = static_cast<unsigned char*>(MemBlock.base());
 
 #endif  // end PPC
 
 }
+
+bool llvm::sys::Memory::SetRXPrivilege(const void *Addr, size_t Size) {
+#if defined(__APPLE__) && defined(__arm__)
+  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
+                                (vm_size_t)Size, 0,
+                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
+  return KERN_SUCCESS == kr;
+#else
+  return true;
+#endif
+}
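On every other configuration the new hook simply reports success: as the AllocateRWX change further down shows, those systems keep handing out pages that are already readable, writable and executable, so there is nothing left to flip before running the code. Only the ARM Darwin path needs the Mach vm_protect call above.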
 
 #ifdef HAVE_SYS_MMAN_H
 #include <sys/mman.h>
+#endif
+
+#ifdef __APPLE__
+#include <mach/mach.h>
 #endif
 
 /// AllocateRWX - Allocate a slab of memory with read/write/execute
   void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                             NearBlock->size() : 0;
 
+#if defined(__APPLE__) && defined(__arm__)
+  void *pa = ::mmap(start, pageSize*NumPages, PROT_READ|PROT_EXEC,
+                    flags, fd, 0);
+#else
   void *pa = ::mmap(start, pageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                     flags, fd, 0);
+#endif
   if (pa == MAP_FAILED) {
     if (NearBlock) //Try again without a near hint
       return AllocateRWX(NumBytes, 0);
 
     MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
     return MemoryBlock();
   }
+
+#if defined(__APPLE__) && defined(__arm__)
+  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
+                                (vm_size_t)(pageSize*NumPages), 0,
+                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
+  if (KERN_SUCCESS != kr) {
+    MakeErrMsg(ErrMsg, "vm_protect max RWX failed\n");
+    return sys::MemoryBlock();
+  }
+
+  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
+                  (vm_size_t)(pageSize*NumPages), 0,
+                  VM_PROT_READ | VM_PROT_WRITE);
+  if (KERN_SUCCESS != kr) {
+    MakeErrMsg(ErrMsg, "vm_protect RW failed\n");
+    return sys::MemoryBlock();
+  }
+#endif
+
   MemoryBlock result;
   result.Address = pa;
   result.Size = NumPages*pageSize;
+
   return result;
 }
 
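Put together, a client on ARM Darwin would see the following flow: AllocateRWX returns a block that is writable but not yet executable, code is emitted into it, and SetRXPrivilege flips it to read/executable before execution. A hedged sketch of that flow (not from the commit; the 64 KB size and the zero NearBlock argument are arbitrary, and error handling is reduced to the minimum):

// Sketch only: how a JIT client would drive the two calls on a platform
// where pages cannot stay writable and executable at the same time.
#include "llvm/System/Memory.h"   // assumed header path for this revision
#include <string>

using namespace llvm;

bool emitAndProtect() {
  std::string Err;
  // On ARM Darwin this block comes back writable but not yet executable.
  sys::MemoryBlock MB = sys::Memory::AllocateRWX(64 << 10, 0, &Err);
  if (MB.base() == 0)
    return false;

  // ... write machine code into MB.base() ...

  // Make the freshly written code runnable before jumping into it.
  return sys::Memory::SetRXPrivilege(MB.base(), MB.size());
}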