llvm.org GIT mirror: llvm / bbf628b
This patch adds memory support functions which will later be used to implement section-specific protection handling in MCJIT.

Andrew Kaylor, 8 years ago. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164249 91177308-0d34-0410-b5e6-96231b3b80d8
6 changed files with 754 additions and 97 deletions.
include/llvm/Support/Memory.h

@@ ... @@
 #define LLVM_SYSTEM_MEMORY_H

 #include "llvm/Support/DataTypes.h"
+#include "llvm/Support/system_error.h"
 #include <string>

 namespace llvm {
@@ ... @@
 /// @brief An abstraction for memory operations.
 class Memory {
 public:
+  enum ProtectionFlags {
+    MF_READ  = 0x1000000,
+    MF_WRITE = 0x2000000,
+    MF_EXEC  = 0x4000000
+  };
+
+  /// This method allocates a block of memory that is suitable for loading
+  /// dynamically generated code (e.g. JIT). An attempt to allocate
+  /// \p NumBytes bytes of virtual memory is made.
+  /// \p NearBlock may point to an existing allocation, in which case
+  /// an attempt is made to allocate more memory near the existing block.
+  /// The actual allocated address is not guaranteed to be near the requested
+  /// address.
+  /// \p Flags is used to set the initial protection flags for the block
+  /// of memory.
+  /// \p EC [out] returns an object describing any error that occurs.
+  ///
+  /// This method may allocate more than the number of bytes requested. The
+  /// actual number of bytes allocated is indicated in the returned
+  /// MemoryBlock.
+  ///
+  /// The start of the allocated block must be aligned with the
+  /// system allocation granularity (64K on Windows, page size on Linux).
+  /// If the address following \p NearBlock is not so aligned, it will be
+  /// rounded up to the next allocation granularity boundary.
+  ///
+  /// \r a non-null MemoryBlock if the function was successful,
+  /// otherwise a null MemoryBlock with \p EC describing the error.
+  ///
+  /// @brief Allocate mapped memory.
+  static MemoryBlock allocateMappedMemory(size_t NumBytes,
+                                          const MemoryBlock *const NearBlock,
+                                          unsigned Flags,
+                                          error_code &EC);
+
+  /// This method releases a block of memory that was allocated with the
+  /// allocateMappedMemory method. It should not be used to release any
+  /// memory block allocated any other way.
+  /// \p Block describes the memory to be released.
+  ///
+  /// \r error_success if the function was successful, or an error_code
+  /// describing the failure if an error occurred.
+  ///
+  /// @brief Release mapped memory.
+  static error_code releaseMappedMemory(MemoryBlock &Block);
+
+  /// This method sets the protection flags for a block of memory to the
+  /// state specified by \p Flags. The behavior is not specified if the
+  /// memory was not allocated using the allocateMappedMemory method.
+  /// \p Block describes the memory block to be protected.
+  /// \p Flags specifies the new protection state to be assigned to the block.
+  ///
+  /// If \p Flags is MF_WRITE, the actual behavior varies with the operating
+  /// system (it maps to PAGE_READWRITE on Windows) and the target
+  /// architecture (MF_WRITE is promoted to MF_READ|MF_WRITE on i386).
+  ///
+  /// \r error_success if the function was successful, or an error_code
+  /// describing the failure if an error occurred.
+  ///
+  /// @brief Set memory protection state.
+  static error_code protectMappedMemory(const MemoryBlock &Block,
+                                        unsigned Flags);
+
 /// This method allocates a block of Read/Write/Execute memory that is
 /// suitable for executing dynamically generated code (e.g. JIT). An
 /// attempt to allocate \p NumBytes bytes of virtual memory is made.
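To make the intended call pattern concrete, here is a minimal usage sketch (my illustration, not part of the patch) of the W^X-style sequence that MCJIT section protection would follow with this API. emitCodeInto and emitAndRun are hypothetical names standing in for whatever actually writes and drives the machine code:

#include "llvm/Support/Memory.h"
#include <string>

using namespace llvm;

// Hypothetical emitter: stands in for the JIT writing instructions.
void emitCodeInto(void *Base, size_t Size);

bool emitAndRun(size_t NumBytes, std::string &Err) {
  error_code EC;
  // 1. Allocate read/write memory to generate code into.
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      NumBytes, 0, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC != error_code::success()) {
    Err = EC.message();
    return false;
  }

  emitCodeInto(MB.base(), MB.size());

  // 2. Flip the block to read/execute. Because MF_EXEC is set,
  //    protectMappedMemory also invalidates the instruction cache.
  EC = sys::Memory::protectMappedMemory(
      MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (EC != error_code::success()) {
    Err = EC.message();
    return false;
  }

  // ... jump to the code at MB.base() ...

  // 3. Hand the block back to the OS.
  EC = sys::Memory::releaseMappedMemory(MB);
  return EC == error_code::success();
}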
lib/Support/Memory.cpp

@@ ... @@
 #include "llvm/Support/Valgrind.h"
 #include "llvm/Config/config.h"

-namespace llvm {
-using namespace sys;
-}
-
 // Include the platform-specific parts of this class.
 #ifdef LLVM_ON_UNIX
 #include "Unix/Memory.inc"
 #endif
 #ifdef LLVM_ON_WIN32
 #include "Windows/Memory.inc"
 #endif
-
-extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
-
-/// InvalidateInstructionCache - Before the JIT can run a block of code
-/// that has been emitted it must invalidate the instruction cache on some
-/// platforms.
-void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
-                                                   size_t Len) {
-
-// icache invalidation for PPC and ARM.
-#if defined(__APPLE__)
-
-#  if (defined(__POWERPC__) || defined (__ppc__) || \
-       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
-  sys_icache_invalidate(const_cast<void *>(Addr), Len);
-#  endif
-
-#else
-
-#  if (defined(__POWERPC__) || defined (__ppc__) || \
-       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
-  const size_t LineSize = 32;
-
-  const intptr_t Mask = ~(LineSize - 1);
-  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
-  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
-
-  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
-    asm volatile("dcbf 0, %0" : : "r"(Line));
-  asm volatile("sync");
-
-  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
-    asm volatile("icbi 0, %0" : : "r"(Line));
-  asm volatile("isync");
-#  elif defined(__arm__) && defined(__GNUC__)
-  // FIXME: Can we safely always call this for __GNUC__ everywhere?
-  const char *Start = static_cast<const char *>(Addr);
-  const char *End = Start + Len;
-  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
-#  elif defined(__mips__)
-  const char *Start = static_cast<const char *>(Addr);
-  cacheflush(const_cast<char *>(Start), Len, BCACHE);
-#  endif
-
-#endif  // end apple
-
-  ValgrindDiscardTranslations(Addr, Len);
-}
lib/Support/Unix/Memory.inc

@@ ... @@

 #include "Unix.h"
 #include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Process.h"

 #ifdef HAVE_SYS_MMAN_H
@@ ... @@
 # endif
 #endif

+extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
+
+namespace {
+
+int getPosixProtectionFlags(unsigned Flags) {
+  switch (Flags) {
+  case llvm::sys::Memory::MF_READ:
+    return PROT_READ;
+  case llvm::sys::Memory::MF_WRITE:
+    return PROT_WRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+    return PROT_READ | PROT_WRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+    return PROT_READ | PROT_EXEC;
+  case llvm::sys::Memory::MF_READ |
+       llvm::sys::Memory::MF_WRITE |
+       llvm::sys::Memory::MF_EXEC:
+    return PROT_READ | PROT_WRITE | PROT_EXEC;
+  case llvm::sys::Memory::MF_EXEC:
+    return PROT_EXEC;
+  default:
+    llvm_unreachable("Illegal memory protection flag specified!");
+  }
+  // Provide a default return value as required by some compilers.
+  return PROT_NONE;
+}
+
+} // namespace
+
+namespace llvm {
+namespace sys {
+
+MemoryBlock
+Memory::allocateMappedMemory(size_t NumBytes,
+                             const MemoryBlock *const NearBlock,
+                             unsigned PFlags,
+                             error_code &EC) {
+  EC = error_code::success();
+  if (NumBytes == 0)
+    return MemoryBlock();
+
+  static const size_t PageSize = Process::GetPageSize();
+  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
+
+  int fd = -1;
+#ifdef NEED_DEV_ZERO_FOR_MMAP
+  static int zero_fd = open("/dev/zero", O_RDWR);
+  if (zero_fd == -1) {
+    EC = error_code(errno, system_category());
+    return MemoryBlock();
+  }
+  fd = zero_fd;
+#endif
+
+  int MMFlags = MAP_PRIVATE |
+#ifdef HAVE_MMAP_ANONYMOUS
+  MAP_ANONYMOUS
+#else
+  MAP_ANON
+#endif
+  ; // Ends statement above
+
+  int Protect = getPosixProtectionFlags(PFlags);
+
+  // Use any near hint and the page size to set a page-aligned starting address
+  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+                                NearBlock->size() : 0;
+  if (Start && Start % PageSize)
+    Start += PageSize - Start % PageSize;
+
+  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
+                      Protect, MMFlags, fd, 0);
+  if (Addr == MAP_FAILED) {
+    if (NearBlock) // Try again without a near hint
+      return allocateMappedMemory(NumBytes, 0, PFlags, EC);
+
+    EC = error_code(errno, system_category());
+    return MemoryBlock();
+  }
+
+  MemoryBlock Result;
+  Result.Address = Addr;
+  Result.Size = NumPages*PageSize;
+
+  if (PFlags & MF_EXEC)
+    Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+  return Result;
+}
+
+error_code
+Memory::releaseMappedMemory(MemoryBlock &M) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  if (0 != ::munmap(M.Address, M.Size))
+    return error_code(errno, system_category());
+
+  M.Address = 0;
+  M.Size = 0;
+
+  return error_code::success();
+}
+
+error_code
+Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  if (!Flags)
+    return error_code(EINVAL, generic_category());
+
+  int Protect = getPosixProtectionFlags(Flags);
+
+  int Result = ::mprotect(M.Address, M.Size, Protect);
+  if (Result != 0)
+    return error_code(errno, system_category());
+
+  if (Flags & MF_EXEC)
+    Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+  return error_code::success();
+}
+
 /// AllocateRWX - Allocate a slab of memory with read/write/execute
 /// permissions. This is typically used for JIT applications where we want
 /// to emit code to the memory then jump to it. Getting this type of memory
 /// is very OS specific.
 ///
-llvm::sys::MemoryBlock
-llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
-                               std::string *ErrMsg) {
+MemoryBlock
+Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
+                    std::string *ErrMsg) {
   if (NumBytes == 0) return MemoryBlock();

   size_t pageSize = Process::GetPageSize();
@@ ... @@
                                 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
   if (KERN_SUCCESS != kr) {
     MakeErrMsg(ErrMsg, "vm_protect max RX failed");
-    return sys::MemoryBlock();
+    return MemoryBlock();
   }

   kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                   (vm_size_t)(pageSize*NumPages), 0,
                   VM_PROT_READ | VM_PROT_WRITE);
   if (KERN_SUCCESS != kr) {
     MakeErrMsg(ErrMsg, "vm_protect RW failed");
-    return sys::MemoryBlock();
+    return MemoryBlock();
   }
 #endif

   MemoryBlock result;
   result.Address = pa;
   result.Size = NumPages*pageSize;

   return result;
 }

-bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
   if (M.Address == 0 || M.Size == 0) return false;
   if (0 != ::munmap(M.Address, M.Size))
     return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
   return false;
 }

-bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
 #if defined(__APPLE__) && defined(__arm__)
   if (M.Address == 0 || M.Size == 0) return false;
-  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+  Memory::InvalidateInstructionCache(M.Address, M.Size);
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
   return KERN_SUCCESS == kr;
 #else
   return true;
 #endif
 }

-bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
 #if defined(__APPLE__) && defined(__arm__)
   if (M.Address == 0 || M.Size == 0) return false;
-  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+  Memory::InvalidateInstructionCache(M.Address, M.Size);
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
   return KERN_SUCCESS == kr;
 #else
   return true;
 #endif
 }

-bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
+bool Memory::setRangeWritable(const void *Addr, size_t Size) {
 #if defined(__APPLE__) && defined(__arm__)
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                 (vm_size_t)Size, 0,
                                 VM_PROT_READ | VM_PROT_WRITE);
   return KERN_SUCCESS == kr;
 #else
   return true;
 #endif
 }

-bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
+bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
 #if defined(__APPLE__) && defined(__arm__)
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                 (vm_size_t)Size, 0,
                                 VM_PROT_READ | VM_PROT_EXECUTE);
   return KERN_SUCCESS == kr;
 #else
   return true;
 #endif
 }
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(const void *Addr,
+                                        size_t Len) {
+
+// icache invalidation for PPC and ARM.
+#if defined(__APPLE__)
+
+#  if (defined(__POWERPC__) || defined (__ppc__) || \
+       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+  sys_icache_invalidate(const_cast<void *>(Addr), Len);
+#  endif
+
+#else
+
+#  if (defined(__POWERPC__) || defined (__ppc__) || \
+       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
+  const size_t LineSize = 32;
+
+  const intptr_t Mask = ~(LineSize - 1);
+  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
+  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
+
+  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+    asm volatile("dcbf 0, %0" : : "r"(Line));
+  asm volatile("sync");
+
+  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+    asm volatile("icbi 0, %0" : : "r"(Line));
+  asm volatile("isync");
+#  elif defined(__arm__) && defined(__GNUC__)
+  // FIXME: Can we safely always call this for __GNUC__ everywhere?
+  const char *Start = static_cast<const char *>(Addr);
+  const char *End = Start + Len;
+  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
+#  elif defined(__mips__)
+  const char *Start = static_cast<const char *>(Addr);
+  cacheflush(const_cast<char *>(Start), Len, BCACHE);
+#  endif
+
+#endif  // end apple
+
+  ValgrindDiscardTranslations(Addr, Len);
+}
+
+} // namespace sys
+} // namespace llvm
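One detail worth calling out in both the Unix implementation above and the Windows one below: sizes are rounded up to whole allocation units, and an unaligned NearBlock hint is bumped up, never down, to the next boundary. A self-contained sketch of that arithmetic (my illustration with made-up example values, not code from the patch):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Number of whole allocation units needed to cover NumBytes.
static size_t unitsFor(size_t NumBytes, size_t Unit) {
  return (NumBytes + Unit - 1) / Unit;
}

// Round an address up to the next Unit boundary (no-op if already aligned).
static uintptr_t roundUp(uintptr_t Addr, size_t Unit) {
  return Addr % Unit ? Addr + (Unit - Addr % Unit) : Addr;
}

int main() {
  assert(unitsFor(1, 4096) == 1);         // a single byte still costs a page
  assert(unitsFor(4096, 4096) == 1);
  assert(unitsFor(4097, 4096) == 2);
  assert(roundUp(12293, 4096) == 16384);  // unaligned hint moves up, not down
  assert(roundUp(8192, 4096) == 8192);
  return 0;
}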
lib/Support/Windows/Memory.inc

@@ ... @@
 //
 //===----------------------------------------------------------------------===//

+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Process.h"
 #include "Windows.h"
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/Process.h"
+
+namespace {
+
+DWORD getWindowsProtectionFlags(unsigned Flags) {
+  switch (Flags) {
+  // Contrary to what you might expect, the Windows page protection flags
+  // are not a bitwise combination of RWX values.
+  case llvm::sys::Memory::MF_READ:
+    return PAGE_READONLY;
+  case llvm::sys::Memory::MF_WRITE:
+    // Note: PAGE_WRITE is not supported by VirtualProtect.
+    return PAGE_READWRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+    return PAGE_READWRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+    return PAGE_EXECUTE_READ;
+  case llvm::sys::Memory::MF_READ |
+       llvm::sys::Memory::MF_WRITE |
+       llvm::sys::Memory::MF_EXEC:
+    return PAGE_EXECUTE_READWRITE;
+  case llvm::sys::Memory::MF_EXEC:
+    return PAGE_EXECUTE;
+  default:
+    llvm_unreachable("Illegal memory protection flag specified!");
+  }
+  // Provide a default return value as required by some compilers.
+  return PAGE_NOACCESS;
+}
+
+size_t getAllocationGranularity() {
+  SYSTEM_INFO Info;
+  ::GetSystemInfo(&Info);
+  if (Info.dwPageSize > Info.dwAllocationGranularity)
+    return Info.dwPageSize;
+  else
+    return Info.dwAllocationGranularity;
+}
+
+} // namespace

 namespace llvm {
-using namespace sys;
+namespace sys {

 //===----------------------------------------------------------------------===//
 //=== WARNING: Implementation here must contain only Win32 specific code
 //=== and must not be UNIX code
 //===----------------------------------------------------------------------===//
+
+MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
+                                         const MemoryBlock *const NearBlock,
+                                         unsigned Flags,
+                                         error_code &EC) {
+  EC = error_code::success();
+  if (NumBytes == 0)
+    return MemoryBlock();
+
+  // While we'd be happy to allocate single pages, the Windows allocation
+  // granularity may be larger than a single page (in practice, it is 64K),
+  // so mapping less than that will create an unreachable fragment of memory.
+  static const size_t Granularity = getAllocationGranularity();
+  const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;
+
+  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+                                NearBlock->size()
+                              : NULL;
+
+  // If the requested address is not aligned to the allocation granularity,
+  // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
+  if (Start && Start % Granularity != 0)
+    Start += Granularity - Start % Granularity;
+
+  DWORD Protect = getWindowsProtectionFlags(Flags);
+
+  void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start),
+                            NumBlocks*Granularity,
+                            MEM_RESERVE | MEM_COMMIT, Protect);
+  if (PA == NULL) {
+    if (NearBlock) {
+      // Try again without the NearBlock hint
+      return allocateMappedMemory(NumBytes, NULL, Flags, EC);
+    }
+    EC = error_code(::GetLastError(), system_category());
+    return MemoryBlock();
+  }
+
+  MemoryBlock Result;
+  Result.Address = PA;
+  Result.Size = NumBlocks*Granularity;
+
+  if (Flags & MF_EXEC)
+    Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+  return Result;
+}
+
+error_code Memory::releaseMappedMemory(MemoryBlock &M) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  if (!VirtualFree(M.Address, 0, MEM_RELEASE))
+    return error_code(::GetLastError(), system_category());
+
+  M.Address = 0;
+  M.Size = 0;
+
+  return error_code::success();
+}
+
+error_code Memory::protectMappedMemory(const MemoryBlock &M,
+                                       unsigned Flags) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  DWORD Protect = getWindowsProtectionFlags(Flags);
+
+  DWORD OldFlags;
+  if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
+    return error_code(::GetLastError(), system_category());
+
+  if (Flags & MF_EXEC)
+    Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+  return error_code::success();
+}
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(
+    const void *Addr, size_t Len) {
+  FlushInstructionCache(GetCurrentProcess(), Addr, Len);
+}
+

 MemoryBlock Memory::AllocateRWX(size_t NumBytes,
                                 const MemoryBlock *NearBlock,
                                 std::string *ErrMsg) {
-  if (NumBytes == 0) return MemoryBlock();
-
-  static const size_t pageSize = Process::GetPageSize();
-  size_t NumPages = (NumBytes+pageSize-1)/pageSize;
-
-  PVOID start = NearBlock ? static_cast<unsigned char *>(NearBlock->base()) +
-                                NearBlock->size() : NULL;
-
-  void *pa = VirtualAlloc(start, NumPages*pageSize, MEM_RESERVE | MEM_COMMIT,
-                          PAGE_EXECUTE_READWRITE);
-  if (pa == NULL) {
-    if (NearBlock) {
-      // Try again without the NearBlock hint
-      return AllocateRWX(NumBytes, NULL, ErrMsg);
-    }
-    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory: ");
-    return MemoryBlock();
-  }
-
-  MemoryBlock result;
-  result.Address = pa;
-  result.Size = NumPages*pageSize;
-  return result;
+  MemoryBlock MB;
+  error_code EC;
+  MB = allocateMappedMemory(NumBytes, NearBlock,
+                            MF_READ|MF_WRITE|MF_EXEC, EC);
+  if (EC != error_code::success() && ErrMsg) {
+    MakeErrMsg(ErrMsg, EC.message());
+  }
+  return MB;
 }

 bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
-  if (M.Address == 0 || M.Size == 0) return false;
-  if (!VirtualFree(M.Address, 0, MEM_RELEASE))
-    return MakeErrMsg(ErrMsg, "Can't release RWX Memory: ");
-  return false;
+  error_code EC = releaseMappedMemory(M);
+  if (EC == error_code::success())
+    return false;
+  MakeErrMsg(ErrMsg, EC.message());
+  return true;
 }

 static DWORD getProtection(const void *addr) {
@@ ... @@
   }

   DWORD oldProt;
-  sys::Memory::InvalidateInstructionCache(Addr, Size);
+  Memory::InvalidateInstructionCache(Addr, Size);
   return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
             == TRUE;
 }
@@ ... @@
   }

   DWORD oldProt;
-  sys::Memory::InvalidateInstructionCache(Addr, Size);
+  Memory::InvalidateInstructionCache(Addr, Size);
   return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
             == TRUE;
 }

-}
+} // namespace sys
+} // namespace llvm
unittests/Support/CMakeLists.txt

@@ ... @@
 LeakDetectorTest.cpp
 ManagedStatic.cpp
 MathExtrasTest.cpp
+MemoryTest.cpp
 Path.cpp
 RegexTest.cpp
 SwapByteOrderTest.cpp
unittests/Support/MemoryTest.cpp (new file)

+//===- llvm/unittest/Support/MemoryTest.cpp - MappedMemory tests ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Process.h"
+
+#include "gtest/gtest.h"
+#include <cstdlib>
+
+using namespace llvm;
+using namespace sys;
+
+namespace {
+
+class MappedMemoryTest : public ::testing::TestWithParam<unsigned> {
+public:
+  MappedMemoryTest() {
+    Flags = GetParam();
+    PageSize = sys::Process::GetPageSize();
+  }
+
+protected:
+  // Adds RW flags to permit testing of the resulting memory
+  unsigned getTestableEquivalent(unsigned RequestedFlags) {
+    switch (RequestedFlags) {
+    case Memory::MF_READ:
+    case Memory::MF_WRITE:
+    case Memory::MF_READ|Memory::MF_WRITE:
+      return Memory::MF_READ|Memory::MF_WRITE;
+    case Memory::MF_READ|Memory::MF_EXEC:
+    case Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC:
+    case Memory::MF_EXEC:
+      return Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC;
+    }
+    // Default in case values are added to the enum, as required by some compilers
+    return Memory::MF_READ|Memory::MF_WRITE;
+  }
+
+  // Returns true if the memory blocks overlap
+  bool doesOverlap(MemoryBlock M1, MemoryBlock M2) {
+    if (M1.base() == M2.base())
+      return true;
+
+    if (M1.base() > M2.base())
+      return (unsigned char *)M2.base() + M2.size() > M1.base();
+
+    return (unsigned char *)M1.base() + M1.size() > M2.base();
+  }
+
+  unsigned Flags;
+  size_t PageSize;
+};
+
+TEST_P(MappedMemoryTest, AllocAndRelease) {
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(sizeof(int), M1.size());
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+}
+
+TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(64, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(32, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(16U, M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(64U, M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(32U, M3.size());
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  EXPECT_NE((void*)0, M4.base());
+  EXPECT_LE(16U, M4.size());
+  EXPECT_FALSE(Memory::releaseMappedMemory(M4));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, BasicWrite) {
+  // This test applies only to writeable combinations
+  if (Flags && !(Flags & Memory::MF_WRITE))
+    return;
+
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(sizeof(int), M1.size());
+
+  int *a = (int*)M1.base();
+  *a = 1;
+  EXPECT_EQ(1, *a);
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+}
+
+TEST_P(MappedMemoryTest, MultipleWrite) {
+  // This test applies only to writeable combinations
+  if (Flags && !(Flags & Memory::MF_WRITE))
+    return;
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(1U * sizeof(int), M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(8U * sizeof(int), M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(4U * sizeof(int), M3.size());
+
+  int *x = (int*)M1.base();
+  *x = 1;
+
+  int *y = (int*)M2.base();
+  for (int i = 0; i < 8; i++) {
+    y[i] = i;
+  }
+
+  int *z = (int*)M3.base();
+  *z = 42;
+
+  EXPECT_EQ(1, *x);
+  EXPECT_EQ(7, y[7]);
+  EXPECT_EQ(42, *z);
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+
+  MemoryBlock M4 = Memory::allocateMappedMemory(64 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  EXPECT_NE((void*)0, M4.base());
+  EXPECT_LE(64U * sizeof(int), M4.size());
+  x = (int*)M4.base();
+  *x = 4;
+  EXPECT_EQ(4, *x);
+  EXPECT_FALSE(Memory::releaseMappedMemory(M4));
+
+  // Verify that M2 remains unaffected by other activity
+  for (int i = 0; i < 8; i++) {
+    EXPECT_EQ(i, y[i]);
+  }
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, EnabledWrite) {
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(2 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(2U * sizeof(int), M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(8U * sizeof(int), M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(4U * sizeof(int), M3.size());
+
+  EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags)));
+  EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags)));
+  EXPECT_FALSE(Memory::protectMappedMemory(M3, getTestableEquivalent(Flags)));
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  int *x = (int*)M1.base();
+  *x = 1;
+  int *y = (int*)M2.base();
+  for (unsigned int i = 0; i < 8; i++) {
+    y[i] = i;
+  }
+  int *z = (int*)M3.base();
+  *z = 42;
+
+  EXPECT_EQ(1, *x);
+  EXPECT_EQ(7, y[7]);
+  EXPECT_EQ(42, *z);
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  EXPECT_EQ(6, y[6]);
+
+  MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  EXPECT_NE((void*)0, M4.base());
+  EXPECT_LE(16U, M4.size());
+  EXPECT_EQ(error_code::success(),
+            Memory::protectMappedMemory(M4, getTestableEquivalent(Flags)));
+  x = (int*)M4.base();
+  *x = 4;
+  EXPECT_EQ(4, *x);
+  EXPECT_FALSE(Memory::releaseMappedMemory(M4));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, SuccessiveNear) {
+  error_code EC;
+  MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(64, &M1, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(32, &M2, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(16U, M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(64U, M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(32U, M3.size());
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, DuplicateNear) {
+  error_code EC;
+  MemoryBlock Near((void*)(3*PageSize), 16);
+  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(16U, M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(64U, M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(32U, M3.size());
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, ZeroNear) {
+  error_code EC;
+  MemoryBlock Near(0, 0);
+  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(16U, M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(64U, M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(32U, M3.size());
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, ZeroSizeNear) {
+  error_code EC;
+  MemoryBlock Near((void*)(4*PageSize), 0);
+  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(16U, M1.size());
+  EXPECT_NE((void*)0, M2.base());
+  EXPECT_LE(64U, M2.size());
+  EXPECT_NE((void*)0, M3.base());
+  EXPECT_LE(32U, M3.size());
+
+  EXPECT_FALSE(doesOverlap(M1, M2));
+  EXPECT_FALSE(doesOverlap(M2, M3));
+  EXPECT_FALSE(doesOverlap(M1, M3));
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
+  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
+}
+
+TEST_P(MappedMemoryTest, UnalignedNear) {
+  error_code EC;
+  MemoryBlock Near((void*)(2*PageSize+5), 0);
+  MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC);
+  EXPECT_EQ(error_code::success(), EC);
+
+  EXPECT_NE((void*)0, M1.base());
+  EXPECT_LE(sizeof(int), M1.size());
+
+  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
+}
+
+// Note that Memory::MF_WRITE is not supported exclusively across
+// operating systems and architectures and can imply MF_READ|MF_WRITE
+unsigned MemoryFlags[] = {
+  Memory::MF_READ,
+  Memory::MF_WRITE,
+  Memory::MF_READ|Memory::MF_WRITE,
+  Memory::MF_EXEC,
+  Memory::MF_READ|Memory::MF_EXEC,
+  Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC
+};
+
+INSTANTIATE_TEST_CASE_P(AllocationTests,
+                        MappedMemoryTest,
+                        ::testing::ValuesIn(MemoryFlags));
+
+} // anonymous namespace