llvm.org GIT mirror llvm / 4bf3706
Reverting r76825 and r76828, since they caused clang runtime errors and some build failure involving memset. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76838 91177308-0d34-0410-b5e6-96231b3b80d8 Reid Kleckner 10 years ago
11 changed file(s) with 244 addition(s) and 956 deletion(s). Raw diff Collapse all Expand all
1414 #define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
1515
1616 #include "llvm/Support/DataTypes.h"
17 #include <string>
1817
1918 namespace llvm {
20
2119 class Function;
22 class GlobalValue;
2320
2421 /// JITMemoryManager - This interface is used by the JIT to allocate and manage
2522 /// memory for the code generated by the JIT. This can be reimplemented by
9087 //===--------------------------------------------------------------------===//
9188 // Main Allocation Functions
9289 //===--------------------------------------------------------------------===//
93
94 /// startFunctionBody - When we start JITing a function, the JIT calls this
90
91 /// startFunctionBody - When we start JITing a function, the JIT calls this
9592 /// method to allocate a block of free RWX memory, which returns a pointer to
96 /// it. If the JIT wants to request a block of memory of at least a certain
97 /// size, it passes that value as ActualSize, and this method returns a block
98 /// with at least that much space. If the JIT doesn't know ahead of time how
99 /// much space it will need to emit the function, it passes 0 for the
100 /// ActualSize. In either case, this method is required to pass back the size
101 /// of the allocated block through ActualSize. The JIT will be careful to
102 /// not write more than the returned ActualSize bytes of memory.
103 virtual uint8_t *startFunctionBody(const Function *F,
93 /// it. The JIT doesn't know ahead of time how much space it will need to
94 /// emit the function, so it doesn't pass in the size. Instead, this method
95 /// is required to pass back a "valid size". The JIT will be careful to not
96 /// write more than the returned ActualSize bytes of memory.
97 virtual uint8_t *startFunctionBody(const Function *F,
10498 uintptr_t &ActualSize) = 0;
105
99
106100 /// allocateStub - This method is called by the JIT to allocate space for a
107101 /// function stub (used to handle limited branch displacements) while it is
108102 /// JIT compiling a function. For example, if foo calls bar, and if bar
123117 virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
124118 uint8_t *FunctionEnd) = 0;
125119
126 /// allocateSpace - Allocate a memory block of the given size. This method
127 /// cannot be called between calls to startFunctionBody and endFunctionBody.
120 /// allocateSpace - Allocate a memory block of the given size.
128121 virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0;
129122
130123 /// allocateGlobal - Allocate memory for a global.
131 ///
132124 virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) = 0;
133125
134126 /// deallocateMemForFunction - Free JIT memory for the specified function.
144136 /// the exception table.
145137 virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
146138 uint8_t *TableEnd, uint8_t* FrameRegister) = 0;
147
148 /// CheckInvariants - For testing only. Return true if all internal
149 /// invariants are preserved, or return false and set ErrorStr to a helpful
150 /// error message.
151 virtual bool CheckInvariants(std::string &ErrorStr) {
152 return true;
153 }
154
155 /// GetDefaultCodeSlabSize - For testing only. Returns DefaultCodeSlabSize
156 /// from DefaultJITMemoryManager.
157 virtual size_t GetDefaultCodeSlabSize() {
158 return 0;
159 }
160
161 /// GetDefaultDataSlabSize - For testing only. Returns DefaultCodeSlabSize
162 /// from DefaultJITMemoryManager.
163 virtual size_t GetDefaultDataSlabSize() {
164 return 0;
165 }
166
167 /// GetDefaultStubSlabSize - For testing only. Returns DefaultCodeSlabSize
168 /// from DefaultJITMemoryManager.
169 virtual size_t GetDefaultStubSlabSize() {
170 return 0;
171 }
172
173 /// GetNumCodeSlabs - For testing only. Returns the number of MemoryBlocks
174 /// allocated for code.
175 virtual unsigned GetNumCodeSlabs() {
176 return 0;
177 }
178
179 /// GetNumDataSlabs - For testing only. Returns the number of MemoryBlocks
180 /// allocated for data.
181 virtual unsigned GetNumDataSlabs() {
182 return 0;
183 }
184
185 /// GetNumStubSlabs - For testing only. Returns the number of MemoryBlocks
186 /// allocated for function stubs.
187 virtual unsigned GetNumStubSlabs() {
188 return 0;
189 }
190139 };
191140
192141 } // end namespace llvm.
1414 #define LLVM_SUPPORT_ALLOCATOR_H
1515
1616 #include "llvm/Support/AlignOf.h"
17 #include "llvm/Support/DataTypes.h"
18 #include
1917 #include
2018
2119 namespace llvm {
4240 void PrintStats() const {}
4341 };
4442
45 /// MemSlab - This structure lives at the beginning of every slab allocated by
46 /// the bump allocator.
47 class MemSlab {
48 public:
49 size_t Size;
50 MemSlab *NextPtr;
51 };
52
53 /// SlabAllocator - This class can be used to parameterize the underlying
54 /// allocation strategy for the bump allocator. In particular, this is used
55 /// by the JIT to allocate contiguous swathes of executable memory. The
56 /// interface uses MemSlab's instead of void *'s so that the allocator
57 /// doesn't have to remember the size of the pointer it allocated.
58 class SlabAllocator {
59 public:
60 virtual ~SlabAllocator();
61 virtual MemSlab *Allocate(size_t Size) = 0;
62 virtual void Deallocate(MemSlab *Slab) = 0;
63 };
64
65 /// MallocSlabAllocator - The default slab allocator for the bump allocator
66 /// is an adapter class for MallocAllocator that just forwards the method
67 /// calls and translates the arguments.
68 class MallocSlabAllocator : public SlabAllocator {
69 /// Allocator - The underlying allocator that we forward to.
70 ///
71 MallocAllocator Allocator;
72
73 public:
74 MallocSlabAllocator() : Allocator() { }
75 virtual ~MallocSlabAllocator();
76 virtual MemSlab *Allocate(size_t Size);
77 virtual void Deallocate(MemSlab *Slab);
78 };
79
80 /// BumpPtrAllocator - This allocator is useful for containers that need
81 /// very simple memory allocation strategies. In particular, this just keeps
43 /// BumpPtrAllocator - This allocator is useful for containers that need very
44 /// simple memory allocation strategies. In particular, this just keeps
8245 /// allocating memory, and never deletes it until the entire block is dead. This
8346 /// makes allocation speedy, but must only be used when the trade-off is ok.
8447 class BumpPtrAllocator {
8548 BumpPtrAllocator(const BumpPtrAllocator &); // do not implement
8649 void operator=(const BumpPtrAllocator &); // do not implement
8750
88 /// SlabSize - Allocate data into slabs of this size unless we get an
89 /// allocation above SizeThreshold.
90 size_t SlabSize;
91
92 /// SizeThreshold - For any allocation larger than this threshold, we should
93 /// allocate a separate slab.
94 size_t SizeThreshold;
95
96 /// Allocator - The underlying allocator we use to get slabs of memory. This
97 /// defaults to MallocSlabAllocator, which wraps malloc, but it could be
98 /// changed to use a custom allocator.
99 SlabAllocator &Allocator;
100
101 /// CurSlab - The slab that we are currently allocating into.
102 ///
103 MemSlab *CurSlab;
104
105 /// CurPtr - The current pointer into the current slab. This points to the
106 /// next free byte in the slab.
107 char *CurPtr;
108
109 /// End - The end of the current slab.
110 ///
111 char *End;
112
113 /// BytesAllocated - This field tracks how many bytes we've allocated, so
114 /// that we can compute how much space was wasted.
115 size_t BytesAllocated;
116
117 /// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
118 /// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
119 /// AlignPtr(8, 4) == 8.
120 static char *AlignPtr(char *Ptr, size_t Alignment);
121
122 /// StartNewSlab - Allocate a new slab and move the bump pointers over into
123 /// the new slab. Modifies CurPtr and End.
124 void StartNewSlab();
125
126 /// DeallocateSlabs - Deallocate all memory slabs after and including this
127 /// one.
128 void DeallocateSlabs(MemSlab *Slab);
129
130 static MallocSlabAllocator DefaultSlabAllocator;
131
51 void *TheMemory;
13252 public:
133 BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
134 SlabAllocator &allocator = DefaultSlabAllocator);
53 BumpPtrAllocator();
13554 ~BumpPtrAllocator();
13655
137 /// Reset - Deallocate all but the current slab and reset the current pointer
138 /// to the beginning of it, freeing all memory allocated so far.
13956 void Reset();
14057
141 /// Allocate - Allocate space at the specified alignment.
142 ///
14358 void *Allocate(size_t Size, size_t Alignment);
14459
14560 /// Allocate space, but do not construct, one object.
16782
16883 void Deallocate(const void * /*Ptr*/) {}
16984
170 unsigned GetNumSlabs() const;
171
17285 void PrintStats() const;
17386 };
17487
17588 } // end namespace llvm
17689
177 #endif // LLVM_SUPPORT_ALLOCATOR_H
90 #endif
1313 #ifndef LLVM_SYSTEM_MEMORY_H
1414 #define LLVM_SYSTEM_MEMORY_H
1515
16 #include "llvm/Support/DataTypes.h"
1716 #include <string>
1817
1918 namespace llvm {
2625 /// @brief Memory block abstraction.
2726 class MemoryBlock {
2827 public:
29 MemoryBlock() { }
30 MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
3128 void *base() const { return Address; }
32 size_t size() const { return Size; }
29 unsigned size() const { return Size; }
3330 private:
3431 void *Address; ///< Address of first byte of memory area
35 size_t Size; ///< Size, in bytes of the memory area
32 unsigned Size; ///< Size, in bytes of the memory area
3633 friend class Memory;
3734 };
3835
5249 /// a null memory block and fills in *ErrMsg.
5350 ///
5451 /// @brief Allocate Read/Write/Execute memory.
55 static MemoryBlock AllocateRWX(size_t NumBytes,
52 static MemoryBlock AllocateRWX(unsigned NumBytes,
5653 const MemoryBlock *NearBlock,
5754 std::string *ErrMsg = 0);
5855
5050
5151 STATISTIC(NumBytes, "Number of bytes of machine code compiled");
5252 STATISTIC(NumRelos, "Number of relocations applied");
53 STATISTIC(NumRetries, "Number of retries with more memory");
5453 static JIT *TheJIT = 0;
5554
5655
425424 // save BufferBegin/BufferEnd/CurBufferPtr here.
426425 uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
427426
428 // When reattempting to JIT a function after running out of space, we store
429 // the estimated size of the function we're trying to JIT here, so we can
430 // ask the memory manager for at least this much space. When we
431 // successfully emit the function, we reset this back to zero.
432 uintptr_t SizeEstimate;
433
434427 /// Relocations - These are the relocations that the function needs, as
435428 /// emitted.
436429 std::vector Relocations;
502495 DebugLocTuple PrevDLT;
503496
504497 public:
505 JITEmitter(JIT &jit, JITMemoryManager *JMM)
506 : SizeEstimate(0), Resolver(jit), CurFn(0) {
498 JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit), CurFn(0) {
507499 MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
508500 if (jit.getJITInfo().needsGOT()) {
509501 MemMgr->AllocateGOT();
568560 return MBBLocations[MBB->getNumber()];
569561 }
570562
571 /// retryWithMoreMemory - Log a retry and deallocate all memory for the
572 /// given function. Increase the minimum allocation size so that we get
573 /// more memory next time.
574 void retryWithMoreMemory(MachineFunction &F);
575
576563 /// deallocateMemForFunction - Deallocate all memory for the specified
577564 /// function body.
578 void deallocateMemForFunction(const Function *F);
565 void deallocateMemForFunction(Function *F);
579566
580567 /// AddStubToCurrentFunction - Mark the current function being JIT'd as
581568 /// using the stub at the specified address. Allows
937924 // previously allocated.
938925 ActualSize += GetSizeOfGlobalsInBytes(F);
939926 DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
940 } else if (SizeEstimate > 0) {
941 // SizeEstimate will be non-zero on reallocation attempts.
942 ActualSize = SizeEstimate;
943927 }
944928
945929 BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
964948
965949 bool JITEmitter::finishFunction(MachineFunction &F) {
966950 if (CurBufferPtr == BufferEnd) {
967 // We must call endFunctionBody before retrying, because
968 // deallocateMemForFunction requires it.
969 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
970 retryWithMoreMemory(F);
971 return true;
972 }
973
951 // FIXME: Allocate more space, then try again.
952 llvm_report_error("JIT: Ran out of space for generated machine code!");
953 }
954
974955 emitJumpTableInfo(F.getJumpTableInfo());
975
956
976957 // FnStart is the start of the text, not the start of the constant pool and
977958 // other per-function data.
978959 uint8_t *FnStart =
10631044 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
10641045
10651046 if (CurBufferPtr == BufferEnd) {
1066 retryWithMoreMemory(F);
1067 return true;
1068 } else {
1069 // Now that we've succeeded in emitting the function, reset the
1070 // SizeEstimate back down to zero.
1071 SizeEstimate = 0;
1047 // FIXME: Allocate more space, then try again.
1048 llvm_report_error("JIT: Ran out of space for generated machine code!");
10721049 }
10731050
10741051 BufferBegin = CurBufferPtr = 0;
11531130 return false;
11541131 }
11551132
1156 void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
1157 DOUT << "JIT: Ran out of space for native code. Reattempting.\n";
1158 Relocations.clear(); // Clear the old relocations or we'll reapply them.
1159 ConstPoolAddresses.clear();
1160 ++NumRetries;
1161 deallocateMemForFunction(F.getFunction());
1162 // Try again with at least twice as much free space.
1163 SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
1164 }
1165
11661133 /// deallocateMemForFunction - Deallocate all memory for the specified
11671134 /// function body. Also drop any references the function has to stubs.
1168 void JITEmitter::deallocateMemForFunction(const Function *F) {
1135 void JITEmitter::deallocateMemForFunction(Function *F) {
11691136 MemMgr->deallocateMemForFunction(F);
11701137
11711138 // If the function did not reference any stubs, return.
1010 //
1111 //===----------------------------------------------------------------------===//
1212
13 #define DEBUG_TYPE "jit"
13 #include "llvm/GlobalValue.h"
1414 #include "llvm/ExecutionEngine/JITMemoryManager.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/GlobalValue.h"
18 #include "llvm/Support/Allocator.h"
1915 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/Debug.h"
2116 #include "llvm/Support/ErrorHandling.h"
22 #include "llvm/Support/raw_ostream.h"
2317 #include "llvm/System/Memory.h"
2418 #include
2519 #include
3024 #include
3125 using namespace llvm;
3226
33 STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
3427
3528 JITMemoryManager::~JITMemoryManager() {}
3629
147140 /// FreeRangeHeader to allocate from.
148141 FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
149142 MemoryRangeHeader *FollowingBlock = &getBlockAfter();
150 assert(ThisAllocated && "This block is already free!");
143 assert(ThisAllocated && "This block is already allocated!");
151144 assert(FollowingBlock->PrevAllocated && "Flags out of sync!");
152145
153146 FreeRangeHeader *FreeListToReturn = FreeList;
250243 // Memory Block Implementation.
251244 //===----------------------------------------------------------------------===//
252245
253 namespace {
254
255 class DefaultJITMemoryManager;
256
257 class JITSlabAllocator : public SlabAllocator {
258 DefaultJITMemoryManager &JMM;
259 public:
260 JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
261 virtual ~JITSlabAllocator() { }
262 virtual MemSlab *Allocate(size_t Size);
263 virtual void Deallocate(MemSlab *Slab);
264 };
265
246 namespace {
266247 /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
267248 /// This splits a large block of MAP_NORESERVE'd memory into two
268249 /// sections, one for function stubs, one for the functions themselves. We
269250 /// have to do this because we may need to emit a function stub while in the
270251 /// middle of emitting a function, and we don't know how large the function we
271252 /// are emitting is.
272 class DefaultJITMemoryManager : public JITMemoryManager {
273
274 // Whether to poison freed memory.
275 bool PoisonMemory;
276
277 /// LastSlab - This points to the last slab allocated and is used as the
278 /// NearBlock parameter to AllocateRWX so that we can attempt to lay out all
279 /// stubs, data, and code contiguously in memory. In general, however, this
280 /// is not possible because the NearBlock parameter is ignored on Windows
281 /// platforms and even on Unix it works on a best-effort basis.
282 sys::MemoryBlock LastSlab;
283
284 // Memory slabs allocated by the JIT. We refer to them as slabs so we don't
285 // confuse them with the blocks of memory described above.
286 std::vector CodeSlabs;
287 JITSlabAllocator BumpSlabAllocator;
288 BumpPtrAllocator StubAllocator;
289 BumpPtrAllocator DataAllocator;
290
291 // Circular list of free blocks.
292 FreeRangeHeader *FreeMemoryList;
293
253 class VISIBILITY_HIDDEN DefaultJITMemoryManager : public JITMemoryManager {
254 bool PoisonMemory; // Whether to poison freed memory.
255
256 std::vector Blocks; // Memory blocks allocated by the JIT
257 FreeRangeHeader *FreeMemoryList; // Circular list of free blocks.
258
294259 // When emitting code into a memory block, this is the block.
295260 MemoryRangeHeader *CurBlock;
296
261
262 uint8_t *CurStubPtr, *StubBase;
263 uint8_t *CurGlobalPtr, *GlobalEnd;
297264 uint8_t *GOTBase; // Target Specific reserved memory
298265 void *DlsymTable; // Stub external symbol information
299266
267 // Centralize memory block allocation.
268 sys::MemoryBlock getNewMemoryBlock(unsigned size);
269
300270 std::map FunctionBlocks;
301271 std::map TableBlocks;
302272 public:
303273 DefaultJITMemoryManager();
304274 ~DefaultJITMemoryManager();
305275
306 /// allocateNewSlab - Allocates a new MemoryBlock and remembers it as the
307 /// last slab it allocated, so that subsequent allocations follow it.
308 sys::MemoryBlock allocateNewSlab(size_t size);
309
310 /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
311 /// least this much unless more is requested.
312 static const size_t DefaultCodeSlabSize;
313
314 /// DefaultSlabSize - Allocate data into slabs of this size unless we get
315 /// an allocation above SizeThreshold.
316 static const size_t DefaultSlabSize;
317
318 /// DefaultSizeThreshold - For any allocation larger than this threshold, we
319 /// should allocate a separate slab.
320 static const size_t DefaultSizeThreshold;
321
322276 void AllocateGOT();
323277 void SetDlsymTable(void *);
324
325 // Testing methods.
326 virtual bool CheckInvariants(std::string &ErrorStr);
327 size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
328 size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
329 size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
330 unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
331 unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
332 unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }
333
278
279 uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
280 unsigned Alignment);
281
334282 /// startFunctionBody - When a function starts, allocate a block of free
335283 /// executable memory, returning a pointer to it and its actual size.
336284 uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
337
285
338286 FreeRangeHeader* candidateBlock = FreeMemoryList;
339287 FreeRangeHeader* head = FreeMemoryList;
340288 FreeRangeHeader* iter = head->Next;
341289
342290 uintptr_t largest = candidateBlock->BlockSize;
343
291
344292 // Search for the largest free block
345293 while (iter != head) {
346 if (iter->BlockSize > largest) {
347 largest = iter->BlockSize;
348 candidateBlock = iter;
349 }
350 iter = iter->Next;
294 if (iter->BlockSize > largest) {
295 largest = iter->BlockSize;
296 candidateBlock = iter;
297 }
298 iter = iter->Next;
351299 }
352
353 // If this block isn't big enough for the allocation desired, allocate
354 // another block of memory and add it to the free list.
355 if (largest - sizeof(MemoryRangeHeader) < ActualSize) {
356 DOUT << "JIT: Allocating another slab of memory for function.";
357 candidateBlock = allocateNewCodeSlab((size_t)ActualSize);
358 }
359
300
360301 // Select this candidate block for allocation
361302 CurBlock = candidateBlock;
362303
363304 // Allocate the entire memory block.
364305 FreeMemoryList = candidateBlock->AllocateBlock();
365 ActualSize = CurBlock->BlockSize - sizeof(MemoryRangeHeader);
366 return (uint8_t *)(CurBlock + 1);
367 }
368
369 /// allocateNewCodeSlab - Helper method to allocate a new slab of code
370 /// memory from the OS and add it to the free list. Returns the new
371 /// FreeRangeHeader at the base of the slab.
372 FreeRangeHeader *allocateNewCodeSlab(size_t MinSize) {
373 // If the user needs at least MinSize free memory, then we account for
374 // two MemoryRangeHeaders: the one in the user's block, and the one at the
375 // end of the slab.
376 size_t PaddedMin = MinSize + 2 * sizeof(MemoryRangeHeader);
377 size_t SlabSize = std::max(DefaultCodeSlabSize, PaddedMin);
378 sys::MemoryBlock B = allocateNewSlab(SlabSize);
379 CodeSlabs.push_back(B);
380 char *MemBase = (char*)(B.base());
381
382 // Put a tiny allocated block at the end of the memory chunk, so when
383 // FreeBlock calls getBlockAfter it doesn't fall off the end.
384 MemoryRangeHeader *EndBlock =
385 (MemoryRangeHeader*)(MemBase + B.size()) - 1;
386 EndBlock->ThisAllocated = 1;
387 EndBlock->PrevAllocated = 0;
388 EndBlock->BlockSize = sizeof(MemoryRangeHeader);
389
390 // Start out with a vast new block of free memory.
391 FreeRangeHeader *NewBlock = (FreeRangeHeader*)MemBase;
392 NewBlock->ThisAllocated = 0;
393 // Make sure getFreeBlockBefore doesn't look into unmapped memory.
394 NewBlock->PrevAllocated = 1;
395 NewBlock->BlockSize = (uintptr_t)EndBlock - (uintptr_t)NewBlock;
396 NewBlock->SetEndOfBlockSizeMarker();
397 NewBlock->AddToFreeList(FreeMemoryList);
398
399 assert(NewBlock->BlockSize - sizeof(MemoryRangeHeader) >= MinSize &&
400 "The block was too small!");
401 return NewBlock;
402 }
403
306 ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
307 return (uint8_t *)(CurBlock+1);
308 }
309
404310 /// endFunctionBody - The function F is now allocated, and takes the memory
405311 /// in the range [FunctionStart,FunctionEnd).
406312 void endFunctionBody(const Function *F, uint8_t *FunctionStart,
416322 FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
417323 }
418324
419 /// allocateSpace - Allocate a memory block of the given size. This method
420 /// cannot be called between calls to startFunctionBody and endFunctionBody.
325 /// allocateSpace - Allocate a memory block of the given size.
421326 uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
422327 CurBlock = FreeMemoryList;
423328 FreeMemoryList = FreeMemoryList->AllocateBlock();
434339 return result;
435340 }
436341
437 /// allocateStub - Allocate memory for a function stub.
438 uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
439 unsigned Alignment) {
440 return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
441 }
442
443 /// allocateGlobal - Allocate memory for a global.
342 /// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
343 /// this method does not touch the current block and can be called at any
344 /// time.
444345 uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
445 return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
346 uint8_t *Result = CurGlobalPtr;
347
348 // Align the pointer.
349 if (Alignment == 0) Alignment = 1;
350 Result = (uint8_t*)(((uintptr_t)Result + Alignment-1) &
351 ~(uintptr_t)(Alignment-1));
352
353 // Move the current global pointer forward.
354 CurGlobalPtr += Result - CurGlobalPtr + Size;
355
356 // Check for overflow.
357 if (CurGlobalPtr > GlobalEnd) {
358 // FIXME: Allocate more memory.
359 llvm_report_error("JIT ran out of memory for globals!");
360 }
361
362 return Result;
446363 }
447364
448365 /// startExceptionTable - Use startFunctionBody to allocate memory for the
519436 /// the code pages may need permissions changed.
520437 void setMemoryWritable(void)
521438 {
522 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
523 sys::Memory::setWritable(CodeSlabs[i]);
439 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
440 sys::Memory::setWritable(Blocks[i]);
524441 }
525442 /// setMemoryExecutable - When code generation is done and we're ready to
526443 /// start execution, the code pages may need permissions changed.
527444 void setMemoryExecutable(void)
528445 {
529 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
530 sys::Memory::setExecutable(CodeSlabs[i]);
446 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
447 sys::Memory::setExecutable(Blocks[i]);
531448 }
532449
533450 /// setPoisonMemory - Controls whether we write garbage over freed memory.
538455 };
539456 }
540457
541 MemSlab *JITSlabAllocator::Allocate(size_t Size) {
542 sys::MemoryBlock B = JMM.allocateNewSlab(Size);
543 MemSlab *Slab = (MemSlab*)B.base();
544 Slab->Size = B.size();
545 Slab->NextPtr = 0;
546 return Slab;
547 }
548
549 void JITSlabAllocator::Deallocate(MemSlab *Slab) {
550 sys::MemoryBlock B(Slab, Slab->Size);
551 sys::Memory::ReleaseRWX(B);
552 }
553
554 DefaultJITMemoryManager::DefaultJITMemoryManager()
555 : LastSlab(0, 0),
556 BumpSlabAllocator(*this),
557 StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
558 DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
559
458 DefaultJITMemoryManager::DefaultJITMemoryManager() {
560459 #ifdef NDEBUG
460 PoisonMemory = true;
461 #else
561462 PoisonMemory = false;
463 #endif
464
465 // Allocate a 16M block of memory for functions.
466 #if defined(__APPLE__) && defined(__arm__)
467 sys::MemoryBlock MemBlock = getNewMemoryBlock(4 << 20);
562468 #else
563 PoisonMemory = true;
469 sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
564470 #endif
565471
566 // Allocate space for code.
567 sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
568 CodeSlabs.push_back(MemBlock);
569 uint8_t *MemBase = (uint8_t*)MemBlock.base();
472 uint8_t *MemBase = static_cast(MemBlock.base());
473
474 // Allocate stubs backwards to the base, globals forward from the stubs, and
475 // functions forward after globals.
476 StubBase = MemBase;
477 CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.
478 CurGlobalPtr = CurStubPtr; // Use 2M for globals, working forwards.
479 GlobalEnd = CurGlobalPtr + 2*1024*1024;
570480
571481 // We set up the memory chunk with 4 mem regions, like this:
572482 // [ START
583493 MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
584494 Mem3->ThisAllocated = 1;
585495 Mem3->PrevAllocated = 0;
586 Mem3->BlockSize = sizeof(MemoryRangeHeader);
496 Mem3->BlockSize = 0;
587497
588498 /// Add a tiny free region so that the free list always has one entry.
589499 FreeRangeHeader *Mem2 =
599509 MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
600510 Mem1->ThisAllocated = 1;
601511 Mem1->PrevAllocated = 0;
602 Mem1->BlockSize = sizeof(MemoryRangeHeader);
512 Mem1->BlockSize = (char*)Mem2 - (char*)Mem1;
603513
604514 // Add a FreeRangeHeader to the start of the function body region, indicating
605515 // that the space is free. Mark the previous block allocated so we never look
606516 // at it.
607 FreeRangeHeader *Mem0 = (FreeRangeHeader*)MemBase;
517 FreeRangeHeader *Mem0 = (FreeRangeHeader*)GlobalEnd;
608518 Mem0->ThisAllocated = 0;
609519 Mem0->PrevAllocated = 1;
610520 Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
629539 }
630540
631541 DefaultJITMemoryManager::~DefaultJITMemoryManager() {
632 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
633 sys::Memory::ReleaseRWX(CodeSlabs[i]);
634
542 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
543 sys::Memory::ReleaseRWX(Blocks[i]);
544
635545 delete[] GOTBase;
636 }
637
638 sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
546 Blocks.clear();
547 }
548
549 uint8_t *DefaultJITMemoryManager::allocateStub(const GlobalValue* F,
550 unsigned StubSize,
551 unsigned Alignment) {
552 CurStubPtr -= StubSize;
553 CurStubPtr = (uint8_t*)(((intptr_t)CurStubPtr) &
554 ~(intptr_t)(Alignment-1));
555 if (CurStubPtr < StubBase) {
556 // FIXME: allocate a new block
557 llvm_report_error("JIT ran out of memory for function stubs!");
558 }
559 return CurStubPtr;
560 }
561
562 sys::MemoryBlock DefaultJITMemoryManager::getNewMemoryBlock(unsigned size) {
639563 // Allocate a new block close to the last one.
564 const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.back();
640565 std::string ErrMsg;
641 sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
642 sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
566 sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg);
643567 if (B.base() == 0) {
644568 llvm_report_error("Allocation failed when allocating new memory in the"
645569 " JIT\n" + ErrMsg);
646570 }
647 LastSlab = B;
648 ++NumSlabs;
571 Blocks.push_back(B);
649572 return B;
650573 }
651574
652 /// CheckInvariants - For testing only. Return "" if all internal invariants
653 /// are preserved, and a helpful error message otherwise. For free and
654 /// allocated blocks, make sure that adding BlockSize gives a valid block.
655 /// For free blocks, make sure they're in the free list and that their end of
656 /// block size marker is correct. This function should return an error before
657 /// accessing bad memory. This function is defined here instead of in
658 /// JITMemoryManagerTest.cpp so that we don't have to expose all of the
659 /// implementation details of DefaultJITMemoryManager.
660 bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
661 raw_string_ostream Err(ErrorStr);
662
663 // Construct the set of FreeRangeHeader pointers so we can query it
664 // efficiently.
665 llvm::SmallPtrSet FreeHdrSet;
666 FreeRangeHeader* FreeHead = FreeMemoryList;
667 FreeRangeHeader* FreeRange = FreeHead;
668
669 do {
670 // Check that the free range pointer is in the blocks we've allocated.
671 bool Found = false;
672 for (std::vector::iterator I = CodeSlabs.begin(),
673 E = CodeSlabs.end(); I != E && !Found; ++I) {
674 char *Start = (char*)I->base();
675 char *End = Start + I->size();
676 Found = (Start <= (char*)FreeRange && (char*)FreeRange < End);
677 }
678 if (!Found) {
679 Err << "Corrupt free list; points to " << FreeRange;
680 return false;
681 }
682
683 if (FreeRange->Next->Prev != FreeRange) {
684 Err << "Next and Prev pointers do not match.";
685 return false;
686 }
687
688 // Otherwise, add it to the set.
689 FreeHdrSet.insert(FreeRange);
690 FreeRange = FreeRange->Next;
691 } while (FreeRange != FreeHead);
692
693 // Go over each block, and look at each MemoryRangeHeader.
694 for (std::vector::iterator I = CodeSlabs.begin(),
695 E = CodeSlabs.end(); I != E; ++I) {
696 char *Start = (char*)I->base();
697 char *End = Start + I->size();
698
699 // Check each memory range.
700 for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
701 Start <= (char*)Hdr && (char*)Hdr < End;
702 Hdr = &Hdr->getBlockAfter()) {
703 if (Hdr->ThisAllocated == 0) {
704 // Check that this range is in the free list.
705 if (!FreeHdrSet.count(Hdr)) {
706 Err << "Found free header at " << Hdr << " that is not in free list.";
707 return false;
708 }
709
710 // Now make sure the size marker at the end of the block is correct.
711 uintptr_t *Marker = ((uintptr_t*)&Hdr->getBlockAfter()) - 1;
712 if (!(Start <= (char*)Marker && (char*)Marker < End)) {
713 Err << "Block size in header points out of current MemoryBlock.";
714 return false;
715 }
716 if (Hdr->BlockSize != *Marker) {
717 Err << "End of block size marker (" << *Marker << ") "
718 << "and BlockSize (" << Hdr->BlockSize << ") don't match.";
719 return false;
720 }
721 }
722
723 if (LastHdr && LastHdr->ThisAllocated != Hdr->PrevAllocated) {
724 Err << "Hdr->PrevAllocated (" << Hdr->PrevAllocated << ") != "
725 << "LastHdr->ThisAllocated (" << LastHdr->ThisAllocated << ")";
726 return false;
727 } else if (!LastHdr && !Hdr->PrevAllocated) {
728 Err << "The first header should have PrevAllocated true.";
729 return false;
730 }
731
732 // Remember the last header.
733 LastHdr = Hdr;
734 }
735 }
736
737 // All invariants are preserved.
738 return true;
739 }
740575
741576 JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
742577 return new DefaultJITMemoryManager();
743578 }
744
745 // Allocate memory for code in 512K slabs.
746 const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;
747
748 // Allocate globals and stubs in slabs of 64K. (probably 16 pages)
749 const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;
750
751 // Waste at most 16K at the end of each bump slab. (probably 4 pages)
752 const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;
1414 #include "llvm/Support/Recycler.h"
1515 #include "llvm/Support/DataTypes.h"
1616 #include "llvm/Support/Streams.h"
17 #include <cstring>
17 #include <ostream>
18 using namespace llvm;
1819
19 namespace llvm {
20 //===----------------------------------------------------------------------===//
21 // MemRegion class implementation
22 //===----------------------------------------------------------------------===//
2023
21 BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
22 SlabAllocator &allocator)
23 : SlabSize(size), SizeThreshold(threshold), Allocator(allocator),
24 CurSlab(0), BytesAllocated(0) {
25 StartNewSlab();
namespace {
  /// MemRegion - This is one chunk of the BumpPtrAllocator.  The header lives
  /// at the front of the chunk; allocations are bumped out of the space that
  /// follows it.
  class MemRegion {
    unsigned RegionSize;   // Total bytes in this chunk, including the header.
    MemRegion *Next;       // Previously allocated chunk (singly linked list).
    char *NextPtr;         // Bump pointer: first free byte in this chunk.
  public:
    /// Init - (Re)initialize a raw 'size'-byte chunk as a region, chain it in
    /// front of 'next', and align the bump pointer to 'Alignment'.
    void Init(unsigned size, unsigned Alignment, MemRegion *next) {
      RegionSize = size;
      Next = next;
      NextPtr = (char*)(this+1);

      // Align NextPtr.
      NextPtr = (char*)((intptr_t)(NextPtr+Alignment-1) &
                        ~(intptr_t)(Alignment-1));
    }

    const MemRegion *getNext() const { return Next; }
    unsigned getNumBytesAllocated() const {
      return NextPtr-(const char*)this;
    }

    /// Allocate - Allocate and return at least the specified number of bytes,
    /// growing the region list through *RegPtr when this chunk is exhausted.
    void *Allocate(size_t AllocSize, size_t Alignment, MemRegion **RegPtr) {
      // Round the bump pointer up to the requested alignment.
      char* Result = (char*) (((uintptr_t) (NextPtr+Alignment-1))
                              & ~((uintptr_t) Alignment-1));

      // Speculate the new value of NextPtr.
      char* NextPtrTmp = Result + AllocSize;

      // If we are still within the current region, return Result.
      if (unsigned (NextPtrTmp - (char*) this) <= RegionSize) {
        NextPtr = NextPtrTmp;
        return Result;
      }

      // Otherwise, we have to allocate a new chunk.  Create one twice as big
      // as this one.
      // NOTE(review): if AllocSize never fits in a doubled chunk this
      // recursion does not terminate -- presumably callers stay small; verify.
      MemRegion *NewRegion = (MemRegion *)malloc(RegionSize*2);
      NewRegion->Init(RegionSize*2, Alignment, this);

      // Update the current "first region" pointer to point to the new region.
      *RegPtr = NewRegion;

      // Try allocating from it now.
      return NewRegion->Allocate(AllocSize, Alignment, RegPtr);
    }

    /// Deallocate - Recursively release all memory for this and its next
    /// regions to the system.
    void Deallocate() {
      MemRegion *next = Next;
      free(this);
      if (next)
        next->Deallocate();
    }

    /// DeallocateAllButLast - Recursively release all memory for this and its
    /// next regions to the system stopping at the last region in the list.
    /// Returns the pointer to the last region.
    MemRegion *DeallocateAllButLast() {
      MemRegion *next = Next;
      if (!next)
        return this;
      free(this);
      return next->DeallocateAllButLast();
    }
  };
}
95
96 //===----------------------------------------------------------------------===//
97 // BumpPtrAllocator class implementation
98 //===----------------------------------------------------------------------===//
99
100 BumpPtrAllocator::BumpPtrAllocator() {
101 TheMemory = malloc(4096);
102 ((MemRegion*)TheMemory)->Init(4096, 1, 0);
26103 }
27104
28105 BumpPtrAllocator::~BumpPtrAllocator() {
29 DeallocateSlabs(CurSlab);
106 ((MemRegion*)TheMemory)->Deallocate();
30107 }
31108
32 /// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
33 /// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
34 /// AlignPtr(8, 4) == 8.
35 char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
36 assert(Alignment && (Alignment & (Alignment - 1)) == 0 &&
37 "Alignment is not a power of two!");
38
39 // Do the alignment.
40 return (char*)(((uintptr_t)Ptr + Alignment - 1) &
41 ~(uintptr_t)(Alignment - 1));
109 void BumpPtrAllocator::Reset() {
110 MemRegion *MRP = (MemRegion*)TheMemory;
111 MRP = MRP->DeallocateAllButLast();
112 MRP->Init(4096, 1, 0);
113 TheMemory = MRP;
42114 }
43115
44 /// StartNewSlab - Allocate a new slab and move the bump pointers over into
45 /// the new slab. Modifies CurPtr and End.
46 void BumpPtrAllocator::StartNewSlab() {
47 MemSlab *NewSlab = Allocator.Allocate(SlabSize);
48 NewSlab->NextPtr = CurSlab;
49 CurSlab = NewSlab;
50 CurPtr = (char*)(CurSlab + 1);
51 End = CurPtr + CurSlab->Size;
52 }
53
54 /// DeallocateSlabs - Deallocate all memory slabs after and including this
55 /// one.
56 void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
57 while (Slab) {
58 MemSlab *NextSlab = Slab->NextPtr;
59 #ifndef NDEBUG
60 // Poison the memory so stale pointers crash sooner. Note we must
61 // preserve the Size and NextPtr fields at the beginning.
62 memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
63 #endif
64 Allocator.Deallocate(Slab);
65 Slab = NextSlab;
66 }
67 }
68
69 /// Reset - Deallocate all but the current slab and reset the current pointer
70 /// to the beginning of it, freeing all memory allocated so far.
71 void BumpPtrAllocator::Reset() {
72 DeallocateSlabs(CurSlab->NextPtr);
73 CurSlab->NextPtr = 0;
74 CurPtr = (char*)(CurSlab + 1);
75 End = CurPtr + CurSlab->Size;
76 }
77
78 /// Allocate - Allocate space at the specified alignment.
79 ///
80 void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
81 // Keep track of how many bytes we've allocated.
82 BytesAllocated += Size;
83
84 // 0-byte alignment means 1-byte alignment.
85 if (Alignment == 0) Alignment = 1;
86
87 // Allocate the aligned space, going forwards from CurPtr.
88 char *Ptr = AlignPtr(CurPtr, Alignment);
89
90 // Check if we can hold it.
91 if (Ptr + Size < End) {
92 CurPtr = Ptr + Size;
93 return Ptr;
94 }
95
96 // If Size is really big, allocate a separate slab for it.
97 if (Size > SizeThreshold) {
98 size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
99 MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
100
101 // Put the new slab after the current slab, since we are not allocating
102 // into it.
103 NewSlab->NextPtr = CurSlab->NextPtr;
104 CurSlab->NextPtr = NewSlab;
105
106 Ptr = AlignPtr((char*)(NewSlab + 1), Alignment);
107 assert((uintptr_t)Ptr + Size < (uintptr_t)NewSlab + NewSlab->Size);
108 return Ptr;
109 }
110
111 // Otherwise, start a new slab and try again.
112 StartNewSlab();
113 Ptr = AlignPtr(CurPtr, Alignment);
114 CurPtr = Ptr + Size;
115 assert(CurPtr < End && "Unable to allocate memory!");
116 void *BumpPtrAllocator::Allocate(size_t Size, size_t Align) {
117 MemRegion *MRP = (MemRegion*)TheMemory;
118 void *Ptr = MRP->Allocate(Size, Align, &MRP);
119 TheMemory = MRP;
116120 return Ptr;
117121 }
118122
119 unsigned BumpPtrAllocator::GetNumSlabs() const {
120 unsigned NumSlabs = 0;
121 for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
122 ++NumSlabs;
123 }
124 return NumSlabs;
123 void BumpPtrAllocator::PrintStats() const {
124 unsigned BytesUsed = 0;
125 unsigned NumRegions = 0;
126 const MemRegion *R = (MemRegion*)TheMemory;
127 for (; R; R = R->getNext(), ++NumRegions)
128 BytesUsed += R->getNumBytesAllocated();
129
130 cerr << "\nNumber of memory regions: " << NumRegions << "\n";
131 cerr << "Bytes allocated: " << BytesUsed << "\n";
125132 }
126133
127 void BumpPtrAllocator::PrintStats() const {
128 unsigned NumSlabs = 0;
129 size_t TotalMemory = 0;
130 for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
131 TotalMemory += Slab->Size;
132 ++NumSlabs;
133 }
134
135 cerr << "\nNumber of memory regions: " << NumSlabs << '\n'
136 << "Bytes used: " << BytesAllocated << '\n'
137 << "Bytes allocated: " << TotalMemory << '\n'
138 << "Bytes wasted: " << (TotalMemory - BytesAllocated)
139 << " (includes alignment, etc)\n";
134 void llvm::PrintRecyclerStats(size_t Size,
135 size_t Align,
136 size_t FreeListSize) {
137 cerr << "Recycler element size: " << Size << '\n';
138 cerr << "Recycler element alignment: " << Align << '\n';
139 cerr << "Number of elements free for recycling: " << FreeListSize << '\n';
140140 }
141
142 MallocSlabAllocator BumpPtrAllocator::DefaultSlabAllocator =
143 MallocSlabAllocator();
144
145 SlabAllocator::~SlabAllocator() { }
146
147 MallocSlabAllocator::~MallocSlabAllocator() { }
148
149 MemSlab *MallocSlabAllocator::Allocate(size_t Size) {
150 MemSlab *Slab = (MemSlab*)Allocator.Allocate(Size, 0);
151 Slab->Size = Size;
152 Slab->NextPtr = 0;
153 return Slab;
154 }
155
156 void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
157 Allocator.Deallocate(Slab);
158 }
159
160 void PrintRecyclerStats(size_t Size,
161 size_t Align,
162 size_t FreeListSize) {
163 cerr << "Recycler element size: " << Size << '\n'
164 << "Recycler element alignment: " << Align << '\n'
165 << "Number of elements free for recycling: " << FreeListSize << '\n';
166 }
167
168 }
1111 //===----------------------------------------------------------------------===//
1212
1313 #include "Unix.h"
14 #include "llvm/Support/DataTypes.h"
1514 #include "llvm/System/Process.h"
1615
1716 #ifdef HAVE_SYS_MMAN_H
2827 /// is very OS specific.
2928 ///
3029 llvm::sys::MemoryBlock
31 llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
30 llvm::sys::Memory::AllocateRWX(unsigned NumBytes, const MemoryBlock* NearBlock,
3231 std::string *ErrMsg) {
3332 if (NumBytes == 0) return MemoryBlock();
3433
35 size_t pageSize = Process::GetPageSize();
36 size_t NumPages = (NumBytes+pageSize-1)/pageSize;
34 unsigned pageSize = Process::GetPageSize();
35 unsigned NumPages = (NumBytes+pageSize-1)/pageSize;
3736
3837 int fd = -1;
3938 #ifdef NEED_DEV_ZERO_FOR_MMAP
1212 //===----------------------------------------------------------------------===//
1313
1414 #include "Win32.h"
15 #include "llvm/Support/DataTypes.h"
1615 #include "llvm/System/Process.h"
1716
1817 namespace llvm {
2322 //=== and must not be UNIX code
2423 //===----------------------------------------------------------------------===//
2524
26 MemoryBlock Memory::AllocateRWX(size_t NumBytes,
25 MemoryBlock Memory::AllocateRWX(unsigned NumBytes,
2726 const MemoryBlock *NearBlock,
2827 std::string *ErrMsg) {
2928 if (NumBytes == 0) return MemoryBlock();
3029
31 static const size_t pageSize = Process::GetPageSize();
32 size_t NumPages = (NumBytes+pageSize-1)/pageSize;
30 static const long pageSize = Process::GetPageSize();
31 unsigned NumPages = (NumBytes+pageSize-1)/pageSize;
3332
3433 //FIXME: support NearBlock if ever needed on Win64.
3534
135135 builder.setEngineKind(ForceInterpreter
136136 ? EngineKind::Interpreter
137137 : EngineKind::JIT);
138 // FIXME: Don't allocate GVs with code once the JIT because smarter about
139 // memory management.
140 builder.setAllocateGVsWithCode(true);
138141
139142 // If we are supposed to override the target triple, do so now.
140143 if (!TargetTriple.empty())
None //===- JITMemoryManagerTest.cpp - Unit tests for the JIT memory manager ---===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "gtest/gtest.h"
10 #include "llvm/ADT/OwningPtr.h"
11 #include "llvm/ExecutionEngine/JITMemoryManager.h"
12 #include "llvm/DerivedTypes.h"
13 #include "llvm/Function.h"
14 #include "llvm/GlobalValue.h"
15
16 using namespace llvm;
17
18 namespace {
19
20 Function *makeFakeFunction() {
21 std::vector params;
22 const FunctionType *FTy = FunctionType::get(Type::VoidTy, params, false);
23 return Function::Create(FTy, GlobalValue::ExternalLinkage);
24 }
25
26 // Allocate three simple functions that fit in the initial slab. This exercises
27 // the code in the case that we don't have to allocate more memory to store the
28 // function bodies.
29 TEST(JITMemoryManagerTest, NoAllocations) {
30 OwningPtr MemMgr(
31 JITMemoryManager::CreateDefaultMemManager());
32 uintptr_t size;
33 uint8_t *start;
34 std::string Error;
35
36 // Allocate the functions.
37 OwningPtr F1(makeFakeFunction());
38 size = 1024;
39 start = MemMgr->startFunctionBody(F1.get(), size);
40 memset(start, 0xFF, 1024);
41 MemMgr->endFunctionBody(F1.get(), start, start + 1024);
42 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
43
44 OwningPtr F2(makeFakeFunction());
45 size = 1024;
46 start = MemMgr->startFunctionBody(F2.get(), size);
47 memset(start, 0xFF, 1024);
48 MemMgr->endFunctionBody(F2.get(), start, start + 1024);
49 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
50
51 OwningPtr F3(makeFakeFunction());
52 size = 1024;
53 start = MemMgr->startFunctionBody(F3.get(), size);
54 memset(start, 0xFF, 1024);
55 MemMgr->endFunctionBody(F3.get(), start, start + 1024);
56 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
57
58 // Deallocate them out of order, in case that matters.
59 MemMgr->deallocateMemForFunction(F2.get());
60 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
61 MemMgr->deallocateMemForFunction(F1.get());
62 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
63 MemMgr->deallocateMemForFunction(F3.get());
64 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
65 }
66
67 // Make three large functions that take up most of the space in the slab. Then
68 // try allocating three smaller functions that don't require additional slabs.
69 TEST(JITMemoryManagerTest, TestCodeAllocation) {
70 OwningPtr MemMgr(
71 JITMemoryManager::CreateDefaultMemManager());
72 uintptr_t size;
73 uint8_t *start;
74 std::string Error;
75
76 // Big functions are a little less than the largest block size.
77 const uintptr_t smallFuncSize = 1024;
78 const uintptr_t bigFuncSize = (MemMgr->GetDefaultCodeSlabSize() -
79 smallFuncSize * 2);
80
81 // Allocate big functions
82 OwningPtr F1(makeFakeFunction());
83 size = bigFuncSize;
84 start = MemMgr->startFunctionBody(F1.get(), size);
85 ASSERT_LE(bigFuncSize, size);
86 memset(start, 0xFF, bigFuncSize);
87 MemMgr->endFunctionBody(F1.get(), start, start + bigFuncSize);
88 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
89
90 OwningPtr F2(makeFakeFunction());
91 size = bigFuncSize;
92 start = MemMgr->startFunctionBody(F2.get(), size);
93 ASSERT_LE(bigFuncSize, size);
94 memset(start, 0xFF, bigFuncSize);
95 MemMgr->endFunctionBody(F2.get(), start, start + bigFuncSize);
96 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
97
98 OwningPtr F3(makeFakeFunction());
99 size = bigFuncSize;
100 start = MemMgr->startFunctionBody(F3.get(), size);
101 ASSERT_LE(bigFuncSize, size);
102 memset(start, 0xFF, bigFuncSize);
103 MemMgr->endFunctionBody(F3.get(), start, start + bigFuncSize);
104 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
105
106 // Check that each large function took it's own slab.
107 EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
108
109 // Allocate small functions
110 OwningPtr F4(makeFakeFunction());
111 size = smallFuncSize;
112 start = MemMgr->startFunctionBody(F4.get(), size);
113 ASSERT_LE(smallFuncSize, size);
114 memset(start, 0xFF, smallFuncSize);
115 MemMgr->endFunctionBody(F4.get(), start, start + smallFuncSize);
116 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
117
118 OwningPtr F5(makeFakeFunction());
119 size = smallFuncSize;
120 start = MemMgr->startFunctionBody(F5.get(), size);
121 ASSERT_LE(smallFuncSize, size);
122 memset(start, 0xFF, smallFuncSize);
123 MemMgr->endFunctionBody(F5.get(), start, start + smallFuncSize);
124 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
125
126 OwningPtr F6(makeFakeFunction());
127 size = smallFuncSize;
128 start = MemMgr->startFunctionBody(F6.get(), size);
129 ASSERT_LE(smallFuncSize, size);
130 memset(start, 0xFF, smallFuncSize);
131 MemMgr->endFunctionBody(F6.get(), start, start + smallFuncSize);
132 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
133
134 // Check that the small functions didn't allocate any new slabs.
135 EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
136
137 // Deallocate them out of order, in case that matters.
138 MemMgr->deallocateMemForFunction(F2.get());
139 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
140 MemMgr->deallocateMemForFunction(F1.get());
141 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
142 MemMgr->deallocateMemForFunction(F4.get());
143 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
144 MemMgr->deallocateMemForFunction(F3.get());
145 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
146 MemMgr->deallocateMemForFunction(F5.get());
147 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
148 MemMgr->deallocateMemForFunction(F6.get());
149 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
150 }
151
152 // Allocate five global ints of varying widths and alignment, and check their
153 // alignment and overlap.
154 TEST(JITMemoryManagerTest, TestSmallGlobalInts) {
155 OwningPtr MemMgr(
156 JITMemoryManager::CreateDefaultMemManager());
157 uint8_t *a = (uint8_t *)MemMgr->allocateGlobal(8, 0);
158 uint16_t *b = (uint16_t*)MemMgr->allocateGlobal(16, 2);
159 uint32_t *c = (uint32_t*)MemMgr->allocateGlobal(32, 4);
160 uint64_t *d = (uint64_t*)MemMgr->allocateGlobal(64, 8);
161
162 // Check the alignment.
163 EXPECT_EQ(0U, ((uintptr_t)b) & 0x1);
164 EXPECT_EQ(0U, ((uintptr_t)c) & 0x3);
165 EXPECT_EQ(0U, ((uintptr_t)d) & 0x7);
166
167 // Initialize them each one at a time and make sure they don't overlap.
168 *a = 0xff;
169 *b = 0U;
170 *c = 0U;
171 *d = 0U;
172 EXPECT_EQ(0xffU, *a);
173 EXPECT_EQ(0U, *b);
174 EXPECT_EQ(0U, *c);
175 EXPECT_EQ(0U, *d);
176 *a = 0U;
177 *b = 0xffffU;
178 EXPECT_EQ(0U, *a);
179 EXPECT_EQ(0xffffU, *b);
180 EXPECT_EQ(0U, *c);
181 EXPECT_EQ(0U, *d);
182 *b = 0U;
183 *c = 0xffffffffU;
184 EXPECT_EQ(0U, *a);
185 EXPECT_EQ(0U, *b);
186 EXPECT_EQ(0xffffffffU, *c);
187 EXPECT_EQ(0U, *d);
188 *c = 0U;
189 *d = 0xffffffffffffffffU;
190 EXPECT_EQ(0U, *a);
191 EXPECT_EQ(0U, *b);
192 EXPECT_EQ(0U, *c);
193 EXPECT_EQ(0xffffffffffffffffU, *d);
194
195 // Make sure we didn't allocate any extra slabs for this tiny amount of data.
196 EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
197 }
198
199 // Allocate a small global, a big global, and a third global, and make sure we
200 // only use two slabs for that.
201 TEST(JITMemoryManagerTest, TestLargeGlobalArray) {
202 OwningPtr MemMgr(
203 JITMemoryManager::CreateDefaultMemManager());
204 size_t Size = 4 * MemMgr->GetDefaultDataSlabSize();
205 uint64_t *a = (uint64_t*)MemMgr->allocateGlobal(64, 8);
206 uint8_t *g = MemMgr->allocateGlobal(Size, 8);
207 uint64_t *b = (uint64_t*)MemMgr->allocateGlobal(64, 8);
208
209 // Check the alignment.
210 EXPECT_EQ(0U, ((uintptr_t)a) & 0x7);
211 EXPECT_EQ(0U, ((uintptr_t)g) & 0x7);
212 EXPECT_EQ(0U, ((uintptr_t)b) & 0x7);
213
214 // Initialize them to make sure we don't segfault and make sure they don't
215 // overlap.
216 memset(a, 0x1, 8);
217 memset(g, 0x2, Size);
218 memset(b, 0x3, 8);
219 EXPECT_EQ(0x0101010101010101U, *a);
220 // Just check the edges.
221 EXPECT_EQ(0x02U, g[0]);
222 EXPECT_EQ(0x02U, g[Size - 1]);
223 EXPECT_EQ(0x0303030303030303U, *b);
224
225 // Check the number of slabs.
226 EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
227 }
228
229 // Allocate lots of medium globals so that we can test moving the bump allocator
230 // to a new slab.
231 TEST(JITMemoryManagerTest, TestManyGlobals) {
232 OwningPtr MemMgr(
233 JITMemoryManager::CreateDefaultMemManager());
234 size_t SlabSize = MemMgr->GetDefaultDataSlabSize();
235 size_t Size = 128;
236 int Iters = (SlabSize / Size) + 1;
237
238 // We should start with one slab.
239 EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
240
241 // After allocating a bunch of globals, we should have two.
242 for (int I = 0; I < Iters; ++I)
243 MemMgr->allocateGlobal(Size, 8);
244 EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
245
246 // And after much more, we should have three.
247 for (int I = 0; I < Iters; ++I)
248 MemMgr->allocateGlobal(Size, 8);
249 EXPECT_EQ(3U, MemMgr->GetNumDataSlabs());
250 }
251
252 // Allocate lots of function stubs so that we can test moving the stub bump
253 // allocator to a new slab.
254 TEST(JITMemoryManagerTest, TestManyStubs) {
255 OwningPtr MemMgr(
256 JITMemoryManager::CreateDefaultMemManager());
257 size_t SlabSize = MemMgr->GetDefaultStubSlabSize();
258 size_t Size = 128;
259 int Iters = (SlabSize / Size) + 1;
260
261 // We should start with one slab.
262 EXPECT_EQ(1U, MemMgr->GetNumStubSlabs());
263
264 // After allocating a bunch of stubs, we should have two.
265 for (int I = 0; I < Iters; ++I)
266 MemMgr->allocateStub(NULL, Size, 8);
267 EXPECT_EQ(2U, MemMgr->GetNumStubSlabs());
268
269 // And after much more, we should have three.
270 for (int I = 0; I < Iters; ++I)
271 MemMgr->allocateStub(NULL, Size, 8);
272 EXPECT_EQ(3U, MemMgr->GetNumStubSlabs());
273 }
274
275 }
None //===- llvm/unittest/Support/AllocatorTest.cpp - BumpPtrAllocator tests ---===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/Support/Allocator.h"
10
11 #include "gtest/gtest.h"
12
13 using namespace llvm;
14
15 namespace {
16
17 TEST(AllocatorTest, Basics) {
18 BumpPtrAllocator Alloc;
19 int *a = (int*)Alloc.Allocate(sizeof(int), 0);
20 int *b = (int*)Alloc.Allocate(sizeof(int) * 10, 0);
21 int *c = (int*)Alloc.Allocate(sizeof(int), 0);
22 *a = 1;
23 b[0] = 2;
24 b[9] = 2;
25 *c = 3;
26 EXPECT_EQ(1, *a);
27 EXPECT_EQ(2, b[0]);
28 EXPECT_EQ(2, b[9]);
29 EXPECT_EQ(3, *c);
30 EXPECT_EQ(1U, Alloc.GetNumSlabs());
31 }
32
33 // Allocate enough bytes to create three slabs.
34 TEST(AllocatorTest, ThreeSlabs) {
35 BumpPtrAllocator Alloc(4096, 4096);
36 Alloc.Allocate(3000, 0);
37 EXPECT_EQ(1U, Alloc.GetNumSlabs());
38 Alloc.Allocate(3000, 0);
39 EXPECT_EQ(2U, Alloc.GetNumSlabs());
40 Alloc.Allocate(3000, 0);
41 EXPECT_EQ(3U, Alloc.GetNumSlabs());
42 }
43
44 // Allocate enough bytes to create two slabs, reset the allocator, and do it
45 // again.
46 TEST(AllocatorTest, TestReset) {
47 BumpPtrAllocator Alloc(4096, 4096);
48 Alloc.Allocate(3000, 0);
49 EXPECT_EQ(1U, Alloc.GetNumSlabs());
50 Alloc.Allocate(3000, 0);
51 EXPECT_EQ(2U, Alloc.GetNumSlabs());
52 Alloc.Reset();
53 EXPECT_EQ(1U, Alloc.GetNumSlabs());
54 Alloc.Allocate(3000, 0);
55 EXPECT_EQ(1U, Alloc.GetNumSlabs());
56 Alloc.Allocate(3000, 0);
57 EXPECT_EQ(2U, Alloc.GetNumSlabs());
58 }
59
60 } // anonymous namespace