llvm.org GIT mirror llvm / 81ce3ed
Make the JIT code emitter properly retry and ask for more memory when it runs out, and make the default memory manager allocate more memory when that happens. Also, switch function stubs and global data over to using the BumpPtrAllocator. With this change the JIT no longer mmaps (or the equivalent on Windows) 16 MB of memory up front; instead it allocates 512 KB slabs on demand. I suspect this size could go lower, especially on embedded platforms, now that more slabs can be allocated.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76828 91177308-0d34-0410-b5e6-96231b3b80d8

Reid Kleckner, 11 years ago
8 changed files with 667 additions and 131 deletions.
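The diff below is easier to follow with the retry mechanism in mind. A minimal sketch of the pattern the commit message describes (the names and the 512 KB starting hint are illustrative, not code from this commit):

    bool emitOnce(uintptr_t SizeHint);   // hypothetical: returns false when the buffer overflowed

    void emitWithRetry() {
      uintptr_t SizeEstimate = 0;        // 0 lets the memory manager pick a block size
      while (!emitOnce(SizeEstimate)) {
        // Ran out of space: discard the partial output and retry, asking the
        // memory manager for roughly twice the space used on the failed attempt.
        SizeEstimate = SizeEstimate ? SizeEstimate * 2 : 512 * 1024;
      }
    }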
1414 #define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
1515
1616 #include "llvm/Support/DataTypes.h"
17 #include <string>
1718
1819 namespace llvm {
20
1921 class Function;
22 class GlobalValue;
2023
2124 /// JITMemoryManager - This interface is used by the JIT to allocate and manage
2225 /// memory for the code generated by the JIT. This can be reimplemented by
8790 //===--------------------------------------------------------------------===//
8891 // Main Allocation Functions
8992 //===--------------------------------------------------------------------===//
90
91 /// startFunctionBody - When we start JITing a function, the JIT calls this
93
94 /// startFunctionBody - When we start JITing a function, the JIT calls this
9295 /// method to allocate a block of free RWX memory, which returns a pointer to
93 /// it. The JIT doesn't know ahead of time how much space it will need to
94 /// emit the function, so it doesn't pass in the size. Instead, this method
95 /// is required to pass back a "valid size". The JIT will be careful to not
96 /// write more than the returned ActualSize bytes of memory.
97 virtual uint8_t *startFunctionBody(const Function *F,
96 /// it. If the JIT wants to request a block of memory of at least a certain
97 /// size, it passes that value as ActualSize, and this method returns a block
98 /// with at least that much space. If the JIT doesn't know ahead of time how
99 /// much space it will need to emit the function, it passes 0 for the
100 /// ActualSize. In either case, this method is required to pass back the size
101 /// of the allocated block through ActualSize. The JIT will be careful to
102 /// not write more than the returned ActualSize bytes of memory.
103 virtual uint8_t *startFunctionBody(const Function *F,
98104 uintptr_t &ActualSize) = 0;
99
105
100106 /// allocateStub - This method is called by the JIT to allocate space for a
101107 /// function stub (used to handle limited branch displacements) while it is
102108 /// JIT compiling a function. For example, if foo calls bar, and if bar
117123 virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
118124 uint8_t *FunctionEnd) = 0;
119125
120 /// allocateSpace - Allocate a memory block of the given size.
126 /// allocateSpace - Allocate a memory block of the given size. This method
127 /// cannot be called between calls to startFunctionBody and endFunctionBody.
121128 virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0;
122129
123130 /// allocateGlobal - Allocate memory for a global.
131 ///
124132 virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) = 0;
125133
126134 /// deallocateMemForFunction - Free JIT memory for the specified function.
136144 /// the exception table.
137145 virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
138146 uint8_t *TableEnd, uint8_t* FrameRegister) = 0;
147
148 /// CheckInvariants - For testing only. Return true if all internal
149 /// invariants are preserved, or return false and set ErrorStr to a helpful
150 /// error message.
151 virtual bool CheckInvariants(std::string &ErrorStr) {
152 return true;
153 }
154
155 /// GetDefaultCodeSlabSize - For testing only. Returns DefaultCodeSlabSize
156 /// from DefaultJITMemoryManager.
157 virtual size_t GetDefaultCodeSlabSize() {
158 return 0;
159 }
160
161 /// GetDefaultDataSlabSize - For testing only. Returns DefaultSlabSize
162 /// from DefaultJITMemoryManager.
163 virtual size_t GetDefaultDataSlabSize() {
164 return 0;
165 }
166
167 /// GetDefaultStubSlabSize - For testing only. Returns DefaultSlabSize
168 /// from DefaultJITMemoryManager.
169 virtual size_t GetDefaultStubSlabSize() {
170 return 0;
171 }
172
173 /// GetNumCodeSlabs - For testing only. Returns the number of MemoryBlocks
174 /// allocated for code.
175 virtual unsigned GetNumCodeSlabs() {
176 return 0;
177 }
178
179 /// GetNumDataSlabs - For testing only. Returns the number of MemoryBlocks
180 /// allocated for data.
181 virtual unsigned GetNumDataSlabs() {
182 return 0;
183 }
184
185 /// GetNumStubSlabs - For testing only. Returns the number of MemoryBlocks
186 /// allocated for function stubs.
187 virtual unsigned GetNumStubSlabs() {
188 return 0;
189 }
139190 };
140191
141192 } // end namespace llvm.
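To make the new startFunctionBody contract concrete, here is a caller-side sketch (MemMgr, F, and the 64 KB hint are illustrative; only the in/out ActualSize behavior comes from the header above):

    uintptr_t ActualSize = 64 * 1024;              // ask for at least 64 KB; 0 means "any block"
    uint8_t *Begin = MemMgr->startFunctionBody(F, ActualSize);
    // On return, ActualSize holds the usable size of the block; the emitter must
    // not write past Begin + ActualSize.
    uint8_t *End = Begin;                          // ... emit at most ActualSize bytes ...
    MemMgr->endFunctionBody(F, Begin, End);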
1313 #ifndef LLVM_SYSTEM_MEMORY_H
1414 #define LLVM_SYSTEM_MEMORY_H
1515
16 #include "llvm/Support/DataTypes.h"
1617 #include <string>
1718
1819 namespace llvm {
2526 /// @brief Memory block abstraction.
2627 class MemoryBlock {
2728 public:
29 MemoryBlock() { }
30 MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
2831 void *base() const { return Address; }
29 unsigned size() const { return Size; }
32 size_t size() const { return Size; }
3033 private:
3134 void *Address; ///< Address of first byte of memory area
32 unsigned Size; ///< Size, in bytes of the memory area
35 size_t Size; ///< Size, in bytes of the memory area
3336 friend class Memory;
3437 };
3538
4952 /// a null memory block and fills in *ErrMsg.
5053 ///
5154 /// @brief Allocate Read/Write/Execute memory.
52 static MemoryBlock AllocateRWX(unsigned NumBytes,
55 static MemoryBlock AllocateRWX(size_t NumBytes,
5356 const MemoryBlock *NearBlock,
5457 std::string *ErrMsg = 0);
5558
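A small usage sketch of the widened AllocateRWX interface (error handling omitted; the NearBlock hint is best effort and, as the memory manager notes below, ignored on Windows):

    #include "llvm/System/Memory.h"
    #include <string>
    using namespace llvm;

    void allocateTwoSlabs() {
      std::string Err;
      sys::MemoryBlock First  = sys::Memory::AllocateRWX(512 * 1024, 0, &Err);
      sys::MemoryBlock Second = sys::Memory::AllocateRWX(512 * 1024, &First, &Err);
      // ... emit code into the blocks ...
      sys::Memory::ReleaseRWX(Second);
      sys::Memory::ReleaseRWX(First);
    }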
5050
5151 STATISTIC(NumBytes, "Number of bytes of machine code compiled");
5252 STATISTIC(NumRelos, "Number of relocations applied");
53 STATISTIC(NumRetries, "Number of retries with more memory");
5354 static JIT *TheJIT = 0;
5455
5556
424425 // save BufferBegin/BufferEnd/CurBufferPtr here.
425426 uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
426427
428 // When reattempting to JIT a function after running out of space, we store
429 // the estimated size of the function we're trying to JIT here, so we can
430 // ask the memory manager for at least this much space. When we
431 // successfully emit the function, we reset this back to zero.
432 uintptr_t SizeEstimate;
433
427434 /// Relocations - These are the relocations that the function needs, as
428435 /// emitted.
429436 std::vector<MachineRelocation> Relocations;
495502 DebugLocTuple PrevDLT;
496503
497504 public:
498 JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit), CurFn(0) {
505 JITEmitter(JIT &jit, JITMemoryManager *JMM)
506 : SizeEstimate(0), Resolver(jit), CurFn(0) {
499507 MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
500508 if (jit.getJITInfo().needsGOT()) {
501509 MemMgr->AllocateGOT();
560568 return MBBLocations[MBB->getNumber()];
561569 }
562570
571 /// retryWithMoreMemory - Log a retry and deallocate all memory for the
572 /// given function. Increase the minimum allocation size so that we get
573 /// more memory next time.
574 void retryWithMoreMemory(MachineFunction &F);
575
563576 /// deallocateMemForFunction - Deallocate all memory for the specified
564577 /// function body.
565 void deallocateMemForFunction(Function *F);
578 void deallocateMemForFunction(const Function *F);
566579
567580 /// AddStubToCurrentFunction - Mark the current function being JIT'd as
568581 /// using the stub at the specified address. Allows
924937 // previously allocated.
925938 ActualSize += GetSizeOfGlobalsInBytes(F);
926939 DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
940 } else if (SizeEstimate > 0) {
941 // SizeEstimate will be non-zero on reallocation attempts.
942 ActualSize = SizeEstimate;
927943 }
928944
929945 BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
948964
949965 bool JITEmitter::finishFunction(MachineFunction &F) {
950966 if (CurBufferPtr == BufferEnd) {
951 // FIXME: Allocate more space, then try again.
952 llvm_report_error("JIT: Ran out of space for generated machine code!");
953 }
954
967 // We must call endFunctionBody before retrying, because
968 // deallocateMemForFunction requires it.
969 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
970 retryWithMoreMemory(F);
971 return true;
972 }
973
955974 emitJumpTableInfo(F.getJumpTableInfo());
956
975
957976 // FnStart is the start of the text, not the start of the constant pool and
958977 // other per-function data.
959978 uint8_t *FnStart =
10441063 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
10451064
10461065 if (CurBufferPtr == BufferEnd) {
1047 // FIXME: Allocate more space, then try again.
1048 llvm_report_error("JIT: Ran out of space for generated machine code!");
1066 retryWithMoreMemory(F);
1067 return true;
1068 } else {
1069 // Now that we've succeeded in emitting the function, reset the
1070 // SizeEstimate back down to zero.
1071 SizeEstimate = 0;
10491072 }
10501073
10511074 BufferBegin = CurBufferPtr = 0;
11301153 return false;
11311154 }
11321155
1156 void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
1157 DOUT << "JIT: Ran out of space for native code. Reattempting.\n";
1158 Relocations.clear(); // Clear the old relocations or we'll reapply them.
1159 ConstPoolAddresses.clear();
1160 ++NumRetries;
1161 deallocateMemForFunction(F.getFunction());
1162 // Try again with at least twice as much free space.
1163 SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
1164 }
1165
11331166 /// deallocateMemForFunction - Deallocate all memory for the specified
11341167 /// function body. Also drop any references the function has to stubs.
1135 void JITEmitter::deallocateMemForFunction(Function *F) {
1168 void JITEmitter::deallocateMemForFunction(const Function *F) {
11361169 MemMgr->deallocateMemForFunction(F);
11371170
11381171 // If the function did not reference any stubs, return.
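For context, finishFunction's boolean result is consumed by the target's machine code emitter pass, which re-runs emission until it succeeds. A rough sketch of that loop (a hypothetical driver, not the exact pass code in this commit):

    void emitFunction(MachineFunction &MF, JITEmitter &JE) {  // hypothetical driver
      do {
        JE.startFunction(MF);
        // ... emit instructions, constant pools, jump tables ...
      } while (JE.finishFunction(MF));  // true => ran out of space, emit again
    }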
1010 //
1111 //===----------------------------------------------------------------------===//
1212
13 #define DEBUG_TYPE "jit"
14 #include "llvm/ExecutionEngine/JITMemoryManager.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
1317 #include "llvm/GlobalValue.h"
14 #include "llvm/ExecutionEngine/JITMemoryManager.h"
18 #include "llvm/Support/Allocator.h"
1519 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/Debug.h"
1621 #include "llvm/Support/ErrorHandling.h"
22 #include "llvm/Support/raw_ostream.h"
1723 #include "llvm/System/Memory.h"
1824 #include
1925 #include
2430 #include
2531 using namespace llvm;
2632
33 STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
2734
2835 JITMemoryManager::~JITMemoryManager() {}
2936
140147 /// FreeRangeHeader to allocate from.
141148 FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
142149 MemoryRangeHeader *FollowingBlock = &getBlockAfter();
143 assert(ThisAllocated && "This block is already allocated!");
150 assert(ThisAllocated && "This block is already free!");
144151 assert(FollowingBlock->PrevAllocated && "Flags out of sync!");
145152
146153 FreeRangeHeader *FreeListToReturn = FreeList;
243250 // Memory Block Implementation.
244251 //===----------------------------------------------------------------------===//
245252
246 namespace {
253 namespace {
254
255 class DefaultJITMemoryManager;
256
257 class JITSlabAllocator : public SlabAllocator {
258 DefaultJITMemoryManager &JMM;
259 public:
260 JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
261 virtual ~JITSlabAllocator() { }
262 virtual MemSlab *Allocate(size_t Size);
263 virtual void Deallocate(MemSlab *Slab);
264 };
265
247266 /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
248267 /// This splits a large block of MAP_NORESERVE'd memory into two
249268 /// sections, one for function stubs, one for the functions themselves. We
250269 /// have to do this because we may need to emit a function stub while in the
251270 /// middle of emitting a function, and we don't know how large the function we
252271 /// are emitting is.
253 class VISIBILITY_HIDDEN DefaultJITMemoryManager : public JITMemoryManager {
254 bool PoisonMemory; // Whether to poison freed memory.
255
256 std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
257 FreeRangeHeader *FreeMemoryList; // Circular list of free blocks.
258
272 class DefaultJITMemoryManager : public JITMemoryManager {
273
274 // Whether to poison freed memory.
275 bool PoisonMemory;
276
277 /// LastSlab - This points to the last slab allocated and is used as the
278 /// NearBlock parameter to AllocateRWX so that we can attempt to lay out all
279 /// stubs, data, and code contiguously in memory. In general, however, this
280 /// is not possible because the NearBlock parameter is ignored on Windows
281 /// platforms and even on Unix it works on a best-effort basis.
282 sys::MemoryBlock LastSlab;
283
284 // Memory slabs allocated by the JIT. We refer to them as slabs so we don't
285 // confuse them with the blocks of memory described above.
286 std::vector<sys::MemoryBlock> CodeSlabs;
287 JITSlabAllocator BumpSlabAllocator;
288 BumpPtrAllocator StubAllocator;
289 BumpPtrAllocator DataAllocator;
290
291 // Circular list of free blocks.
292 FreeRangeHeader *FreeMemoryList;
293
259294 // When emitting code into a memory block, this is the block.
260295 MemoryRangeHeader *CurBlock;
261
262 uint8_t *CurStubPtr, *StubBase;
263 uint8_t *CurGlobalPtr, *GlobalEnd;
296
264297 uint8_t *GOTBase; // Target Specific reserved memory
265298 void *DlsymTable; // Stub external symbol information
266299
267 // Centralize memory block allocation.
268 sys::MemoryBlock getNewMemoryBlock(unsigned size);
269
270300 std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
271301 std::map<const Function*, MemoryRangeHeader*> TableBlocks;
272302 public:
273303 DefaultJITMemoryManager();
274304 ~DefaultJITMemoryManager();
275305
306 /// allocateNewSlab - Allocates a new MemoryBlock and remembers it as the
307 /// last slab it allocated, so that subsequent allocations follow it.
308 sys::MemoryBlock allocateNewSlab(size_t size);
309
310 /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
311 /// least this much unless more is requested.
312 static const size_t DefaultCodeSlabSize;
313
314 /// DefaultSlabSize - Allocate data into slabs of this size unless we get
315 /// an allocation above SizeThreshold.
316 static const size_t DefaultSlabSize;
317
318 /// DefaultSizeThreshold - For any allocation larger than this threshold, we
319 /// should allocate a separate slab.
320 static const size_t DefaultSizeThreshold;
321
276322 void AllocateGOT();
277323 void SetDlsymTable(void *);
278
279 uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
280 unsigned Alignment);
281
324
325 // Testing methods.
326 virtual bool CheckInvariants(std::string &ErrorStr);
327 size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
328 size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
329 size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
330 unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
331 unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
332 unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }
333
282334 /// startFunctionBody - When a function starts, allocate a block of free
283335 /// executable memory, returning a pointer to it and its actual size.
284336 uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
285
337
286338 FreeRangeHeader* candidateBlock = FreeMemoryList;
287339 FreeRangeHeader* head = FreeMemoryList;
288340 FreeRangeHeader* iter = head->Next;
289341
290342 uintptr_t largest = candidateBlock->BlockSize;
291
343
292344 // Search for the largest free block
293345 while (iter != head) {
294 if (iter->BlockSize > largest) {
295 largest = iter->BlockSize;
296 candidateBlock = iter;
297 }
298 iter = iter->Next;
346 if (iter->BlockSize > largest) {
347 largest = iter->BlockSize;
348 candidateBlock = iter;
349 }
350 iter = iter->Next;
299351 }
300
352
353 // If this block isn't big enough for the allocation desired, allocate
354 // another block of memory and add it to the free list.
355 if (largest - sizeof(MemoryRangeHeader) < ActualSize) {
356 DOUT << "JIT: Allocating another slab of memory for function.";
357 candidateBlock = allocateNewCodeSlab((size_t)ActualSize);
358 }
359
301360 // Select this candidate block for allocation
302361 CurBlock = candidateBlock;
303362
304363 // Allocate the entire memory block.
305364 FreeMemoryList = candidateBlock->AllocateBlock();
306 ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
307 return (uint8_t *)(CurBlock+1);
308 }
309
365 ActualSize = CurBlock->BlockSize - sizeof(MemoryRangeHeader);
366 return (uint8_t *)(CurBlock + 1);
367 }
368
369 /// allocateNewCodeSlab - Helper method to allocate a new slab of code
370 /// memory from the OS and add it to the free list. Returns the new
371 /// FreeRangeHeader at the base of the slab.
372 FreeRangeHeader *allocateNewCodeSlab(size_t MinSize) {
373 // If the user needs at least MinSize free memory, then we account for
374 // two MemoryRangeHeaders: the one in the user's block, and the one at the
375 // end of the slab.
376 size_t PaddedMin = MinSize + 2 * sizeof(MemoryRangeHeader);
377 size_t SlabSize = std::max(DefaultCodeSlabSize, PaddedMin);
378 sys::MemoryBlock B = allocateNewSlab(SlabSize);
379 CodeSlabs.push_back(B);
380 char *MemBase = (char*)(B.base());
381
382 // Put a tiny allocated block at the end of the memory chunk, so when
383 // FreeBlock calls getBlockAfter it doesn't fall off the end.
384 MemoryRangeHeader *EndBlock =
385 (MemoryRangeHeader*)(MemBase + B.size()) - 1;
386 EndBlock->ThisAllocated = 1;
387 EndBlock->PrevAllocated = 0;
388 EndBlock->BlockSize = sizeof(MemoryRangeHeader);
389
390 // Start out with a vast new block of free memory.
391 FreeRangeHeader *NewBlock = (FreeRangeHeader*)MemBase;
392 NewBlock->ThisAllocated = 0;
393 // Make sure getFreeBlockBefore doesn't look into unmapped memory.
394 NewBlock->PrevAllocated = 1;
395 NewBlock->BlockSize = (uintptr_t)EndBlock - (uintptr_t)NewBlock;
396 NewBlock->SetEndOfBlockSizeMarker();
397 NewBlock->AddToFreeList(FreeMemoryList);
398
399 assert(NewBlock->BlockSize - sizeof(MemoryRangeHeader) >= MinSize &&
400 "The block was too small!");
401 return NewBlock;
402 }
403
310404 /// endFunctionBody - The function F is now allocated, and takes the memory
311405 /// in the range [FunctionStart,FunctionEnd).
312406 void endFunctionBody(const Function *F, uint8_t *FunctionStart,
322416 FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
323417 }
324418
325 /// allocateSpace - Allocate a memory block of the given size.
419 /// allocateSpace - Allocate a memory block of the given size. This method
420 /// cannot be called between calls to startFunctionBody and endFunctionBody.
326421 uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
327422 CurBlock = FreeMemoryList;
328423 FreeMemoryList = FreeMemoryList->AllocateBlock();
339434 return result;
340435 }
341436
342 /// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
343 /// this method does not touch the current block and can be called at any
344 /// time.
437 /// allocateStub - Allocate memory for a function stub.
438 uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
439 unsigned Alignment) {
440 return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
441 }
442
443 /// allocateGlobal - Allocate memory for a global.
345444 uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
346 uint8_t *Result = CurGlobalPtr;
347
348 // Align the pointer.
349 if (Alignment == 0) Alignment = 1;
350 Result = (uint8_t*)(((uintptr_t)Result + Alignment-1) &
351 ~(uintptr_t)(Alignment-1));
352
353 // Move the current global pointer forward.
354 CurGlobalPtr += Result - CurGlobalPtr + Size;
355
356 // Check for overflow.
357 if (CurGlobalPtr > GlobalEnd) {
358 // FIXME: Allocate more memory.
359 llvm_report_error("JIT ran out of memory for globals!");
360 }
361
362 return Result;
445 return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
363446 }
364447
365448 /// startExceptionTable - Use startFunctionBody to allocate memory for the
436519 /// the code pages may need permissions changed.
437520 void setMemoryWritable(void)
438521 {
439 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
440 sys::Memory::setWritable(Blocks[i]);
522 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
523 sys::Memory::setWritable(CodeSlabs[i]);
441524 }
442525 /// setMemoryExecutable - When code generation is done and we're ready to
443526 /// start execution, the code pages may need permissions changed.
444527 void setMemoryExecutable(void)
445528 {
446 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
447 sys::Memory::setExecutable(Blocks[i]);
529 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
530 sys::Memory::setExecutable(CodeSlabs[i]);
448531 }
449532
450533 /// setPoisonMemory - Controls whether we write garbage over freed memory.
455538 };
456539 }
457540
458 DefaultJITMemoryManager::DefaultJITMemoryManager() {
541 MemSlab *JITSlabAllocator::Allocate(size_t Size) {
542 sys::MemoryBlock B = JMM.allocateNewSlab(Size);
543 MemSlab *Slab = (MemSlab*)B.base();
544 Slab->Size = B.size();
545 Slab->NextPtr = 0;
546 return Slab;
547 }
548
549 void JITSlabAllocator::Deallocate(MemSlab *Slab) {
550 sys::MemoryBlock B(Slab, Slab->Size);
551 sys::Memory::ReleaseRWX(B);
552 }
553
554 DefaultJITMemoryManager::DefaultJITMemoryManager()
555 : LastSlab(0, 0),
556 BumpSlabAllocator(*this),
557 StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
558 DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
559
459560 #ifdef NDEBUG
561 PoisonMemory = false;
562 #else
460563 PoisonMemory = true;
461 #else
462 PoisonMemory = false;
463564 #endif
464565
465 // Allocate a 16M block of memory for functions.
466 #if defined(__APPLE__) && defined(__arm__)
467 sys::MemoryBlock MemBlock = getNewMemoryBlock(4 << 20);
468 #else
469 sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
470 #endif
471
472 uint8_t *MemBase = static_cast<uint8_t*>(MemBlock.base());
473
474 // Allocate stubs backwards to the base, globals forward from the stubs, and
475 // functions forward after globals.
476 StubBase = MemBase;
477 CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.
478 CurGlobalPtr = CurStubPtr; // Use 2M for globals, working forwards.
479 GlobalEnd = CurGlobalPtr + 2*1024*1024;
566 // Allocate space for code.
567 sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
568 CodeSlabs.push_back(MemBlock);
569 uint8_t *MemBase = (uint8_t*)MemBlock.base();
480570
481571 // We set up the memory chunk with 4 mem regions, like this:
482572 // [ START
493583 MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
494584 Mem3->ThisAllocated = 1;
495585 Mem3->PrevAllocated = 0;
496 Mem3->BlockSize = 0;
586 Mem3->BlockSize = sizeof(MemoryRangeHeader);
497587
498588 /// Add a tiny free region so that the free list always has one entry.
499589 FreeRangeHeader *Mem2 =
509599 MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
510600 Mem1->ThisAllocated = 1;
511601 Mem1->PrevAllocated = 0;
512 Mem1->BlockSize = (char*)Mem2 - (char*)Mem1;
602 Mem1->BlockSize = sizeof(MemoryRangeHeader);
513603
514604 // Add a FreeRangeHeader to the start of the function body region, indicating
515605 // that the space is free. Mark the previous block allocated so we never look
516606 // at it.
517 FreeRangeHeader *Mem0 = (FreeRangeHeader*)GlobalEnd;
607 FreeRangeHeader *Mem0 = (FreeRangeHeader*)MemBase;
518608 Mem0->ThisAllocated = 0;
519609 Mem0->PrevAllocated = 1;
520610 Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
539629 }
540630
541631 DefaultJITMemoryManager::~DefaultJITMemoryManager() {
542 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
543 sys::Memory::ReleaseRWX(Blocks[i]);
544
632 for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
633 sys::Memory::ReleaseRWX(CodeSlabs[i]);
634
545635 delete[] GOTBase;
546 Blocks.clear();
547 }
548
549 uint8_t *DefaultJITMemoryManager::allocateStub(const GlobalValue* F,
550 unsigned StubSize,
551 unsigned Alignment) {
552 CurStubPtr -= StubSize;
553 CurStubPtr = (uint8_t*)(((intptr_t)CurStubPtr) &
554 ~(intptr_t)(Alignment-1));
555 if (CurStubPtr < StubBase) {
556 // FIXME: allocate a new block
557 llvm_report_error("JIT ran out of memory for function stubs!");
558 }
559 return CurStubPtr;
560 }
561
562 sys::MemoryBlock DefaultJITMemoryManager::getNewMemoryBlock(unsigned size) {
636 }
637
638 sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
563639 // Allocate a new block close to the last one.
564 const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.back();
565640 std::string ErrMsg;
566 sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg);
641 sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
642 sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
567643 if (B.base() == 0) {
568644 llvm_report_error("Allocation failed when allocating new memory in the"
569645 " JIT\n" + ErrMsg);
570646 }
571 Blocks.push_back(B);
647 LastSlab = B;
648 ++NumSlabs;
572649 return B;
573650 }
574651
652 /// CheckInvariants - For testing only. Return true if all internal invariants
653 /// are preserved, or return false and set ErrorStr to a helpful message. For
654 /// free and allocated blocks, make sure that adding BlockSize gives a valid block.
655 /// For free blocks, make sure they're in the free list and that their end of
656 /// block size marker is correct. This function should return an error before
657 /// accessing bad memory. This function is defined here instead of in
658 /// JITMemoryManagerTest.cpp so that we don't have to expose all of the
659 /// implementation details of DefaultJITMemoryManager.
660 bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
661 raw_string_ostream Err(ErrorStr);
662
663 // Construct the set of FreeRangeHeader pointers so we can query it
664 // efficiently.
665 llvm::SmallPtrSet<MemoryRangeHeader*, 16> FreeHdrSet;
666 FreeRangeHeader* FreeHead = FreeMemoryList;
667 FreeRangeHeader* FreeRange = FreeHead;
668
669 do {
670 // Check that the free range pointer is in the blocks we've allocated.
671 bool Found = false;
672 for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
673 E = CodeSlabs.end(); I != E && !Found; ++I) {
674 char *Start = (char*)I->base();
675 char *End = Start + I->size();
676 Found = (Start <= (char*)FreeRange && (char*)FreeRange < End);
677 }
678 if (!Found) {
679 Err << "Corrupt free list; points to " << FreeRange;
680 return false;
681 }
682
683 if (FreeRange->Next->Prev != FreeRange) {
684 Err << "Next and Prev pointers do not match.";
685 return false;
686 }
687
688 // Otherwise, add it to the set.
689 FreeHdrSet.insert(FreeRange);
690 FreeRange = FreeRange->Next;
691 } while (FreeRange != FreeHead);
692
693 // Go over each block, and look at each MemoryRangeHeader.
694 for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
695 E = CodeSlabs.end(); I != E; ++I) {
696 char *Start = (char*)I->base();
697 char *End = Start + I->size();
698
699 // Check each memory range.
700 for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
701 Start <= (char*)Hdr && (char*)Hdr < End;
702 Hdr = &Hdr->getBlockAfter()) {
703 if (Hdr->ThisAllocated == 0) {
704 // Check that this range is in the free list.
705 if (!FreeHdrSet.count(Hdr)) {
706 Err << "Found free header at " << Hdr << " that is not in free list.";
707 return false;
708 }
709
710 // Now make sure the size marker at the end of the block is correct.
711 uintptr_t *Marker = ((uintptr_t*)&Hdr->getBlockAfter()) - 1;
712 if (!(Start <= (char*)Marker && (char*)Marker < End)) {
713 Err << "Block size in header points out of current MemoryBlock.";
714 return false;
715 }
716 if (Hdr->BlockSize != *Marker) {
717 Err << "End of block size marker (" << *Marker << ") "
718 << "and BlockSize (" << Hdr->BlockSize << ") don't match.";
719 return false;
720 }
721 }
722
723 if (LastHdr && LastHdr->ThisAllocated != Hdr->PrevAllocated) {
724 Err << "Hdr->PrevAllocated (" << Hdr->PrevAllocated << ") != "
725 << "LastHdr->ThisAllocated (" << LastHdr->ThisAllocated << ")";
726 return false;
727 } else if (!LastHdr && !Hdr->PrevAllocated) {
728 Err << "The first header should have PrevAllocated true.";
729 return false;
730 }
731
732 // Remember the last header.
733 LastHdr = Hdr;
734 }
735 }
736
737 // All invariants are preserved.
738 return true;
739 }
575740
576741 JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
577742 return new DefaultJITMemoryManager();
578743 }
744
745 // Allocate memory for code in 512K slabs.
746 const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;
747
748 // Allocate globals and stubs in slabs of 64K. (probably 16 pages)
749 const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;
750
751 // Waste at most 16K at the end of each bump slab. (probably 4 pages)
752 const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;
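The stub and data side now relies on BumpPtrAllocator with a custom SlabAllocator (JITSlabAllocator above). The same pattern works with any backing store; a sketch using plain malloc, assuming the llvm/Support/Allocator.h interface this file builds on:

    #include "llvm/Support/Allocator.h"
    #include <cstdlib>
    using namespace llvm;

    class MallocSlabAllocator : public SlabAllocator {
    public:
      virtual MemSlab *Allocate(size_t Size) {
        // The MemSlab header lives at the start of the slab, just as in
        // JITSlabAllocator::Allocate above.
        MemSlab *Slab = (MemSlab*)malloc(Size);
        Slab->Size = Size;
        Slab->NextPtr = 0;
        return Slab;
      }
      virtual void Deallocate(MemSlab *Slab) { free(Slab); }
    };

    void example() {
      MallocSlabAllocator SlabAlloc;
      // 64 KB slabs; any allocation over 16 KB gets its own slab.
      BumpPtrAllocator Data(64 * 1024, 16 * 1024, SlabAlloc);
      void *P = Data.Allocate(128, 8);  // 128 bytes, 8-byte aligned
      (void)P;
    }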
1111 //===----------------------------------------------------------------------===//
1212
1313 #include "Unix.h"
14 #include "llvm/Support/DataTypes.h"
1415 #include "llvm/System/Process.h"
1516
1617 #ifdef HAVE_SYS_MMAN_H
2728 /// is very OS specific.
2829 ///
2930 llvm::sys::MemoryBlock
30 llvm::sys::Memory::AllocateRWX(unsigned NumBytes, const MemoryBlock* NearBlock,
31 llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
3132 std::string *ErrMsg) {
3233 if (NumBytes == 0) return MemoryBlock();
3334
34 unsigned pageSize = Process::GetPageSize();
35 unsigned NumPages = (NumBytes+pageSize-1)/pageSize;
35 size_t pageSize = Process::GetPageSize();
36 size_t NumPages = (NumBytes+pageSize-1)/pageSize;
3637
3738 int fd = -1;
3839 #ifdef NEED_DEV_ZERO_FOR_MMAP
1212 //===----------------------------------------------------------------------===//
1313
1414 #include "Win32.h"
15 #include "llvm/Support/DataTypes.h"
1516 #include "llvm/System/Process.h"
1617
1718 namespace llvm {
2223 //=== and must not be UNIX code
2324 //===----------------------------------------------------------------------===//
2425
25 MemoryBlock Memory::AllocateRWX(unsigned NumBytes,
26 MemoryBlock Memory::AllocateRWX(size_t NumBytes,
2627 const MemoryBlock *NearBlock,
2728 std::string *ErrMsg) {
2829 if (NumBytes == 0) return MemoryBlock();
2930
30 static const long pageSize = Process::GetPageSize();
31 unsigned NumPages = (NumBytes+pageSize-1)/pageSize;
31 static const size_t pageSize = Process::GetPageSize();
32 size_t NumPages = (NumBytes+pageSize-1)/pageSize;
3233
3334 //FIXME: support NearBlock if ever needed on Win64.
3435
135135 builder.setEngineKind(ForceInterpreter
136136 ? EngineKind::Interpreter
137137 : EngineKind::JIT);
138 // FIXME: Don't allocate GVs with code once the JIT because smarter about
139 // memory management.
140 builder.setAllocateGVsWithCode(true);
141138
142139 // If we are supposed to override the target triple, do so now.
143140 if (!TargetTriple.empty())
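With the default memory manager now growing global storage on demand, the setAllocateGVsWithCode(true) workaround removed above is no longer needed, so lli's builder setup reduces to roughly the following (the surrounding variables are assumed context from lli.cpp, not shown in this hunk):

    builder.setEngineKind(ForceInterpreter
                          ? EngineKind::Interpreter
                          : EngineKind::JIT);
    ExecutionEngine *EE = builder.create();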
0 //===- JITMemoryManagerTest.cpp - Unit tests for the JIT memory manager ---===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "gtest/gtest.h"
10 #include "llvm/ADT/OwningPtr.h"
11 #include "llvm/ExecutionEngine/JITMemoryManager.h"
12 #include "llvm/DerivedTypes.h"
13 #include "llvm/Function.h"
14 #include "llvm/GlobalValue.h"
15
16 using namespace llvm;
17
18 namespace {
19
20 Function *makeFakeFunction() {
21 std::vector<const Type*> params;
22 const FunctionType *FTy = FunctionType::get(Type::VoidTy, params, false);
23 return Function::Create(FTy, GlobalValue::ExternalLinkage);
24 }
25
26 // Allocate three simple functions that fit in the initial slab. This exercises
27 // the code in the case that we don't have to allocate more memory to store the
28 // function bodies.
29 TEST(JITMemoryManagerTest, NoAllocations) {
30 OwningPtr<JITMemoryManager> MemMgr(
31 JITMemoryManager::CreateDefaultMemManager());
32 uintptr_t size;
33 uint8_t *start;
34 std::string Error;
35
36 // Allocate the functions.
37 OwningPtr<Function> F1(makeFakeFunction());
38 size = 1024;
39 start = MemMgr->startFunctionBody(F1.get(), size);
40 memset(start, 0xFF, 1024);
41 MemMgr->endFunctionBody(F1.get(), start, start + 1024);
42 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
43
44 OwningPtr<Function> F2(makeFakeFunction());
45 size = 1024;
46 start = MemMgr->startFunctionBody(F2.get(), size);
47 memset(start, 0xFF, 1024);
48 MemMgr->endFunctionBody(F2.get(), start, start + 1024);
49 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
50
51 OwningPtr<Function> F3(makeFakeFunction());
52 size = 1024;
53 start = MemMgr->startFunctionBody(F3.get(), size);
54 memset(start, 0xFF, 1024);
55 MemMgr->endFunctionBody(F3.get(), start, start + 1024);
56 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
57
58 // Deallocate them out of order, in case that matters.
59 MemMgr->deallocateMemForFunction(F2.get());
60 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
61 MemMgr->deallocateMemForFunction(F1.get());
62 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
63 MemMgr->deallocateMemForFunction(F3.get());
64 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
65 }
66
67 // Make three large functions that take up most of the space in the slab. Then
68 // try allocating three smaller functions that don't require additional slabs.
69 TEST(JITMemoryManagerTest, TestCodeAllocation) {
70 OwningPtr<JITMemoryManager> MemMgr(
71 JITMemoryManager::CreateDefaultMemManager());
72 uintptr_t size;
73 uint8_t *start;
74 std::string Error;
75
76 // Big functions are a little less than the default code slab size.
77 const uintptr_t smallFuncSize = 1024;
78 const uintptr_t bigFuncSize = (MemMgr->GetDefaultCodeSlabSize() -
79 smallFuncSize * 2);
80
81 // Allocate big functions
82 OwningPtr<Function> F1(makeFakeFunction());
83 size = bigFuncSize;
84 start = MemMgr->startFunctionBody(F1.get(), size);
85 ASSERT_LE(bigFuncSize, size);
86 memset(start, 0xFF, bigFuncSize);
87 MemMgr->endFunctionBody(F1.get(), start, start + bigFuncSize);
88 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
89
90 OwningPtr<Function> F2(makeFakeFunction());
91 size = bigFuncSize;
92 start = MemMgr->startFunctionBody(F2.get(), size);
93 ASSERT_LE(bigFuncSize, size);
94 memset(start, 0xFF, bigFuncSize);
95 MemMgr->endFunctionBody(F2.get(), start, start + bigFuncSize);
96 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
97
98 OwningPtr<Function> F3(makeFakeFunction());
99 size = bigFuncSize;
100 start = MemMgr->startFunctionBody(F3.get(), size);
101 ASSERT_LE(bigFuncSize, size);
102 memset(start, 0xFF, bigFuncSize);
103 MemMgr->endFunctionBody(F3.get(), start, start + bigFuncSize);
104 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
105
106 // Check that each large function took its own slab.
107 EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
108
109 // Allocate small functions
110 OwningPtr<Function> F4(makeFakeFunction());
111 size = smallFuncSize;
112 start = MemMgr->startFunctionBody(F4.get(), size);
113 ASSERT_LE(smallFuncSize, size);
114 memset(start, 0xFF, smallFuncSize);
115 MemMgr->endFunctionBody(F4.get(), start, start + smallFuncSize);
116 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
117
118 OwningPtr<Function> F5(makeFakeFunction());
119 size = smallFuncSize;
120 start = MemMgr->startFunctionBody(F5.get(), size);
121 ASSERT_LE(smallFuncSize, size);
122 memset(start, 0xFF, smallFuncSize);
123 MemMgr->endFunctionBody(F5.get(), start, start + smallFuncSize);
124 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
125
126 OwningPtr<Function> F6(makeFakeFunction());
127 size = smallFuncSize;
128 start = MemMgr->startFunctionBody(F6.get(), size);
129 ASSERT_LE(smallFuncSize, size);
130 memset(start, 0xFF, smallFuncSize);
131 MemMgr->endFunctionBody(F6.get(), start, start + smallFuncSize);
132 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
133
134 // Check that the small functions didn't allocate any new slabs.
135 EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
136
137 // Deallocate them out of order, in case that matters.
138 MemMgr->deallocateMemForFunction(F2.get());
139 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
140 MemMgr->deallocateMemForFunction(F1.get());
141 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
142 MemMgr->deallocateMemForFunction(F4.get());
143 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
144 MemMgr->deallocateMemForFunction(F3.get());
145 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
146 MemMgr->deallocateMemForFunction(F5.get());
147 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
148 MemMgr->deallocateMemForFunction(F6.get());
149 EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
150 }
151
152 // Allocate four global ints of varying widths and alignment, and check their
153 // alignment and overlap.
154 TEST(JITMemoryManagerTest, TestSmallGlobalInts) {
155 OwningPtr<JITMemoryManager> MemMgr(
156 JITMemoryManager::CreateDefaultMemManager());
157 uint8_t *a = (uint8_t *)MemMgr->allocateGlobal(8, 0);
158 uint16_t *b = (uint16_t*)MemMgr->allocateGlobal(16, 2);
159 uint32_t *c = (uint32_t*)MemMgr->allocateGlobal(32, 4);
160 uint64_t *d = (uint64_t*)MemMgr->allocateGlobal(64, 8);
161
162 // Check the alignment.
163 EXPECT_EQ(0U, ((uintptr_t)b) & 0x1);
164 EXPECT_EQ(0U, ((uintptr_t)c) & 0x3);
165 EXPECT_EQ(0U, ((uintptr_t)d) & 0x7);
166
167 // Initialize them each one at a time and make sure they don't overlap.
168 *a = 0xff;
169 *b = 0U;
170 *c = 0U;
171 *d = 0U;
172 EXPECT_EQ(0xffU, *a);
173 EXPECT_EQ(0U, *b);
174 EXPECT_EQ(0U, *c);
175 EXPECT_EQ(0U, *d);
176 *a = 0U;
177 *b = 0xffffU;
178 EXPECT_EQ(0U, *a);
179 EXPECT_EQ(0xffffU, *b);
180 EXPECT_EQ(0U, *c);
181 EXPECT_EQ(0U, *d);
182 *b = 0U;
183 *c = 0xffffffffU;
184 EXPECT_EQ(0U, *a);
185 EXPECT_EQ(0U, *b);
186 EXPECT_EQ(0xffffffffU, *c);
187 EXPECT_EQ(0U, *d);
188 *c = 0U;
189 *d = 0xffffffffffffffffU;
190 EXPECT_EQ(0U, *a);
191 EXPECT_EQ(0U, *b);
192 EXPECT_EQ(0U, *c);
193 EXPECT_EQ(0xffffffffffffffffU, *d);
194
195 // Make sure we didn't allocate any extra slabs for this tiny amount of data.
196 EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
197 }
198
199 // Allocate a small global, a big global, and a third global, and make sure we
200 // only use two slabs for that.
201 TEST(JITMemoryManagerTest, TestLargeGlobalArray) {
202 OwningPtr<JITMemoryManager> MemMgr(
203 JITMemoryManager::CreateDefaultMemManager());
204 size_t Size = 4 * MemMgr->GetDefaultDataSlabSize();
205 uint64_t *a = (uint64_t*)MemMgr->allocateGlobal(64, 8);
206 uint8_t *g = MemMgr->allocateGlobal(Size, 8);
207 uint64_t *b = (uint64_t*)MemMgr->allocateGlobal(64, 8);
208
209 // Check the alignment.
210 EXPECT_EQ(0U, ((uintptr_t)a) & 0x7);
211 EXPECT_EQ(0U, ((uintptr_t)g) & 0x7);
212 EXPECT_EQ(0U, ((uintptr_t)b) & 0x7);
213
214 // Initialize them to make sure we don't segfault and make sure they don't
215 // overlap.
216 memset(a, 0x1, 8);
217 memset(g, 0x2, Size);
218 memset(b, 0x3, 8);
219 EXPECT_EQ(0x0101010101010101U, *a);
220 // Just check the edges.
221 EXPECT_EQ(0x02U, g[0]);
222 EXPECT_EQ(0x02U, g[Size - 1]);
223 EXPECT_EQ(0x0303030303030303U, *b);
224
225 // Check the number of slabs.
226 EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
227 }
228
229 // Allocate lots of medium globals so that we can test moving the bump allocator
230 // to a new slab.
231 TEST(JITMemoryManagerTest, TestManyGlobals) {
232 OwningPtr<JITMemoryManager> MemMgr(
233 JITMemoryManager::CreateDefaultMemManager());
234 size_t SlabSize = MemMgr->GetDefaultDataSlabSize();
235 size_t Size = 128;
236 int Iters = (SlabSize / Size) + 1;
237
238 // We should start with one slab.
239 EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
240
241 // After allocating a bunch of globals, we should have two.
242 for (int I = 0; I < Iters; ++I)
243 MemMgr->allocateGlobal(Size, 8);
244 EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
245
246 // And after much more, we should have three.
247 for (int I = 0; I < Iters; ++I)
248 MemMgr->allocateGlobal(Size, 8);
249 EXPECT_EQ(3U, MemMgr->GetNumDataSlabs());
250 }
251
252 // Allocate lots of function stubs so that we can test moving the stub bump
253 // allocator to a new slab.
254 TEST(JITMemoryManagerTest, TestManyStubs) {
255 OwningPtr<JITMemoryManager> MemMgr(
256 JITMemoryManager::CreateDefaultMemManager());
257 size_t SlabSize = MemMgr->GetDefaultStubSlabSize();
258 size_t Size = 128;
259 int Iters = (SlabSize / Size) + 1;
260
261 // We should start with one slab.
262 EXPECT_EQ(1U, MemMgr->GetNumStubSlabs());
263
264 // After allocating a bunch of stubs, we should have two.
265 for (int I = 0; I < Iters; ++I)
266 MemMgr->allocateStub(NULL, Size, 8);
267 EXPECT_EQ(2U, MemMgr->GetNumStubSlabs());
268
269 // And after much more, we should have three.
270 for (int I = 0; I < Iters; ++I)
271 MemMgr->allocateStub(NULL, Size, 8);
272 EXPECT_EQ(3U, MemMgr->GetNumStubSlabs());
273 }
274
275 }