llvm.org GIT mirror llvm / 6dacb1c
[Alignment][NFC] Move and type functions from MathExtras to Alignment

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D68942

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@374773 91177308-0d34-0410-b5e6-96231b3b80d8

Guillaume Chatelet, 7 months ago
11 changed files with 86 additions and 42 deletions.
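Editorial note, not part of the commit: the hunks below assume the Align/MaybeAlign types introduced in D64790 (linked in the summary). As a quick orientation, here is a minimal usage sketch of the API this patch builds on and extends; the function name is made up for illustration.

#include "llvm/Support/Alignment.h"
#include <cassert>

void alignmentSketch(const void *Ptr) {
  llvm::Align A(16);                    // wraps a power-of-two alignment
  assert(A.value() == 16);
  assert(llvm::alignTo(100, A) == 112); // round a byte count up to the alignment
  llvm::MaybeAlign M;                   // optional alignment, empty here
  assert(!M);                           // code below tests it like `A ? ... : Size`
  (void)llvm::isAddrAligned(A, Ptr);    // pointer-based helper added by this patch
  (void)llvm::alignAddr(Ptr, A);        // pointer-based helper added by this patch
}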
153153 return SizeInBytes % (*Lhs).value() == 0;
154154 }
155155
156 /// Checks that Addr is a multiple of the alignment.
157 inline bool isAddrAligned(Align Lhs, const void *Addr) {
158 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
159 }
160
156161 /// Returns a multiple of A needed to store `Size` bytes.
157162 inline uint64_t alignTo(uint64_t Size, Align A) {
158163 return (Size + A.value() - 1) / A.value() * A.value();
164169 return A ? alignTo(Size, A.getValue()) : Size;
165170 }
166171
172 /// Aligns `Addr` to `Alignment` bytes, rounding up.
173 inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
174 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
175 assert(ArithAddr + Alignment.value() - 1 >= ArithAddr && "Overflow");
176 return alignTo(ArithAddr, Alignment);
177 }
178
167179 /// Returns the offset to the next integer (mod 2**64) that is greater than
168180 /// or equal to \p Value and is a multiple of \p Alignment.
169181 inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
170182 return alignTo(Value, Alignment) - Value;
183 }
184
185 /// Returns the necessary adjustment for aligning `Addr` to `Alignment`
186 /// bytes, rounding up.
187 inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
188 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
171189 }
172190
173191 /// Returns the log2 of the alignment.
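Editorial sketch, not part of the commit: the three pointer helpers added above are thin wrappers around the existing integer helpers, which the assertions below restate (the function name is made up for illustration).

#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

void pointerHelperSketch(const void *P) {
  const llvm::Align A(8);
  const uintptr_t V = reinterpret_cast<uintptr_t>(P);
  // isAddrAligned applies isAligned to the pointer's integer value.
  assert(llvm::isAddrAligned(A, P) == llvm::isAligned(A, V));
  // alignAddr applies alignTo to the pointer's integer value (and asserts on overflow).
  assert(llvm::alignAddr(P, A) == llvm::alignTo(V, A));
  // offsetToAlignedAddr applies offsetToAlignment to the pointer's integer value.
  assert(llvm::offsetToAlignedAddr(P, A) == llvm::offsetToAlignment(V, A));
}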
2121
2222 #include "llvm/ADT/Optional.h"
2323 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Support/Alignment.h"
2425 #include "llvm/Support/Compiler.h"
2526 #include "llvm/Support/ErrorHandling.h"
2627 #include "llvm/Support/MathExtras.h"
210211
211212 /// Allocate space at the specified alignment.
212213 LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
213 Allocate(size_t Size, size_t Alignment) {
214 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
215
214 Allocate(size_t Size, Align Alignment) {
216215 // Keep track of how many bytes we've allocated.
217216 BytesAllocated += Size;
218217
219 size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
218 size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
220219 assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
221220
222221 size_t SizeToAllocate = Size;
239238 }
240239
241240 // If Size is really big, allocate a separate slab for it.
242 size_t PaddedSize = SizeToAllocate + Alignment - 1;
241 size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
243242 if (PaddedSize > SizeThreshold) {
244243 void *NewSlab = Allocator.Allocate(PaddedSize, 0);
245244 // We own the new slab and don't want anyone reading anything other than
265264 __msan_allocated_memory(AlignedPtr, Size);
266265 __asan_unpoison_memory_region(AlignedPtr, Size);
267266 return AlignedPtr;
267 }
268
269 inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
270 Allocate(size_t Size, size_t Alignment) {
271 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
272 return Allocate(Size, Align(Alignment));
268273 }
269274
270275 // Pull in base class overloads.
460465 /// all memory allocated so far.
461466 void DestroyAll() {
462467 auto DestroyElements = [](char *Begin, char *End) {
463 assert(Begin == (char *)alignAddr(Begin, alignof(T)));
468 assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
464469 for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
465470 reinterpret_cast<T *>(Ptr)->~T();
466471 };
469474 ++I) {
470475 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
471476 std::distance(Allocator.Slabs.begin(), I));
472 char *Begin = (char *)alignAddr(*I, alignof(T));
477 char *Begin = (char *)alignAddr(*I, Align::Of<T>());
473478 char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
474479 : (char *)*I + AllocatedSlabSize;
475480
479484 for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
480485 void *Ptr = PtrAndSize.first;
481486 size_t Size = PtrAndSize.second;
482 DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
487 DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
488 (char *)Ptr + Size);
483489 }
484490
485491 Allocator.Reset();
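Editorial sketch, not part of the commit: after this hunk, BumpPtrAllocator's primary Allocate takes an Align, and the old size_t overload remains as a thin wrapper, so both calls below end up on the same typed path (the function name is made up for illustration).

#include "llvm/Support/Alignment.h"
#include "llvm/Support/Allocator.h"

void allocatorSketch() {
  llvm::BumpPtrAllocator Alloc;
  void *A = Alloc.Allocate(64, llvm::Align(16)); // new typed overload
  void *B = Alloc.Allocate(64, size_t(16));      // legacy overload, forwards via Align(16)
  (void)A;
  (void)B;
}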
285285 // an exact multiple of the element size.
286286 consumeError(std::move(EC));
287287 }
288 assert(llvm::alignmentAdjustment(Data.data(), alignof(T)) == 0);
288 assert(isAddrAligned(Align::Of<T>(), Data.data()));
289289 return *reinterpret_cast<const T *>(Data.data());
290290 }
291291
197197 if (auto EC = readBytes(Bytes, NumElements * sizeof(T)))
198198 return EC;
199199
200 assert(alignmentAdjustment(Bytes.data(), alignof(T)) == 0 &&
200 assert(isAddrAligned(Align::Of<T>(), Bytes.data()) &&
201201 "Reading at invalid alignment!");
202202
203203 Array = ArrayRef<T>(reinterpret_cast<const T *>(Bytes.data()), NumElements);
666666 return (A | B) & (1 + ~(A | B));
667667 }
668668
669 /// Aligns \c Addr to \c Alignment bytes, rounding up.
670 ///
671 /// Alignment should be a power of two. This method rounds up, so
672 /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
673 inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
674 assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
675 "Alignment is not a power of two!");
676
677 assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
678
679 return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
680 }
681
682 /// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
683 /// bytes, rounding up.
684 inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
685 return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
686 }
687
688669 /// Returns the next power of two (in 64-bits) that is strictly greater than A.
689670 /// Returns zero on overflow.
690671 inline uint64_t NextPowerOf2(uint64_t A) {
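Editorial summary, not part of the commit: the removals above and the call-site hunks elsewhere in the diff follow a small migration pattern from the untyped MathExtras helpers to the typed Alignment.h ones, sketched below (function name made up for illustration).

#include "llvm/Support/Alignment.h"

void migrationSketch(const void *Ptr) {
  // alignAddr(Ptr, alignof(T))       ->  alignAddr(Ptr, Align::Of<T>())
  (void)llvm::alignAddr(Ptr, llvm::Align::Of<int>());
  // alignmentAdjustment(Ptr, 8)      ->  offsetToAlignedAddr(Ptr, Align(8))
  (void)llvm::offsetToAlignedAddr(Ptr, llvm::Align(8));
  // alignmentAdjustment(Ptr, N) == 0 ->  isAddrAligned(Align(N), Ptr)
  (void)llvm::isAddrAligned(llvm::Align(8), Ptr);
}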
4646 #define LLVM_SUPPORT_TRAILINGOBJECTS_H
4747
4848 #include "llvm/Support/AlignOf.h"
49 #include "llvm/Support/Alignment.h"
4950 #include "llvm/Support/Compiler.h"
5051 #include "llvm/Support/MathExtras.h"
5152 #include "llvm/Support/type_traits.h"
166167
167168 if (requiresRealignment())
168169 return reinterpret_cast<const NextTy *>(
169 llvm::alignAddr(Ptr, alignof(NextTy)));
170 alignAddr(Ptr, Align::Of<NextTy>()));
170171 else
171172 return reinterpret_cast<const NextTy *>(Ptr);
172173 }
180181 Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
181182
182183 if (requiresRealignment())
183 return reinterpret_cast<NextTy *>(llvm::alignAddr(Ptr, alignof(NextTy)));
184 return reinterpret_cast<NextTy *>(alignAddr(Ptr, Align::Of<NextTy>()));
184185 else
185186 return reinterpret_cast<NextTy *>(Ptr);
186187 }
505505 return make_error<CoverageMapError>(coveragemap_error::malformed);
506506 // Each coverage map has an alignment of 8, so we need to adjust alignment
507507 // before reading the next map.
508 Buf += alignmentAdjustment(Buf, 8);
508 Buf += offsetToAlignedAddr(Buf, Align(8));
509509
510510 auto CFR = reinterpret_cast<const FuncRecordType *>(FunBuf);
511511 while ((const char *)CFR < FunEnd) {
647647 // Skip the padding bytes because coverage map data has an alignment of 8.
648648 if (CoverageMapping.empty())
649649 return make_error<CoverageMapError>(coveragemap_error::truncated);
650 size_t Pad = alignmentAdjustment(CoverageMapping.data(), 8);
650 size_t Pad = offsetToAlignedAddr(CoverageMapping.data(), Align(8));
651651 if (CoverageMapping.size() < Pad)
652652 return make_error<CoverageMapError>(coveragemap_error::malformed);
653653 CoverageMapping = CoverageMapping.substr(Pad);
175175
176176 std::error_code
177177 Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
178 static const size_t PageSize = Process::getPageSizeEstimate();
178 static const Align PageSize = Align(Process::getPageSizeEstimate());
179179 if (M.Address == nullptr || M.AllocatedSize == 0)
180180 return std::error_code();
181181
183183 return std::error_code(EINVAL, std::generic_category());
184184
185185 int Protect = getPosixProtectionFlags(Flags);
186 uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
187 uintptr_t End = alignAddr((uint8_t *)M.Address + M.AllocatedSize, PageSize);
186 uintptr_t Start = alignAddr((const uint8_t *)M.Address - PageSize.value() + 1, PageSize);
187 uintptr_t End = alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);
188188
189189 bool InvalidateCache = (Flags & MF_EXEC);
190190
8989 uint64_t alignment;
9090 uint64_t offset;
9191 uint64_t rounded;
92 const void *forgedAddr() const {
93 // A value of any integral or enumeration type can be converted to a
94 // pointer type.
95 return reinterpret_cast<const void *>(offset);
96 }
9297 } kTests[] = {
9398 // MaybeAlign
9499 {0, 0, 0},
115120 // Test Align
116121 if (A) {
117122 EXPECT_EQ(alignTo(T.offset, A.getValue()), T.rounded);
123 EXPECT_EQ(alignAddr(T.forgedAddr(), A.getValue()), T.rounded);
118124 }
119125 }
120126 }
173179 EXPECT_EQ(Expected, Actual);
174180 }
175181
176 TEST(AlignmentTest, isAligned) {
182 TEST(AlignmentTest, isAligned_isAddrAligned) {
177183 struct {
178184 uint64_t alignment;
179185 uint64_t offset;
180186 bool isAligned;
187 const void *forgedAddr() const {
188 // A value of any integral or enumeration type can be converted to a
189 // pointer type.
190 return reinterpret_cast<const void *>(offset);
191 }
181192 } kTests[] = {
182 // MaybeAlign / Align
183193 {1, 0, true}, {1, 1, true}, {1, 5, true}, {2, 0, true},
184194 {2, 1, false}, {2, 2, true}, {2, 7, false}, {2, 16, true},
185195 {4, 0, true}, {4, 1, false}, {4, 4, true}, {4, 6, false},
191201 // Test Align
192202 if (A) {
193203 EXPECT_EQ(isAligned(A.getValue(), T.offset), T.isAligned);
194 }
204 EXPECT_EQ(isAddrAligned(A.getValue(), T.forgedAddr()), T.isAligned);
205 }
206 }
207 }
208
209 TEST(AlignmentTest, offsetToAlignment) {
210 struct {
211 uint64_t alignment;
212 uint64_t offset;
213 uint64_t alignedOffset;
214 const void *forgedAddr() const {
215 // A value of any integral or enumeration type can be converted to a
216 // pointer type.
217 return reinterpret_cast<const void *>(offset);
218 }
219 } kTests[] = {
220 {1, 0, 0}, {1, 1, 0}, {1, 5, 0}, {2, 0, 0}, {2, 1, 1}, {2, 2, 0},
221 {2, 7, 1}, {2, 16, 0}, {4, 0, 0}, {4, 1, 3}, {4, 4, 0}, {4, 6, 2},
222 };
223 for (const auto &T : kTests) {
224 const Align A(T.alignment);
225 EXPECT_EQ(offsetToAlignment(T.offset, A), T.alignedOffset);
226 EXPECT_EQ(offsetToAlignedAddr(T.forgedAddr(), A), T.alignedOffset);
195227 }
196228 }
197229
348380 }
349381 }
350382
383 TEST(AlignmentDeathTest, AlignAddr) {
384 const void *const unaligned_high_ptr =
385 reinterpret_cast<const void *>(std::numeric_limits<uintptr_t>::max() - 1);
386 EXPECT_DEATH(alignAddr(unaligned_high_ptr, Align(16)), "Overflow");
387 }
388
351389 #endif // NDEBUG
352390
353391 } // end anonymous namespace
144144 void *Allocate(size_t Size, size_t /*Alignment*/) {
145145 // Allocate space for the alignment, the slab, and a void* that goes right
146146 // before the slab.
147 size_t Alignment = 4096;
148 void *MemBase = safe_malloc(Size + Alignment - 1 + sizeof(void*));
147 Align Alignment(4096);
148 void *MemBase = safe_malloc(Size + Alignment.value() - 1 + sizeof(void *));
149149
150150 // Find the slab start.
151151 void *Slab = (void *)alignAddr((char*)MemBase + sizeof(void *), Alignment);
231231 EXPECT_EQ(C->getTrailingObjects(), reinterpret_cast(C + 1));
232232 EXPECT_EQ(C->getTrailingObjects(),
233233 reinterpret_cast(llvm::alignAddr(
234 reinterpret_cast(C + 1) + 1, alignof(long))));
234 reinterpret_cast(C + 1) + 1, Align::Of<long>())));
235235 }
236236 }
237237