llvm.org GIT mirror llvm / 3edb0ec
Update to use new name alignTo().
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@257804 91177308-0d34-0410-b5e6-96231b3b80d8
Rui Ueyama, 3 years ago
48 changed file(s) with 126 addition(s) and 137 deletion(s).
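For reference, the helper being renamed is the rounding utility from include/llvm/Support/MathExtras.h. The snippet below is a minimal standalone sketch of its documented behaviour (round Value up to the next multiple of Align, with an optional Skew offset), not a copy of the upstream implementation; alignToSketch is a local stand-in name.

#include <cassert>
#include <cstdint>

// Minimal sketch of llvm::alignTo(Value, Align, Skew = 0): the smallest
// integer N >= Value such that N % Align == Skew % Align. Align must be
// non-zero; Skew defaults to 0 (plain round-up to a multiple of Align).
static uint64_t alignToSketch(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0 && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

int main() {
  assert(alignToSketch(5, 8) == 8);           // round 5 up to a multiple of 8
  assert(alignToSketch(17, 8) == 24);
  assert(alignToSketch(321, 255, 42) == 552); // skewed variant, as in the unit test below
  return 0;
}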
243243
244244 BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
245245 Bits[I / BITWORD_SIZE] |= PrefixMask;
246 I = RoundUpToAlignment(I, BITWORD_SIZE);
246 I = alignTo(I, BITWORD_SIZE);
247247
248248 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
249249 Bits[I / BITWORD_SIZE] = ~0UL;
282282
283283 BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
284284 Bits[I / BITWORD_SIZE] &= ~PrefixMask;
285 I = RoundUpToAlignment(I, BITWORD_SIZE);
285 I = alignTo(I, BITWORD_SIZE);
286286
287287 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
288288 Bits[I / BITWORD_SIZE] = 0UL;
280280 /// be able to store all arguments and such that the alignment requirement
281281 /// of each of the arguments is satisfied.
282282 unsigned getAlignedCallFrameSize() const {
283 return RoundUpToAlignment(StackOffset, MaxStackArgAlign);
283 return alignTo(StackOffset, MaxStackArgAlign);
284284 }
285285
286286 /// isAllocated - Return true if the specified register (or an alias) is
411411 /// and alignment.
412412 unsigned AllocateStack(unsigned Size, unsigned Align) {
413413 assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
414 StackOffset = RoundUpToAlignment(StackOffset, Align);
414 StackOffset = alignTo(StackOffset, Align);
415415 unsigned Result = StackOffset;
416416 StackOffset += Size;
417417 MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
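A worked example of the AllocateStack bookkeeping above, as a standalone sketch with hypothetical starting values (not the CCState class itself): with StackOffset at 20, a request for 16 bytes at 8-byte alignment lands at offset 24 and leaves StackOffset at 40.

#include <algorithm>
#include <cassert>

// Standalone sketch of the AllocateStack logic shown above.
struct StackAllocSketch {
  unsigned StackOffset = 0;
  unsigned MaxStackArgAlign = 1;

  unsigned AllocateStack(unsigned Size, unsigned Align) {
    assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
    StackOffset = (StackOffset + Align - 1) / Align * Align; // alignTo
    unsigned Result = StackOffset;
    StackOffset += Size;
    MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
    return Result;
  }
};

int main() {
  StackAllocSketch S;
  S.StackOffset = 20;                   // hypothetical starting offset
  assert(S.AllocateStack(16, 8) == 24); // rounded up from 20 to 24
  assert(S.StackOffset == 40);          // 24 + 16
  assert(S.MaxStackArgAlign == 8);
  return 0;
}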
155155 {
156156 TargetAddress NextCodeAddr = ObjAllocs.RemoteCodeAddr;
157157 for (auto &Alloc : ObjAllocs.CodeAllocs) {
158 NextCodeAddr = RoundUpToAlignment(NextCodeAddr, Alloc.getAlign());
158 NextCodeAddr = alignTo(NextCodeAddr, Alloc.getAlign());
159159 Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextCodeAddr);
160160 DEBUG(dbgs() << " code: "
161161 << static_cast<void *>(Alloc.getLocalAddress())
167167 {
168168 TargetAddress NextRODataAddr = ObjAllocs.RemoteRODataAddr;
169169 for (auto &Alloc : ObjAllocs.RODataAllocs) {
170 NextRODataAddr =
171 RoundUpToAlignment(NextRODataAddr, Alloc.getAlign());
170 NextRODataAddr = alignTo(NextRODataAddr, Alloc.getAlign());
172171 Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRODataAddr);
173172 DEBUG(dbgs() << " ro-data: "
174173 << static_cast<void *>(Alloc.getLocalAddress())
181180 {
182181 TargetAddress NextRWDataAddr = ObjAllocs.RemoteRWDataAddr;
183182 for (auto &Alloc : ObjAllocs.RWDataAllocs) {
184 NextRWDataAddr =
185 RoundUpToAlignment(NextRWDataAddr, Alloc.getAlign());
183 NextRWDataAddr = alignTo(NextRWDataAddr, Alloc.getAlign());
186184 Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRWDataAddr);
187185 DEBUG(dbgs() << " rw-data: "
188186 << static_cast<void *>(Alloc.getLocalAddress())
283281
284282 char *getLocalAddress() const {
285283 uintptr_t LocalAddr = reinterpret_cast<uintptr_t>(Contents.get());
286 LocalAddr = RoundUpToAlignment(LocalAddr, Align);
284 LocalAddr = alignTo(LocalAddr, Align);
287285 return reinterpret_cast<char *>(LocalAddr);
288286 }
289287
386386 /// returns 12 or 16 for x86_fp80, depending on alignment.
387387 uint64_t getTypeAllocSize(Type *Ty) const {
388388 // Round up to the next alignment boundary.
389 return RoundUpToAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
389 return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
390390 }
391391
392392 /// \brief Returns the offset in bits between successive objects of the
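The x86_fp80 numbers in the getTypeAllocSize comment above follow directly from this round-up: the type's store size is 10 bytes, so the alloc size is 12 under 4-byte ABI alignment and 16 under 16-byte alignment. A worked check, assuming those store-size and alignment figures:

#include <cassert>
#include <cstdint>

// Worked check of the getTypeAllocSize comment above: alloc size is the
// store size rounded up to the ABI alignment. x86_fp80 stores 10 bytes.
static uint64_t allocSize(uint64_t StoreSize, uint64_t ABIAlign) {
  return (StoreSize + ABIAlign - 1) / ABIAlign * ABIAlign; // alignTo
}

int main() {
  assert(allocSize(10, 4) == 12);  // x86_fp80 with 4-byte ABI alignment
  assert(allocSize(10, 16) == 16); // x86_fp80 with 16-byte ABI alignment
  return 0;
}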
629629 /// or equal to \p Value and is a multiple of \p Align. \p Align must be
630630 /// non-zero.
631631 inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
632 return RoundUpToAlignment(Value, Align) - Value;
632 return alignTo(Value, Align) - Value;
633633 }
634634
635635 /// SignExtend32 - Sign extend B-bit number x to 32-bit int.
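A short worked example of the identity in the OffsetToAlignment hunk above: the padding needed to reach the next boundary is alignTo(Value, Align) - Value. The helper below is a local stand-in mirroring the documented contract, not the upstream function.

#include <cassert>
#include <cstdint>

// Local stand-in for OffsetToAlignment's documented contract:
// bytes of padding needed to reach the next multiple of Align (non-zero).
static uint64_t offsetToAlignmentSketch(uint64_t Value, uint64_t Align) {
  uint64_t RoundedUp = (Value + Align - 1) / Align * Align; // alignTo(Value, Align)
  return RoundedUp - Value;
}

int main() {
  assert(offsetToAlignmentSketch(5, 8) == 3);  // 8 - 5
  assert(offsetToAlignmentSketch(16, 8) == 0); // already aligned
  assert(offsetToAlignmentSketch(17, 4) == 3); // 20 - 17
  return 0;
}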
196196 typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
197197 return additionalSizeToAllocImpl(
198198 (requiresRealignment()
199 ? llvm::RoundUpToAlignment(SizeSoFar, llvm::alignOf<NextTy>())
199 ? llvm::alignTo(SizeSoFar, llvm::alignOf<NextTy>())
200200 : SizeSoFar) +
201201 sizeof(NextTy) * Count1,
202202 MoreCounts...);
7474 ///
7575 int alignSPAdjust(int SPAdj) const {
7676 if (SPAdj < 0) {
77 SPAdj = -RoundUpToAlignment(-SPAdj, StackAlignment);
77 SPAdj = -alignTo(-SPAdj, StackAlignment);
7878 } else {
79 SPAdj = RoundUpToAlignment(SPAdj, StackAlignment);
79 SPAdj = alignTo(SPAdj, StackAlignment);
8080 }
8181 return SPAdj;
8282 }
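Note that alignSPAdjust above rounds the magnitude of the adjustment, so both growing and shrinking adjustments stay a multiple of the stack alignment. A quick check, assuming a StackAlignment of 16:

#include <cassert>

// Sketch of the rounding in alignSPAdjust above, with the stack alignment
// passed explicitly (the real method reads it from TargetFrameLowering).
static int alignSPAdjustSketch(int SPAdj, int StackAlignment) {
  auto roundUp = [&](int V) {
    return (V + StackAlignment - 1) / StackAlignment * StackAlignment;
  };
  return SPAdj < 0 ? -roundUp(-SPAdj) : roundUp(SPAdj);
}

int main() {
  assert(alignSPAdjustSketch(20, 16) == 32);   // growth rounded up to 32
  assert(alignSPAdjustSketch(-20, 16) == -32); // shrink rounded to the same magnitude
  return 0;
}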
375375
376376 APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
377377 if (RoundToAlign && Align)
378 return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
378 return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align));
379379 return Size;
380380 }
381381
5252 Align = MinAlign;
5353 MF.getFrameInfo()->ensureMaxAlignment(Align);
5454 MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
55 Size = unsigned(RoundUpToAlignment(Size, MinAlign));
55 Size = unsigned(alignTo(Size, MinAlign));
5656 unsigned Offset = AllocateStack(Size, Align);
5757 addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
5858 }
511511 MaxAlign = std::max(MaxAlign, Align);
512512
513513 // Adjust to alignment boundary.
514 Offset = RoundUpToAlignment(Offset, Align, Skew);
514 Offset = alignTo(Offset, Align, Skew);
515515
516516 if (StackGrowsDown) {
517517 DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
595595
596596 unsigned Align = MFI->getObjectAlignment(i);
597597 // Adjust to alignment boundary
598 Offset = RoundUpToAlignment(Offset, Align, Skew);
598 Offset = alignTo(Offset, Align, Skew);
599599
600600 MFI->setObjectOffset(i, -Offset); // Set the computed offset
601601 }
604604 for (int i = MaxCSFI; i >= MinCSFI ; --i) {
605605 unsigned Align = MFI->getObjectAlignment(i);
606606 // Adjust to alignment boundary
607 Offset = RoundUpToAlignment(Offset, Align, Skew);
607 Offset = alignTo(Offset, Align, Skew);
608608
609609 MFI->setObjectOffset(i, Offset);
610610 Offset += MFI->getObjectSize(i);
637637 unsigned Align = MFI->getLocalFrameMaxAlign();
638638
639639 // Adjust to alignment boundary.
640 Offset = RoundUpToAlignment(Offset, Align, Skew);
640 Offset = alignTo(Offset, Align, Skew);
641641
642642 DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
643643
756756 // If the frame pointer is eliminated, all frame offsets will be relative to
757757 // SP not FP. Align to MaxAlign so this works.
758758 StackAlign = std::max(StackAlign, MaxAlign);
759 Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
759 Offset = alignTo(Offset, StackAlign, Skew);
760760 }
761761
762762 // Update frame info to pretend that this is part of the stack...
105105 Type *ElTy = GV->getType()->getElementType();
106106 size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
107107 void *RawMemory = ::operator new(
108 RoundUpToAlignment(sizeof(GVMemoryBlock),
109 TD.getPreferredAlignment(GV))
110 + GVSize);
108 alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlignment(GV)) + GVSize);
111109 new(RawMemory) GVMemoryBlock(GV);
112110 return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
113111 }
5151 // Add padding if necessary to align the data element properly.
5252 if ((StructSize & (TyAlign-1)) != 0) {
5353 IsPadded = true;
54 StructSize = RoundUpToAlignment(StructSize, TyAlign);
54 StructSize = alignTo(StructSize, TyAlign);
5555 }
5656
5757 // Keep track of maximum alignment constraint.
6868 // and all array elements would be aligned correctly.
6969 if ((StructSize & (StructAlignment-1)) != 0) {
7070 IsPadded = true;
71 StructSize = RoundUpToAlignment(StructSize, StructAlignment);
71 StructSize = alignTo(StructSize, StructAlignment);
7272 }
7373 }
7474
9898 Ty = cast<PointerType>(Ty)->getElementType();
9999 // Size should be aligned to pointer size.
100100 unsigned PtrSize = DL.getPointerSize();
101 ArgWords += RoundUpToAlignment(DL.getTypeAllocSize(Ty), PtrSize);
101 ArgWords += alignTo(DL.getTypeAllocSize(Ty), PtrSize);
102102 }
103103
104104 OS << '@' << ArgWords;
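The '@' suffix emitted above is the total argument byte count, with each argument's alloc size rounded up to the pointer size before being summed. A worked computation under hypothetical argument sizes (the argWords helper below is only an illustration of the accumulation, not the mangler itself):

#include <cassert>
#include <initializer_list>

// Worked computation of the ArgWords accumulation above: each argument's
// alloc size is rounded up to the pointer size before being summed.
static unsigned argWords(std::initializer_list<unsigned> ArgSizes,
                         unsigned PtrSize) {
  unsigned Words = 0;
  for (unsigned Size : ArgSizes)
    Words += (Size + PtrSize - 1) / PtrSize * PtrSize; // alignTo(Size, PtrSize)
  return Words;
}

int main() {
  // Hypothetical 32-bit target: 4-, 1- and 8-byte arguments -> "@16" suffix.
  assert(argWords({4, 1, 8}, 4) == 16);
  return 0;
}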
432432 size_t OpSize = NumOps * sizeof(MDOperand);
433433 // uint64_t is the most aligned type we need to support (ensured by static_assert
434434 // above)
435 OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
435 OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());
436436 void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;
437437 MDOperand *O = static_cast<MDOperand *>(Ptr);
438438 for (MDOperand *E = O - NumOps; O != E; --O)
443443 void MDNode::operator delete(void *Mem) {
444444 MDNode *N = static_cast<MDNode *>(Mem);
445445 size_t OpSize = N->NumOperands * sizeof(MDOperand);
446 OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
446 OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());
447447
448448 MDOperand *O = static_cast<MDOperand *>(Mem);
449449 for (MDOperand *E = O - N->NumOperands; O != E; --O)
403403 unsigned Size = sizeof(MachO::linker_option_command);
404404 for (const std::string &Option : Options)
405405 Size += Option.size() + 1;
406 return RoundUpToAlignment(Size, is64Bit ? 8 : 4);
406 return alignTo(Size, is64Bit ? 8 : 4);
407407 }
408408
409409 void MachObjectWriter::writeLinkerOptionsLoadCommand(
605605 const MCAsmLayout &Layout) {
606606 uint64_t StartAddress = 0;
607607 for (const MCSection *Sec : Layout.getSectionOrder()) {
608 StartAddress = RoundUpToAlignment(StartAddress, Sec->getAlignment());
608 StartAddress = alignTo(StartAddress, Sec->getAlignment());
609609 SectionAddress[Sec] = StartAddress;
610610 StartAddress += Layout.getSectionAddressSize(Sec);
611611
735735
736736 // Add the loh load command size, if used.
737737 uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
738 uint64_t LOHSize = RoundUpToAlignment(LOHRawSize, is64Bit() ? 8 : 4);
738 uint64_t LOHSize = alignTo(LOHRawSize, is64Bit() ? 8 : 4);
739739 if (LOHSize) {
740740 ++NumLoadCommands;
741741 LoadCommandsSize += sizeof(MachO::linkedit_data_command);
923923
924924 if (IsPhysicalSection(Sec)) {
925925 // Align the section data to a four byte boundary.
926 offset = RoundUpToAlignment(offset, 4);
926 offset = alignTo(offset, 4);
927927 Sec->Header.PointerToRawData = offset;
928928
929929 offset += Sec->Header.SizeOfRawData;
134134 SmallString<256> NameBuf;
135135 StringRef NameRef = BufferName.toStringRef(NameBuf);
136136 size_t AlignedStringLen =
137 RoundUpToAlignment(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
137 alignTo(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
138138 size_t RealLen = AlignedStringLen + Size + 1;
139139 char *Mem = static_cast<char *>(operator new(RealLen, std::nothrow));
140140 if (!Mem)
169169 unsigned Align = getStackAlignment();
170170
171171 int64_t Amount = I->getOperand(0).getImm();
172 Amount = RoundUpToAlignment(Amount, Align);
172 Amount = alignTo(Amount, Align);
173173 if (!IsDestroy)
174174 Amount = -Amount;
175175
25442544 // This is a non-standard ABI so by fiat I say we're allowed to make full
25452545 // use of the stack area to be popped, which must be aligned to 16 bytes in
25462546 // any case:
2547 StackArgSize = RoundUpToAlignment(StackArgSize, 16);
2547 StackArgSize = alignTo(StackArgSize, 16);
25482548
25492549 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
25502550 // a multiple of 16.
29582958
29592959 // Since callee will pop argument stack as a tail call, we must keep the
29602960 // popped size 16-byte aligned.
2961 NumBytes = RoundUpToAlignment(NumBytes, 16);
2961 NumBytes = alignTo(NumBytes, 16);
29622962
29632963 // FPDiff will be negative if this tail call requires more space than we
29642964 // would automatically have in our incoming argument space. Positive if we
31983198 Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
31993199 InFlag = Chain.getValue(1);
32003200
3201 uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
3202 ? RoundUpToAlignment(NumBytes, 16)
3203 : 0;
3201 uint64_t CalleePopBytes =
3202 DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
32043203
32053204 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
32063205 DAG.getIntPtrConstant(CalleePopBytes, DL, true),
975975
976976 // Do alignment, specialized to power of 2 and for signed ints,
977977 // avoiding having to do a C-style cast from uint64_t to int when
978 // using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
978 // using alignTo from include/llvm/Support/MathExtras.h.
979979 // FIXME: Move this function to include/MathExtras.h?
980980 static int alignTo(int Num, int PowOf2) {
981981 return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
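The mask form used by this local helper only works for power-of-two alignments: adding PowOf2 - 1 and clearing the low bits rounds any value, including negative ones, up toward the next multiple. A quick check (two's complement assumed for the negative case):

#include <cassert>

// The power-of-two mask trick from the local alignTo(int, int) above.
static int alignToPow2(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

int main() {
  assert(alignToPow2(13, 8) == 16);   // 13 + 7 = 20; clear the low 3 bits -> 16
  assert(alignToPow2(16, 8) == 16);   // already aligned, unchanged
  assert(alignToPow2(-20, 8) == -16); // signed values round up toward +infinity
  return 0;
}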
326326
327327 if (MFI->getShaderType() == ShaderType::COMPUTE) {
328328 OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
329 OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
329 OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
330330 }
331331 }
332332
502502
503503 ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
504504 ProgInfo.LDSBlocks =
505 RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
505 alignTo(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
506506
507507 // Scratch is allocated in 256 dword blocks.
508508 unsigned ScratchAlignShift = 10;
510510 // is used by the entire wave. ProgInfo.ScratchSize is the amount of
511511 // scratch memory used per thread.
512512 ProgInfo.ScratchBlocks =
513 RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
514 1 << ScratchAlignShift) >> ScratchAlignShift;
513 alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
514 1 << ScratchAlignShift) >>
515 ScratchAlignShift;
515516
516517 ProgInfo.ComputePGMRSrc1 =
517518 S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
8686 int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
8787
8888 for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
89 OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
89 OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(i));
9090 OffsetBytes += MFI->getObjectSize(i);
9191 // Each register holds 4 bytes, so we must always align the offset to at
9292 // least 4 bytes, so that 2 frame objects won't share the same register.
93 OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
93 OffsetBytes = alignTo(OffsetBytes, 4);
9494 }
9595
9696 if (FI != -1)
97 OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
97 OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(FI));
9898
9999 return OffsetBytes / (getStackWidth(MF) * 4);
100100 }
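As the comment above explains, every frame object is padded out to at least one 4-byte register slot. A simplified worked example with three 1-byte objects, assuming a stack width of 1 and ignoring per-object alignment (all objects here are 1 byte): they land at byte offsets 0, 4 and 8, i.e. register indices 0, 1 and 2.

#include <cassert>
#include <vector>

// Simplified sketch of the offset accumulation above: each object's size is
// added, then the running offset is rounded up to the next 4-byte slot.
static unsigned frameIndexToReg(const std::vector<unsigned> &Sizes,
                                unsigned Index, unsigned StackWidth = 1) {
  unsigned OffsetBytes = 0;
  for (unsigned I = 0; I < Index; ++I) {
    OffsetBytes += Sizes[I];
    OffsetBytes = (OffsetBytes + 3) / 4 * 4; // round each object up to 4 bytes
  }
  return OffsetBytes / (StackWidth * 4);
}

int main() {
  std::vector<unsigned> Sizes = {1, 1, 1}; // three 1-byte objects
  assert(frameIndexToReg(Sizes, 0) == 0);
  assert(frameIndexToReg(Sizes, 1) == 1); // padded to 4 bytes -> next register
  assert(frameIndexToReg(Sizes, 2) == 2);
  return 0;
}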
141141 }
142142
143143 void CFStack::updateMaxStackSize() {
144 unsigned CurrentStackSize = CurrentEntries +
145 (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
144 unsigned CurrentStackSize =
145 CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
146146 MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
147147 }
148148
210210
211211 // First consume all registers that would give an unaligned object. Whether
212212 // we go on stack or in regs, no-one will be using them in future.
213 unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
213 unsigned RegAlign = alignTo(Align, 4) / 4;
214214 while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
215215 State.AllocateReg(RegList[RegIdx++]);
216216
280280
281281 unsigned RequiredExtraInstrs;
282282 if (ExtraRange)
283 RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
283 RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
284284 else if (RangeAfterCopy > 0)
285285 // We need an extra instruction but none is available
286286 RequiredExtraInstrs = 1000000;
435435 // Get the number of bytes to allocate from the FrameInfo.
436436 unsigned FrameSize = MFI->getStackSize();
437437 // Round up the max call frame size to the max alignment on the stack.
438 unsigned MaxCFA = RoundUpToAlignment(MFI->getMaxCallFrameSize(), MaxAlign);
438 unsigned MaxCFA = alignTo(MFI->getMaxCallFrameSize(), MaxAlign);
439439 MFI->setMaxCallFrameSize(MaxCFA);
440440
441 FrameSize = MaxCFA + RoundUpToAlignment(FrameSize, MaxAlign);
441 FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
442442 MFI->setStackSize(FrameSize);
443443
444444 bool AlignStack = (MaxAlign > getStackAlignment());
11791179 // for now (will return false). We need to determine the right alignment
11801180 // based on the normal alignment for the underlying machine type.
11811181 //
1182 unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);
1182 unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
11831183
11841184 unsigned BEAlign = 0;
11851185 if (ArgSize < 8 && !Subtarget->isLittle())
121121 // Conservatively assume all callee-saved registers will be saved.
122122 for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
123123 unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
124 Offset = RoundUpToAlignment(Offset + Size, Size);
124 Offset = alignTo(Offset + Size, Size);
125125 }
126126
127127 unsigned MaxAlign = MFI->getMaxAlignment();
132132
133133 // Iterate over other objects.
134134 for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
135 Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
135 Offset = alignTo(Offset + MFI->getObjectSize(I), MaxAlign);
136136
137137 // Call frame.
138138 if (MFI->adjustsStack() && hasReservedCallFrame(MF))
139 Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
140 std::max(MaxAlign, getStackAlignment()));
139 Offset = alignTo(Offset + MFI->getMaxCallFrameSize(),
140 std::max(MaxAlign, getStackAlignment()));
141141
142 return RoundUpToAlignment(Offset, getStackAlignment());
142 return alignTo(Offset, getStackAlignment());
143143 }
144144
145145 // Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
18721872 auto &TD = DAG.getDataLayout();
18731873 unsigned ArgSizeInBytes =
18741874 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
1875 SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
1876 DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
1877 ArgSlotSizeInBytes),
1878 DL, VAList.getValueType()));
1875 SDValue Tmp3 =
1876 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
1877 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
1878 DL, VAList.getValueType()));
18791879 // Store the incremented VAList to the legalized pointer
18801880 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
18811881 MachinePointerInfo(SV), false, false, 0);
26032603 // ByValChain is the output chain of the last Memcpy node created for copying
26042604 // byval arguments to the stack.
26052605 unsigned StackAlignment = TFL->getStackAlignment();
2606 NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
2606 NextStackOffset = alignTo(NextStackOffset, StackAlignment);
26072607 SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
26082608
26092609 if (!IsTailCall)
37863786 int VaArgOffset;
37873787
37883788 if (ArgRegs.size() == Idx)
3789 VaArgOffset =
3790 RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
3789 VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
37913790 else {
37923791 VaArgOffset =
37933792 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
38533852 }
38543853
38553854 // Mark the registers allocated.
3856 Size = RoundUpToAlignment(Size, RegSizeInBytes);
3855 Size = alignTo(Size, RegSizeInBytes);
38573856 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
38583857 Size -= RegSizeInBytes, ++I, ++NumRegs)
38593858 State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
145145 // Finally, ensure that the size is sufficiently aligned for the
146146 // data on the stack.
147147 if (MFI->getMaxAlignment() > 0) {
148 NumBytes = RoundUpToAlignment(NumBytes, MFI->getMaxAlignment());
148 NumBytes = alignTo(NumBytes, MFI->getMaxAlignment());
149149 }
150150
151151 // Update stack size with corrected value.
11301130 unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
11311131
11321132 // Keep stack frames 16-byte aligned.
1133 ArgsSize = RoundUpToAlignment(ArgsSize, 16);
1133 ArgsSize = alignTo(ArgsSize, 16);
11341134
11351135 // Varargs calls require special treatment.
11361136 if (CLI.IsVarArg)
6363 frameSize += 128;
6464 // Frames with calls must also reserve space for 6 outgoing arguments
6565 // whether they are used or not. LowerCall_64 takes care of that.
66 frameSize = RoundUpToAlignment(frameSize, 16);
66 frameSize = alignTo(frameSize, 16);
6767 } else {
6868 // Emit the correct save instruction based on the number of bytes in
6969 // the frame. Minimum stack frame size according to V8 ABI is:
7676
7777 // Round up to next doubleword boundary -- a double-word boundary
7878 // is required by the ABI.
79 frameSize = RoundUpToAlignment(frameSize, 8);
79 frameSize = alignTo(frameSize, 8);
8080 }
8181 return frameSize;
8282 }
527527 MaxAlign = std::max(MaxAlign, Align);
528528
529529 // Adjust to alignment boundary.
530 Offset = RoundUpToAlignment(Offset, Align, Skew);
530 Offset = alignTo(Offset, Align, Skew);
531531
532532 if (StackGrowsDown) {
533533 DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
611611
612612 unsigned Align = MFI->getObjectAlignment(i);
613613 // Adjust to alignment boundary
614 Offset = RoundUpToAlignment(Offset, Align, Skew);
614 Offset = alignTo(Offset, Align, Skew);
615615
616616 MFI->setObjectOffset(i, -Offset); // Set the computed offset
617617 }
620620 for (int i = MaxCSFI; i >= MinCSFI ; --i) {
621621 unsigned Align = MFI->getObjectAlignment(i);
622622 // Adjust to alignment boundary
623 Offset = RoundUpToAlignment(Offset, Align, Skew);
623 Offset = alignTo(Offset, Align, Skew);
624624
625625 MFI->setObjectOffset(i, Offset);
626626 Offset += MFI->getObjectSize(i);
653653 unsigned Align = MFI->getLocalFrameMaxAlign();
654654
655655 // Adjust to alignment boundary.
656 Offset = RoundUpToAlignment(Offset, Align, Skew);
656 Offset = alignTo(Offset, Align, Skew);
657657
658658 DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
659659
772772 // If the frame pointer is eliminated, all frame offsets will be relative to
773773 // SP not FP. Align to MaxAlign so this works.
774774 StackAlign = std::max(StackAlign, MaxAlign);
775 Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
775 Offset = alignTo(Offset, StackAlign, Skew);
776776 }
777777
778778 // Update frame info to pretend that this is part of the stack...
10091009
10101010 // Callee-saved registers are pushed on stack before the stack is realigned.
10111011 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1012 NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
1012 NumBytes = alignTo(NumBytes, MaxAlign);
10131013
10141014 // Get the offset of the stack slot for the EBP register, which is
10151015 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
11301130 // virtual memory manager are allocated in correct sequence.
11311131 uint64_t AlignedNumBytes = NumBytes;
11321132 if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
1133 AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
1133 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
11341134 if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
11351135 // Check whether EAX is livein for this function.
11361136 bool isEAXAlive = isEAXLiveIn(MF);
14291429 // RBP is not included in the callee saved register block. After pushing RBP,
14301430 // everything is 16 byte aligned. Everything we allocate before an outgoing
14311431 // call must also be 16 byte aligned.
1432 unsigned FrameSizeMinusRBP =
1433 RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
1432 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
14341433 // Subtract out the size of the callee saved registers. This is how much stack
14351434 // each funclet will allocate.
14361435 return FrameSizeMinusRBP - CSSize;
14901489 // Callee-saved registers were pushed on stack before the stack was
14911490 // realigned.
14921491 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1493 NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
1492 NumBytes = alignTo(FrameSize, MaxAlign);
14941493
14951494 // Pop EBP.
14961495 BuildMI(MBB, MBBI, DL,
24792478 // amount of space needed for the outgoing arguments up to the next
24802479 // alignment boundary.
24812480 unsigned StackAlign = getStackAlignment();
2482 Amount = RoundUpToAlignment(Amount, StackAlign);
2481 Amount = alignTo(Amount, StackAlign);
24832482
24842483 MachineModuleInfo &MMI = MF.getMMI();
24852484 const Function *Fn = MF.getFunction();
380380 false, false, 0);
381381 }
382382 // Lower to pair of consecutive word aligned loads plus some bit shifting.
383 int32_t HighOffset = RoundUpToAlignment(Offset, 4);
383 int32_t HighOffset = alignTo(Offset, 4);
384384 int32_t LowOffset = HighOffset - 4;
385385 SDValue LowAddr, HighAddr;
386386 if (GlobalAddressSDNode *GASD =
543543 // Cap at 128 was found experimentally to have a good data/instruction
544544 // overhead tradeoff.
545545 if (Padding > 128)
546 Padding = RoundUpToAlignment(InitSize, 128) - InitSize;
546 Padding = alignTo(InitSize, 128) - InitSize;
547547
548548 GlobalInits.push_back(
549549 ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
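A worked example of the cap above: if the originally requested padding exceeds 128 bytes, it is replaced by just enough zero bytes to reach the next 128-byte boundary, so an InitSize of 1000 gets 24 bytes of padding (1024 - 1000) instead of the larger original amount. A minimal sketch of that clamping:

#include <cassert>
#include <cstdint>

// Worked example of the padding cap above: oversized padding is replaced
// by the distance to the next 128-byte boundary.
static uint64_t cappedPadding(uint64_t Padding, uint64_t InitSize) {
  if (Padding > 128) {
    uint64_t AlignedUp = (InitSize + 127) / 128 * 128; // alignTo(InitSize, 128)
    Padding = AlignedUp - InitSize;
  }
  return Padding;
}

int main() {
  assert(cappedPadding(200, 1000) == 24); // 1024 - 1000
  assert(cappedPadding(64, 1000) == 64);  // small padding left untouched
  return 0;
}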
11411141 setOrigin(A, getCleanOrigin());
11421142 }
11431143 }
1144 ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
1144 ArgOffset += alignTo(Size, kShadowTLSAlignment);
11451145 }
11461146 assert(*ShadowPtr && "Could not find shadow for an argument");
11471147 return *ShadowPtr;
24972497 (void)Store;
24982498 assert(Size != 0 && Store != nullptr);
24992499 DEBUG(dbgs() << " Param:" << *Store << "\n");
2500 ArgOffset += RoundUpToAlignment(Size, 8);
2500 ArgOffset += alignTo(Size, 8);
25012501 }
25022502 DEBUG(dbgs() << " done with call args\n");
25032503
28172817 Type *RealTy = A->getType()->getPointerElementType();
28182818 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
28192819 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2820 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2820 OverflowOffset += alignTo(ArgSize, 8);
28212821 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
28222822 ArgSize, kShadowTLSAlignment);
28232823 } else {
28392839 case AK_Memory:
28402840 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
28412841 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2842 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2842 OverflowOffset += alignTo(ArgSize, 8);
28432843 }
28442844 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
28452845 }
29642964 #endif
29652965 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
29662966 VAArgOffset += ArgSize;
2967 VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
2967 VAArgOffset = alignTo(VAArgOffset, 8);
29682968 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
29692969 }
29702970
31093109 case AK_Memory:
31103110 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
31113111 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3112 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
3112 OverflowOffset += alignTo(ArgSize, 8);
31133113 break;
31143114 }
31153115 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
533533 // Add alignment.
534534 // NOTE: we ensure that BasePointer itself is aligned to >= Align.
535535 StaticOffset += Size;
536 StaticOffset = RoundUpToAlignment(StaticOffset, Align);
536 StaticOffset = alignTo(StaticOffset, Align);
537537
538538 Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
539539 ConstantInt::get(Int32Ty, -StaticOffset));
564564 // Add alignment.
565565 // NOTE: we ensure that BasePointer itself is aligned to >= Align.
566566 StaticOffset += Size;
567 StaticOffset = RoundUpToAlignment(StaticOffset, Align);
567 StaticOffset = alignTo(StaticOffset, Align);
568568
569569 Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
570570 ConstantInt::get(Int32Ty, -StaticOffset));
581581 // Re-align BasePointer so that our callees would see it aligned as
582582 // expected.
583583 // FIXME: no need to update BasePointer in leaf functions.
584 StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
584 StaticOffset = alignTo(StaticOffset, StackAlignment);
585585
586586 // Update shadow stack pointer in the function epilogue.
587587 IRB.SetInsertPoint(BasePointer->getNextNode());
252252 if (Options.Use8bitCounters) {
253253 // Make sure the array is 16-aligned.
254254 static const int kCounterAlignment = 16;
255 Type *Int8ArrayNTy =
256 ArrayType::get(Int8Ty, RoundUpToAlignment(N, kCounterAlignment));
255 Type *Int8ArrayNTy = ArrayType::get(Int8Ty, alignTo(N, kCounterAlignment));
257256 RealEightBitCounterArray = new GlobalVariable(
258257 M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage,
259258 Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter");
4343 else if (Size <= 512) Res = Size + 64;
4444 else if (Size <= 4096) Res = Size + 128;
4545 else Res = Size + 256;
46 return RoundUpToAlignment(Res, Alignment);
46 return alignTo(Res, Alignment);
4747 }
4848
4949 void
240240 // start address leave a sufficient gap to store the __DWARF
241241 // segment.
242242 uint64_t PrevEndAddress = EndAddress;
243 EndAddress = RoundUpToAlignment(EndAddress, 0x1000);
243 EndAddress = alignTo(EndAddress, 0x1000);
244244 if (GapForDwarf == UINT64_MAX && Segment.vmaddr > EndAddress &&
245245 Segment.vmaddr - EndAddress >= DwarfSegmentSize)
246246 GapForDwarf = EndAddress;
267267 uint64_t FileSize, unsigned NumSections,
268268 MCAsmLayout &Layout, MachObjectWriter &Writer) {
269269 Writer.writeSegmentLoadCommand("__DWARF", NumSections, VMAddr,
270 RoundUpToAlignment(FileSize, 0x1000),
271 FileOffset, FileSize, /* MaxProt */ 7,
270 alignTo(FileSize, 0x1000), FileOffset,
271 FileSize, /* MaxProt */ 7,
272272 /* InitProt =*/3);
273273
274274 for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
278278
279279 unsigned Align = Sec->getAlignment();
280280 if (Align > 1) {
281 VMAddr = RoundUpToAlignment(VMAddr, Align);
282 FileOffset = RoundUpToAlignment(FileOffset, Align);
281 VMAddr = alignTo(VMAddr, Align);
282 FileOffset = alignTo(FileOffset, Align);
283283 }
284284 Writer.writeSection(Layout, *Sec, VMAddr, FileOffset, 0, 0, 0);
285285
393393 continue;
394394
395395 if (uint64_t Size = Layout.getSectionFileSize(Sec)) {
396 DwarfSegmentSize =
397 RoundUpToAlignment(DwarfSegmentSize, Sec->getAlignment());
396 DwarfSegmentSize = alignTo(DwarfSegmentSize, Sec->getAlignment());
398397 DwarfSegmentSize += Size;
399398 ++NumDwarfSections;
400399 }
418417
419418 uint64_t SymtabStart = LoadCommandSize;
420419 SymtabStart += HeaderSize;
421 SymtabStart = RoundUpToAlignment(SymtabStart, 0x1000);
420 SymtabStart = alignTo(SymtabStart, 0x1000);
422421
423422 // We gathered all the information we need, start emitting the output file.
424423 Writer.writeHeader(MachO::MH_DSYM, NumLoadCommands, LoadCommandSize, false);
440439 NewStringsSize);
441440
442441 uint64_t DwarfSegmentStart = StringStart + NewStringsSize;
443 DwarfSegmentStart = RoundUpToAlignment(DwarfSegmentStart, 0x1000);
442 DwarfSegmentStart = alignTo(DwarfSegmentStart, 0x1000);
444443
445444 // Write the load commands for the segments and sections we 'import' from
446445 // the original binary.
459458 DwarfSegmentSize, GapForDwarf, EndAddress);
460459 }
461460
462 uint64_t DwarfVMAddr = RoundUpToAlignment(EndAddress, 0x1000);
461 uint64_t DwarfVMAddr = alignTo(EndAddress, 0x1000);
463462 uint64_t DwarfVMMax = Is64Bit ? UINT64_MAX : UINT32_MAX;
464463 if (DwarfVMAddr + DwarfSegmentSize > DwarfVMMax ||
465464 DwarfVMAddr + DwarfSegmentSize < DwarfVMAddr /* Overflow */) {
509508 continue;
510509
511510 uint64_t Pos = OutFile.tell();
512 Writer.WriteZeros(RoundUpToAlignment(Pos, Sec.getAlignment()) - Pos);
511 Writer.WriteZeros(alignTo(Pos, Sec.getAlignment()) - Pos);
513512 MCAsm.writeSectionData(&Sec, Layout);
514513 }
515514
183183 }
184184
185185 static uint64_t bytesToBlocks(uint64_t NumBytes, uint64_t BlockSize) {
186 return RoundUpToAlignment(NumBytes, BlockSize) / BlockSize;
186 return alignTo(NumBytes, BlockSize) / BlockSize;
187187 }
188188
189189 static uint64_t blockToOffset(uint64_t BlockNumber, uint64_t BlockSize) {
981981 // the next subsection.
982982 size_t SectionOffset = Data.data() - SectionContents.data();
983983 size_t NextOffset = SectionOffset + SubSectionSize;
984 NextOffset = RoundUpToAlignment(NextOffset, 4);
984 NextOffset = alignTo(NextOffset, 4);
985985 Data = SectionContents.drop_front(NextOffset);
986986
987987 // Optionally print the subsection bytes in case our parsing gets confused
190190 }
191191
192192 uint8_t *allocateFromSlab(uintptr_t Size, unsigned Alignment, bool isCode) {
193 Size = RoundUpToAlignment(Size, Alignment);
193 Size = alignTo(Size, Alignment);
194194 if (CurrentSlabOffset + Size > SlabSize)
195195 report_fatal_error("Can't allocate enough memory. Tune --preallocate");
196196
172172 // Assign each section data address consecutively.
173173 for (COFFYAML::Section &S : CP.Obj.Sections) {
174174 if (S.SectionData.binary_size() > 0) {
175 CurrentSectionDataOffset = RoundUpToAlignment(
176 CurrentSectionDataOffset, CP.isPE() ? CP.getFileAlignment() : 4);
175 CurrentSectionDataOffset = alignTo(CurrentSectionDataOffset,
176 CP.isPE() ? CP.getFileAlignment() : 4);
177177 S.Header.SizeOfRawData = S.SectionData.binary_size();
178178 if (CP.isPE())
179179 S.Header.SizeOfRawData =
180 RoundUpToAlignment(S.Header.SizeOfRawData, CP.getFileAlignment());
180 alignTo(S.Header.SizeOfRawData, CP.getFileAlignment());
181181 S.Header.PointerToRawData = CurrentSectionDataOffset;
182182 CurrentSectionDataOffset += S.Header.SizeOfRawData;
183183 if (!S.Relocations.empty()) {
291291 Header->FileAlignment = CP.Obj.OptionalHeader->Header.FileAlignment;
292292 uint32_t SizeOfCode = 0, SizeOfInitializedData = 0,
293293 SizeOfUninitializedData = 0;
294 uint32_t SizeOfHeaders = RoundUpToAlignment(
295 CP.SectionTableStart + CP.SectionTableSize, Header->FileAlignment);
296 uint32_t SizeOfImage =
297 RoundUpToAlignment(SizeOfHeaders, Header->SectionAlignment);
294 uint32_t SizeOfHeaders = alignTo(CP.SectionTableStart + CP.SectionTableSize,
295 Header->FileAlignment);
296 uint32_t SizeOfImage = alignTo(SizeOfHeaders, Header->SectionAlignment);
298297 uint32_t BaseOfData = 0;
299298 for (const COFFYAML::Section &S : CP.Obj.Sections) {
300299 if (S.Header.Characteristics & COFF::IMAGE_SCN_CNT_CODE)
308307 else if (S.Name.equals(".data"))
309308 BaseOfData = S.Header.VirtualAddress; // RVA
310309 if (S.Header.VirtualAddress)
311 SizeOfImage +=
312 RoundUpToAlignment(S.Header.VirtualSize, Header->SectionAlignment);
310 SizeOfImage += alignTo(S.Header.VirtualSize, Header->SectionAlignment);
313311 }
314312 Header->SizeOfCode = SizeOfCode;
315313 Header->SizeOfInitializedData = SizeOfInitializedData;
3737 if (Align == 0)
3838 Align = 1;
3939 uint64_t CurrentOffset = InitialOffset + OS.tell();
40 uint64_t AlignedOffset = RoundUpToAlignment(CurrentOffset, Align);
40 uint64_t AlignedOffset = alignTo(CurrentOffset, Align);
4141 for (; CurrentOffset != AlignedOffset; ++CurrentOffset)
4242 OS.write('\0');
4343 return AlignedOffset; // == CurrentOffset;
178178 EXPECT_EQ(256u, NextPowerOf2(128));
179179 }
180180
181 TEST(MathExtras, RoundUpToAlignment) {
182 EXPECT_EQ(8u, RoundUpToAlignment(5, 8));
183 EXPECT_EQ(24u, RoundUpToAlignment(17, 8));
184 EXPECT_EQ(0u, RoundUpToAlignment(~0LL, 8));
185
186 EXPECT_EQ(7u, RoundUpToAlignment(5, 8, 7));
187 EXPECT_EQ(17u, RoundUpToAlignment(17, 8, 1));
188 EXPECT_EQ(3u, RoundUpToAlignment(~0LL, 8, 3));
189 EXPECT_EQ(552u, RoundUpToAlignment(321, 255, 42));
181 TEST(MathExtras, alignTo) {
182 EXPECT_EQ(8u, alignTo(5, 8));
183 EXPECT_EQ(24u, alignTo(17, 8));
184 EXPECT_EQ(0u, alignTo(~0LL, 8));
185
186 EXPECT_EQ(7u, alignTo(5, 8, 7));
187 EXPECT_EQ(17u, alignTo(17, 8, 1));
188 EXPECT_EQ(3u, alignTo(~0LL, 8, 3));
189 EXPECT_EQ(552u, alignTo(321, 255, 42));
190190 }
191191
192192 template <typename T>
117117 Class2 *C1 = Class2::create(4);
118118 Class2 *C2 = Class2::create(0, 4.2);
119119
120 EXPECT_EQ(sizeof(Class2), llvm::RoundUpToAlignment(sizeof(bool) * 2,
121 llvm::alignOf<double>()));
120 EXPECT_EQ(sizeof(Class2),
121 llvm::alignTo(sizeof(bool) * 2, llvm::alignOf<double>()));
122122 EXPECT_EQ(llvm::alignOf<Class2>(), llvm::alignOf<double>());
123123
124124 EXPECT_EQ((Class2::additionalSizeToAlloc<double, short>(1, 0)),
161161 TEST(TrailingObjects, ThreeArg) {
162162 EXPECT_EQ((Class3::additionalSizeToAlloc<double, short, bool>(1, 1, 3)),
163163 sizeof(double) + sizeof(short) + 3 * sizeof(bool));
164 EXPECT_EQ(sizeof(Class3),
165 llvm::RoundUpToAlignment(1, llvm::alignOf()));
164 EXPECT_EQ(sizeof(Class3), llvm::alignTo(1, llvm::alignOf()));
166165 std::unique_ptr<char[]> P(new char[1000]);
167166 Class3 *C = reinterpret_cast<Class3 *>(P.get());
168167 EXPECT_EQ(C->getTrailingObjects<double>(), reinterpret_cast<double *>(C + 1));
182181
183182 TEST(TrailingObjects, Realignment) {
184183 EXPECT_EQ((Class4::additionalSizeToAlloc(1, 1)),
185 llvm::RoundUpToAlignment(sizeof(long) + 1, llvm::alignOf()));
186 EXPECT_EQ(sizeof(Class4), llvm::RoundUpToAlignment(1, llvm::alignOf()));
184 llvm::alignTo(sizeof(long) + 1, llvm::alignOf()));
185 EXPECT_EQ(sizeof(Class4), llvm::alignTo(1, llvm::alignOf()));
187186 std::unique_ptr<char[]> P(new char[1000]);
188187 Class4 *C = reinterpret_cast<Class4 *>(P.get());
189188 EXPECT_EQ(C->getTrailingObjects<char>(), reinterpret_cast<char *>(C + 1));