Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/CallingConvLower.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/PrologEpilogInserter.cpp | 10
-rw-r--r--  llvm/lib/ExecutionEngine/ExecutionEngine.cpp | 4
-rw-r--r--  llvm/lib/IR/DataLayout.cpp | 4
-rw-r--r--  llvm/lib/IR/Mangler.cpp | 2
-rw-r--r--  llvm/lib/IR/Metadata.cpp | 4
-rw-r--r--  llvm/lib/MC/MachObjectWriter.cpp | 6
-rw-r--r--  llvm/lib/MC/WinCOFFObjectWriter.cpp | 2
-rw-r--r--  llvm/lib/Support/MemoryBuffer.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 9
-rw-r--r--  llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 9
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp | 4
-rw-r--r--  llvm/lib/Target/ARM/ARMCallingConv.h | 2
-rw-r--r--  llvm/lib/Target/ARM/ThumbRegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp | 2
-rw-r--r--  llvm/lib/Target/Mips/MipsFrameLowering.cpp | 10
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp | 15
-rw-r--r--  llvm/lib/Target/Sparc/SparcFrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/SparcSubtarget.cpp | 4
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp | 10
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp | 11
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Transforms/IPO/LowerBitSets.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 12
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SafeStack.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp | 2
33 files changed, 78 insertions(+), 83 deletions(-)
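
Note: every hunk below is a mechanical rename; RoundUpToAlignment became alignTo in include/llvm/Support/MathExtras.h (see the comment updated in AArch64LoadStoreOptimizer.cpp), and this commit updates the callers under llvm/lib. For reference, a minimal sketch of the helper's semantics, assuming the MathExtras.h definition of this era (the optional Skew offset defaults to 0):

    // Returns the smallest N >= Value with N == Skew (mod Align).
    // e.g. alignTo(5, 8) == 8, alignTo(16, 8) == 16, alignTo(17, 8, 4) == 20.
    inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
      Skew %= Align;
      return (Value + Align - 1 - Skew) / Align * Align + Skew;
    }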
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 9e896aed0dc..480ab5cddde 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -376,7 +376,7 @@ STATISTIC(ObjectVisitorLoad,
APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
if (RoundToAlign && Align)
- return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
+ return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align));
return Size;
}
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index 23c0d542560..bdd5b5552c0 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -53,7 +53,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
Align = MinAlign;
MF.getFrameInfo()->ensureMaxAlignment(Align);
MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
- Size = unsigned(RoundUpToAlignment(Size, MinAlign));
+ Size = unsigned(alignTo(Size, MinAlign));
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 939c50027b0..02b49e038fb 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -512,7 +512,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
@@ -596,7 +596,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
@@ -605,7 +605,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
@@ -638,7 +638,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
@@ -757,7 +757,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
- Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
+ Offset = alignTo(Offset, StackAlign, Skew);
}
// Update frame info to pretend that this is part of the stack...
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 41c8da40346..7d09ec6d1da 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -106,9 +106,7 @@ public:
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new(
- RoundUpToAlignment(sizeof(GVMemoryBlock),
- TD.getPreferredAlignment(GV))
- + GVSize);
+ alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlignment(GV)) + GVSize);
new(RawMemory) GVMemoryBlock(GV);
return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
}
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 5468f47bbfe..5ec5f92b388 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -52,7 +52,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
// Add padding if necessary to align the data element properly.
if ((StructSize & (TyAlign-1)) != 0) {
IsPadded = true;
- StructSize = RoundUpToAlignment(StructSize, TyAlign);
+ StructSize = alignTo(StructSize, TyAlign);
}
// Keep track of maximum alignment constraint.
@@ -69,7 +69,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
// and all array elements would be aligned correctly.
if ((StructSize & (StructAlignment-1)) != 0) {
IsPadded = true;
- StructSize = RoundUpToAlignment(StructSize, StructAlignment);
+ StructSize = alignTo(StructSize, StructAlignment);
}
}
diff --git a/llvm/lib/IR/Mangler.cpp b/llvm/lib/IR/Mangler.cpp
index 016cb9eb689..ddf024df8cc 100644
--- a/llvm/lib/IR/Mangler.cpp
+++ b/llvm/lib/IR/Mangler.cpp
@@ -99,7 +99,7 @@ static void addByteCountSuffix(raw_ostream &OS, const Function *F,
Ty = cast<PointerType>(Ty)->getElementType();
// Size should be aligned to pointer size.
unsigned PtrSize = DL.getPointerSize();
- ArgWords += RoundUpToAlignment(DL.getTypeAllocSize(Ty), PtrSize);
+ ArgWords += alignTo(DL.getTypeAllocSize(Ty), PtrSize);
}
OS << '@' << ArgWords;
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
index 9a9a5017841..543eaac483d 100644
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -433,7 +433,7 @@ void *MDNode::operator new(size_t Size, unsigned NumOps) {
size_t OpSize = NumOps * sizeof(MDOperand);
// uint64_t is the most aligned type we need support (ensured by static_assert
// above)
- OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());
void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;
MDOperand *O = static_cast<MDOperand *>(Ptr);
for (MDOperand *E = O - NumOps; O != E; --O)
@@ -444,7 +444,7 @@ void *MDNode::operator new(size_t Size, unsigned NumOps) {
void MDNode::operator delete(void *Mem) {
MDNode *N = static_cast<MDNode *>(Mem);
size_t OpSize = N->NumOperands * sizeof(MDOperand);
- OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());
MDOperand *O = static_cast<MDOperand *>(Mem);
for (MDOperand *E = O - N->NumOperands; O != E; --O)
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp
index 324385fa132..8ebd7031c65 100644
--- a/llvm/lib/MC/MachObjectWriter.cpp
+++ b/llvm/lib/MC/MachObjectWriter.cpp
@@ -404,7 +404,7 @@ static unsigned ComputeLinkerOptionsLoadCommandSize(
unsigned Size = sizeof(MachO::linker_option_command);
for (const std::string &Option : Options)
Size += Option.size() + 1;
- return RoundUpToAlignment(Size, is64Bit ? 8 : 4);
+ return alignTo(Size, is64Bit ? 8 : 4);
}
void MachObjectWriter::writeLinkerOptionsLoadCommand(
@@ -606,7 +606,7 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
const MCAsmLayout &Layout) {
uint64_t StartAddress = 0;
for (const MCSection *Sec : Layout.getSectionOrder()) {
- StartAddress = RoundUpToAlignment(StartAddress, Sec->getAlignment());
+ StartAddress = alignTo(StartAddress, Sec->getAlignment());
SectionAddress[Sec] = StartAddress;
StartAddress += Layout.getSectionAddressSize(Sec);
@@ -736,7 +736,7 @@ void MachObjectWriter::writeObject(MCAssembler &Asm,
// Add the loh load command size, if used.
uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
- uint64_t LOHSize = RoundUpToAlignment(LOHRawSize, is64Bit() ? 8 : 4);
+ uint64_t LOHSize = alignTo(LOHRawSize, is64Bit() ? 8 : 4);
if (LOHSize) {
++NumLoadCommands;
LoadCommandsSize += sizeof(MachO::linkedit_data_command);
diff --git a/llvm/lib/MC/WinCOFFObjectWriter.cpp b/llvm/lib/MC/WinCOFFObjectWriter.cpp
index a76cbdbd544..36b317eebb9 100644
--- a/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -924,7 +924,7 @@ void WinCOFFObjectWriter::writeObject(MCAssembler &Asm,
if (IsPhysicalSection(Sec)) {
// Align the section data to a four byte boundary.
- offset = RoundUpToAlignment(offset, 4);
+ offset = alignTo(offset, 4);
Sec->Header.PointerToRawData = offset;
offset += Sec->Header.SizeOfRawData;
diff --git a/llvm/lib/Support/MemoryBuffer.cpp b/llvm/lib/Support/MemoryBuffer.cpp
index faee10bb07c..25ddfe52a1d 100644
--- a/llvm/lib/Support/MemoryBuffer.cpp
+++ b/llvm/lib/Support/MemoryBuffer.cpp
@@ -135,7 +135,7 @@ MemoryBuffer::getNewUninitMemBuffer(size_t Size, const Twine &BufferName) {
SmallString<256> NameBuf;
StringRef NameRef = BufferName.toStringRef(NameBuf);
size_t AlignedStringLen =
- RoundUpToAlignment(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
+ alignTo(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
size_t RealLen = AlignedStringLen + Size + 1;
char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
if (!Mem)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 11ae8005370..3c57644c483 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -170,7 +170,7 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
unsigned Align = getStackAlignment();
int64_t Amount = I->getOperand(0).getImm();
- Amount = RoundUpToAlignment(Amount, Align);
+ Amount = alignTo(Amount, Align);
if (!IsDestroy)
Amount = -Amount;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 77637364337..465f153708f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2545,7 +2545,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// This is a non-standard ABI so by fiat I say we're allowed to make full
// use of the stack area to be popped, which must be aligned to 16 bytes in
// any case:
- StackArgSize = RoundUpToAlignment(StackArgSize, 16);
+ StackArgSize = alignTo(StackArgSize, 16);
// If we're expected to restore the stack (e.g. fastcc) then we'll be adding
// a multiple of 16.
@@ -2959,7 +2959,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Since callee will pop argument stack as a tail call, we must keep the
// popped size 16-byte aligned.
- NumBytes = RoundUpToAlignment(NumBytes, 16);
+ NumBytes = alignTo(NumBytes, 16);
// FPDiff will be negative if this tail call requires more space than we
// would automatically have in our incoming argument space. Positive if we
@@ -3199,9 +3199,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
- uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
- ? RoundUpToAlignment(NumBytes, 16)
- : 0;
+ uint64_t CalleePopBytes =
+ DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 43664df3b86..6d09df11c2b 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -976,7 +976,7 @@ static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint_64t to int when
-// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
+// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
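
(An aside, not part of the patch: the masked form above agrees with the generic alignTo only when PowOf2 is in fact a power of two. For example, alignTo(5, 4) == (5 + 3) & ~3 == 8, but a non-power-of-two divisor such as 6 would give (5 + 5) & ~5 == 10 instead of the correct 6.)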
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 1239dfb235e..0e911c451af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -327,7 +327,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
if (MFI->getShaderType() == ShaderType::COMPUTE) {
OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
- OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+ OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
}
}
@@ -503,7 +503,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
ProgInfo.LDSBlocks =
- RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+ alignTo(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
// Scratch is allocated in 256 dword blocks.
unsigned ScratchAlignShift = 10;
@@ -511,8 +511,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
// is used by the entire wave. ProgInfo.ScratchSize is the amount of
// scratch memory used per thread.
ProgInfo.ScratchBlocks =
- RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
- 1 << ScratchAlignShift) >> ScratchAlignShift;
+ alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
+ 1 << ScratchAlignShift) >>
+ ScratchAlignShift;
ProgInfo.ComputePGMRSrc1 =
S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
index 4d84d281d99..3e6495546d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
@@ -87,15 +87,15 @@ int AMDGPUFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(i));
OffsetBytes += MFI->getObjectSize(i);
// Each register holds 4 bytes, so we must always align the offset to at
// least 4 bytes, so that 2 frame objects won't share the same register.
- OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
+ OffsetBytes = alignTo(OffsetBytes, 4);
}
if (FI != -1)
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(FI));
return OffsetBytes / (getStackWidth(MF) * 4);
}
diff --git a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index bd80bb211b4..bb4bda25470 100644
--- a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -142,8 +142,8 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
}
void CFStack::updateMaxStackSize() {
- unsigned CurrentStackSize = CurrentEntries +
- (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
+ unsigned CurrentStackSize =
+ CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.h b/llvm/lib/Target/ARM/ARMCallingConv.h
index a731d00883a..71b81936240 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -211,7 +211,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
// First consume all registers that would give an unaligned object. Whether
// we go on stack or in regs, no-one will be using them in future.
- unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+ unsigned RegAlign = alignTo(Align, 4) / 4;
while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
State.AllocateReg(RegList[RegIdx++]);
diff --git a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
index b5f9d7e38f2..427ec6ea7e0 100644
--- a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -281,7 +281,7 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
unsigned RequiredExtraInstrs;
if (ExtraRange)
- RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
+ RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
else if (RangeAfterCopy > 0)
// We need an extra instruction but none is available
RequiredExtraInstrs = 1000000;
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 7a52a1c9eae..c7b06d22fd4 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -436,10 +436,10 @@ void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
// Get the number of bytes to allocate from the FrameInfo.
unsigned FrameSize = MFI->getStackSize();
// Round up the max call frame size to the max alignment on the stack.
- unsigned MaxCFA = RoundUpToAlignment(MFI->getMaxCallFrameSize(), MaxAlign);
+ unsigned MaxCFA = alignTo(MFI->getMaxCallFrameSize(), MaxAlign);
MFI->setMaxCallFrameSize(MaxCFA);
- FrameSize = MaxCFA + RoundUpToAlignment(FrameSize, MaxAlign);
+ FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
MFI->setStackSize(FrameSize);
bool AlignStack = (MaxAlign > getStackAlignment());
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index e9eaf810637..8bbe3d4df69 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1180,7 +1180,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
// for now (will return false). We need to determine the right alignment
// based on the normal alignment for the underlying machine type.
//
- unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);
+ unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
unsigned BEAlign = 0;
if (ArgSize < 8 && !Subtarget->isLittle())
diff --git a/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index a74c8abd2e2..1c7e5c50363 100644
--- a/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -122,7 +122,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
- Offset = RoundUpToAlignment(Offset + Size, Size);
+ Offset = alignTo(Offset + Size, Size);
}
unsigned MaxAlign = MFI->getMaxAlignment();
@@ -133,14 +133,14 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Iterate over other objects.
for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
- Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
+ Offset = alignTo(Offset + MFI->getObjectSize(I), MaxAlign);
// Call frame.
if (MFI->adjustsStack() && hasReservedCallFrame(MF))
- Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
- std::max(MaxAlign, getStackAlignment()));
+ Offset = alignTo(Offset + MFI->getMaxCallFrameSize(),
+ std::max(MaxAlign, getStackAlignment()));
- return RoundUpToAlignment(Offset, getStackAlignment());
+ return alignTo(Offset, getStackAlignment());
}
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 5680130b91b..694ff9b4943 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -1873,10 +1873,10 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
auto &TD = DAG.getDataLayout();
unsigned ArgSizeInBytes =
TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
- SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
- ArgSlotSizeInBytes),
- DL, VAList.getValueType()));
+ SDValue Tmp3 =
+ DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
+ DL, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
MachinePointerInfo(SV), false, false, 0);
@@ -2604,7 +2604,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// ByValChain is the output chain of the last Memcpy node created for copying
// byval arguments to the stack.
unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
+ NextStackOffset = alignTo(NextStackOffset, StackAlignment);
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
if (!IsTailCall)
@@ -3787,8 +3787,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
int VaArgOffset;
if (ArgRegs.size() == Idx)
- VaArgOffset =
- RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
+ VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
else {
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
@@ -3854,7 +3853,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
}
// Mark the registers allocated.
- Size = RoundUpToAlignment(Size, RegSizeInBytes);
+ Size = alignTo(Size, RegSizeInBytes);
for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
Size -= RegSizeInBytes, ++I, ++NumRegs)
State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index 39b5e809c9b..348af439e29 100644
--- a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -146,7 +146,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF,
// Finally, ensure that the size is sufficiently aligned for the
// data on the stack.
if (MFI->getMaxAlignment() > 0) {
- NumBytes = RoundUpToAlignment(NumBytes, MFI->getMaxAlignment());
+ NumBytes = alignTo(NumBytes, MFI->getMaxAlignment());
}
// Update stack size with corrected value.
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 5e70ffe2223..d78633f567d 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1131,7 +1131,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
// Keep stack frames 16-byte aligned.
- ArgsSize = RoundUpToAlignment(ArgsSize, 16);
+ ArgsSize = alignTo(ArgsSize, 16);
// Varargs calls require special treatment.
if (CLI.IsVarArg)
diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
index d701594d27a..69cf294aeed 100644
--- a/llvm/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -64,7 +64,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
frameSize += 128;
// Frames with calls must also reserve space for 6 outgoing arguments
// whether they are used or not. LowerCall_64 takes care of that.
- frameSize = RoundUpToAlignment(frameSize, 16);
+ frameSize = alignTo(frameSize, 16);
} else {
// Emit the correct save instruction based on the number of bytes in
// the frame. Minimum stack frame size according to V8 ABI is:
@@ -77,7 +77,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
// Round up to next doubleword boundary -- a double-word boundary
// is required by the ABI.
- frameSize = RoundUpToAlignment(frameSize, 8);
+ frameSize = alignTo(frameSize, 8);
}
return frameSize;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
index d570d426611..d00ae703951 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
@@ -528,7 +528,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
@@ -612,7 +612,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
@@ -621,7 +621,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
@@ -654,7 +654,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
@@ -773,7 +773,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
- Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
+ Offset = alignTo(Offset, StackAlign, Skew);
}
// Update frame info to pretend that this is part of the stack...
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 8632bb8254f..8f21fb49728 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1010,7 +1010,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+ NumBytes = alignTo(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -1131,7 +1131,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this function.
bool isEAXAlive = isEAXLiveIn(MF);
@@ -1430,8 +1430,7 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
- RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
@@ -1491,7 +1490,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+ NumBytes = alignTo(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -2480,7 +2479,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = RoundUpToAlignment(Amount, StackAlign);
+ Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function *Fn = MF.getFunction();
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 105b2cfb1be..38e6df06bd0 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -381,7 +381,7 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
false, false, 0);
}
// Lower to pair of consecutive word aligned loads plus some bit shifting.
- int32_t HighOffset = RoundUpToAlignment(Offset, 4);
+ int32_t HighOffset = alignTo(Offset, 4);
int32_t LowOffset = HighOffset - 4;
SDValue LowAddr, HighAddr;
if (GlobalAddressSDNode *GASD =
diff --git a/llvm/lib/Transforms/IPO/LowerBitSets.cpp b/llvm/lib/Transforms/IPO/LowerBitSets.cpp
index 7b515745c31..92eaf9fe87f 100644
--- a/llvm/lib/Transforms/IPO/LowerBitSets.cpp
+++ b/llvm/lib/Transforms/IPO/LowerBitSets.cpp
@@ -544,7 +544,7 @@ void LowerBitSets::buildBitSetsFromGlobalVariables(
// Cap at 128 was found experimentally to have a good data/instruction
// overhead tradeoff.
if (Padding > 128)
- Padding = RoundUpToAlignment(InitSize, 128) - InitSize;
+ Padding = alignTo(InitSize, 128) - InitSize;
GlobalInits.push_back(
ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 34aaa7f27d6..dda01d1a572 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1142,7 +1142,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(A, getCleanOrigin());
}
}
- ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
+ ArgOffset += alignTo(Size, kShadowTLSAlignment);
}
assert(*ShadowPtr && "Could not find shadow for an argument");
return *ShadowPtr;
@@ -2498,7 +2498,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
(void)Store;
assert(Size != 0 && Store != nullptr);
DEBUG(dbgs() << " Param:" << *Store << "\n");
- ArgOffset += RoundUpToAlignment(Size, 8);
+ ArgOffset += alignTo(Size, 8);
}
DEBUG(dbgs() << " done with call args\n");
@@ -2818,7 +2818,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
ArgSize, kShadowTLSAlignment);
} else {
@@ -2840,7 +2840,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
case AK_Memory:
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
}
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
@@ -2965,7 +2965,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
#endif
Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
VAArgOffset += ArgSize;
- VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
+ VAArgOffset = alignTo(VAArgOffset, 8);
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
@@ -3110,7 +3110,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
case AK_Memory:
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
break;
}
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
diff --git a/llvm/lib/Transforms/Instrumentation/SafeStack.cpp b/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
index abed465f102..ee8d4fad752 100644
--- a/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
@@ -534,7 +534,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
// Add alignment.
// NOTE: we ensure that BasePointer itself is aligned to >= Align.
StaticOffset += Size;
- StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+ StaticOffset = alignTo(StaticOffset, Align);
Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
ConstantInt::get(Int32Ty, -StaticOffset));
@@ -565,7 +565,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
// Add alignment.
// NOTE: we ensure that BasePointer itself is aligned to >= Align.
StaticOffset += Size;
- StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+ StaticOffset = alignTo(StaticOffset, Align);
Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
ConstantInt::get(Int32Ty, -StaticOffset));
@@ -582,7 +582,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
// Re-align BasePointer so that our callees would see it aligned as
// expected.
// FIXME: no need to update BasePointer in leaf functions.
- StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
+ StaticOffset = alignTo(StaticOffset, StackAlignment);
// Update shadow stack pointer in the function epilogue.
IRB.SetInsertPoint(BasePointer->getNextNode());
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 09de7a2cda2..82a070843bf 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -253,8 +253,7 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
if (Options.Use8bitCounters) {
// Make sure the array is 16-aligned.
static const int kCounterAlignment = 16;
- Type *Int8ArrayNTy =
- ArrayType::get(Int8Ty, RoundUpToAlignment(N, kCounterAlignment));
+ Type *Int8ArrayNTy = ArrayType::get(Int8Ty, alignTo(N, kCounterAlignment));
RealEightBitCounterArray = new GlobalVariable(
M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter");
diff --git a/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp b/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
index 409326eba40..7e50d4bb447 100644
--- a/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
+++ b/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
@@ -44,7 +44,7 @@ static size_t VarAndRedzoneSize(size_t Size, size_t Alignment) {
else if (Size <= 512) Res = Size + 64;
else if (Size <= 4096) Res = Size + 128;
else Res = Size + 256;
- return RoundUpToAlignment(Res, Alignment);
+ return alignTo(Res, Alignment);
}
void