Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp        2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp         9
-rw-r--r--  llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp   2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp             9
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp          6
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp     4
-rw-r--r--  llvm/lib/Target/ARM/ARMCallingConv.h                    2
-rw-r--r--  llvm/lib/Target/ARM/ThumbRegisterInfo.cpp               2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp        4
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp                   2
-rw-r--r--  llvm/lib/Target/Mips/MipsFrameLowering.cpp             10
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp              15
-rw-r--r--  llvm/lib/Target/Sparc/SparcFrameLowering.cpp            2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp             2
-rw-r--r--  llvm/lib/Target/Sparc/SparcSubtarget.cpp                4
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp         10
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp               11
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp             2
18 files changed, 48 insertions, 50 deletions
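
Everything below is a mechanical rename of RoundUpToAlignment to alignTo; no call site changes behavior. For reference, a minimal sketch of the semantics these call sites rely on (the real definition lives in include/llvm/Support/MathExtras.h; the helper name alignToSketch and the default-argument detail here are illustrative assumptions, not the library's exact signature):

#include <cstdint>

// Sketch: smallest N >= Value with N % Align == Skew % Align (Align > 0).
// The two-argument calls below leave Skew at 0; the WebAssemblyPEI hunks
// pass an explicit Skew.
static uint64_t alignToSketch(uint64_t Value, uint64_t Align,
                              uint64_t Skew = 0) {
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

// e.g. alignToSketch(20, 16) == 32 and alignToSketch(20, 16, 4) == 20.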
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 11ae8005370..3c57644c483 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -170,7 +170,7 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
unsigned Align = getStackAlignment();
int64_t Amount = I->getOperand(0).getImm();
- Amount = RoundUpToAlignment(Amount, Align);
+ Amount = alignTo(Amount, Align);
if (!IsDestroy)
Amount = -Amount;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 77637364337..465f153708f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2545,7 +2545,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// This is a non-standard ABI so by fiat I say we're allowed to make full
// use of the stack area to be popped, which must be aligned to 16 bytes in
// any case:
- StackArgSize = RoundUpToAlignment(StackArgSize, 16);
+ StackArgSize = alignTo(StackArgSize, 16);
// If we're expected to restore the stack (e.g. fastcc) then we'll be adding
// a multiple of 16.
@@ -2959,7 +2959,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Since callee will pop argument stack as a tail call, we must keep the
// popped size 16-byte aligned.
- NumBytes = RoundUpToAlignment(NumBytes, 16);
+ NumBytes = alignTo(NumBytes, 16);
// FPDiff will be negative if this tail call requires more space than we
// would automatically have in our incoming argument space. Positive if we
@@ -3199,9 +3199,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
- uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
- ? RoundUpToAlignment(NumBytes, 16)
- : 0;
+ uint64_t CalleePopBytes =
+ DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 43664df3b86..6d09df11c2b 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -976,7 +976,7 @@ static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
-// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
+// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
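
A note on the hunk above: the file keeps this local signed helper instead of casting through the unsigned alignTo in MathExtras.h. The mask form, worked through (values are illustrative):

// alignTo(10, 8):  (10 + 8 - 1) & ~(8 - 1) == 17 & ~7 == 16
// Signed inputs still round toward +infinity:
// alignTo(-10, 8): (-10 + 7) & ~7 == -3 & ~7 == -8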
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 1239dfb235e..0e911c451af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -327,7 +327,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
if (MFI->getShaderType() == ShaderType::COMPUTE) {
OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
- OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+ OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
}
}
@@ -503,7 +503,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
ProgInfo.LDSBlocks =
- RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+ alignTo(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
// Scratch is allocated in 256 dword blocks.
unsigned ScratchAlignShift = 10;
@@ -511,8 +511,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
// is used by the entire wave. ProgInfo.ScratchSize is the amount of
// scratch memory used per thread.
ProgInfo.ScratchBlocks =
- RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
- 1 << ScratchAlignShift) >> ScratchAlignShift;
+ alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
+ 1 << ScratchAlignShift) >>
+ ScratchAlignShift;
ProgInfo.ComputePGMRSrc1 =
S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
index 4d84d281d99..3e6495546d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
@@ -87,15 +87,15 @@ int AMDGPUFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(i));
OffsetBytes += MFI->getObjectSize(i);
// Each register holds 4 bytes, so we must always align the offset to at
// least 4 bytes, so that 2 frame objects won't share the same register.
- OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
+ OffsetBytes = alignTo(OffsetBytes, 4);
}
if (FI != -1)
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(FI));
return OffsetBytes / (getStackWidth(MF) * 4);
}
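
To see what the rounding above buys, a worked case for the shown return statement, assuming getStackWidth(MF) == 1 (an assumption for illustration) and two 1-byte objects with alignment 1:

//   obj0: OffsetBytes = alignTo(0, 1) = 0; += size 1 -> 1; alignTo(1, 4) = 4
//   obj1 (FI == 1): loop stops, then alignTo(4, 1) = 4
//   register index = 4 / (1 * 4) = 1
// Without the alignTo(OffsetBytes, 4) step, both objects would map to
// register 0.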
diff --git a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index bd80bb211b4..bb4bda25470 100644
--- a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -142,8 +142,8 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
}
void CFStack::updateMaxStackSize() {
- unsigned CurrentStackSize = CurrentEntries +
- (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
+ unsigned CurrentStackSize =
+ CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.h b/llvm/lib/Target/ARM/ARMCallingConv.h
index a731d00883a..71b81936240 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -211,7 +211,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
// First consume all registers that would give an unaligned object. Whether
// we go on stack or in regs, no-one will be using them in future.
- unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+ unsigned RegAlign = alignTo(Align, 4) / 4;
while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
State.AllocateReg(RegList[RegIdx++]);
diff --git a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
index b5f9d7e38f2..427ec6ea7e0 100644
--- a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -281,7 +281,7 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
unsigned RequiredExtraInstrs;
if (ExtraRange)
- RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
+ RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
else if (RangeAfterCopy > 0)
// We need an extra instruction but none is available
RequiredExtraInstrs = 1000000;
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 7a52a1c9eae..c7b06d22fd4 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -436,10 +436,10 @@ void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
// Get the number of bytes to allocate from the FrameInfo.
unsigned FrameSize = MFI->getStackSize();
// Round up the max call frame size to the max alignment on the stack.
- unsigned MaxCFA = RoundUpToAlignment(MFI->getMaxCallFrameSize(), MaxAlign);
+ unsigned MaxCFA = alignTo(MFI->getMaxCallFrameSize(), MaxAlign);
MFI->setMaxCallFrameSize(MaxCFA);
- FrameSize = MaxCFA + RoundUpToAlignment(FrameSize, MaxAlign);
+ FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
MFI->setStackSize(FrameSize);
bool AlignStack = (MaxAlign > getStackAlignment());
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index e9eaf810637..8bbe3d4df69 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1180,7 +1180,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
// for now (will return false). We need to determine the right alignment
// based on the normal alignment for the underlying machine type.
//
- unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);
+ unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
unsigned BEAlign = 0;
if (ArgSize < 8 && !Subtarget->isLittle())
diff --git a/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index a74c8abd2e2..1c7e5c50363 100644
--- a/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -122,7 +122,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
- Offset = RoundUpToAlignment(Offset + Size, Size);
+ Offset = alignTo(Offset + Size, Size);
}
unsigned MaxAlign = MFI->getMaxAlignment();
@@ -133,14 +133,14 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Iterate over other objects.
for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
- Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
+ Offset = alignTo(Offset + MFI->getObjectSize(I), MaxAlign);
// Call frame.
if (MFI->adjustsStack() && hasReservedCallFrame(MF))
- Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
- std::max(MaxAlign, getStackAlignment()));
+ Offset = alignTo(Offset + MFI->getMaxCallFrameSize(),
+ std::max(MaxAlign, getStackAlignment()));
- return RoundUpToAlignment(Offset, getStackAlignment());
+ return alignTo(Offset, getStackAlignment());
}
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
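
A small trace of the estimate above, under assumed sizes (illustrative only): callee-saved registers of 8 and 4 bytes, then one 6-byte object with MaxAlign == 8:

//   Offset = alignTo(0 + 8, 8)  -> 8
//   Offset = alignTo(8 + 4, 4)  -> 12
//   Offset = alignTo(12 + 6, 8) -> 24
// and alignTo(24, getStackAlignment()) rounds the conservative estimate
// up to the ABI stack alignment.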
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 5680130b91b..694ff9b4943 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -1873,10 +1873,10 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
auto &TD = DAG.getDataLayout();
unsigned ArgSizeInBytes =
TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
- SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
- ArgSlotSizeInBytes),
- DL, VAList.getValueType()));
+ SDValue Tmp3 =
+ DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
+ DL, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
MachinePointerInfo(SV), false, false, 0);
@@ -2604,7 +2604,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// ByValChain is the output chain of the last Memcpy node created for copying
// byval arguments to the stack.
unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
+ NextStackOffset = alignTo(NextStackOffset, StackAlignment);
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
if (!IsTailCall)
@@ -3787,8 +3787,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
int VaArgOffset;
if (ArgRegs.size() == Idx)
- VaArgOffset =
- RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
+ VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
else {
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
@@ -3854,7 +3853,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
}
// Mark the registers allocated.
- Size = RoundUpToAlignment(Size, RegSizeInBytes);
+ Size = alignTo(Size, RegSizeInBytes);
for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
Size -= RegSizeInBytes, ++I, ++NumRegs)
State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index 39b5e809c9b..348af439e29 100644
--- a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -146,7 +146,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF,
// Finally, ensure that the size is sufficiently aligned for the
// data on the stack.
if (MFI->getMaxAlignment() > 0) {
- NumBytes = RoundUpToAlignment(NumBytes, MFI->getMaxAlignment());
+ NumBytes = alignTo(NumBytes, MFI->getMaxAlignment());
}
// Update stack size with corrected value.
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 5e70ffe2223..d78633f567d 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1131,7 +1131,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
// Keep stack frames 16-byte aligned.
- ArgsSize = RoundUpToAlignment(ArgsSize, 16);
+ ArgsSize = alignTo(ArgsSize, 16);
// Varargs calls require special treatment.
if (CLI.IsVarArg)
diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
index d701594d27a..69cf294aeed 100644
--- a/llvm/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -64,7 +64,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
frameSize += 128;
// Frames with calls must also reserve space for 6 outgoing arguments
// whether they are used or not. LowerCall_64 takes care of that.
- frameSize = RoundUpToAlignment(frameSize, 16);
+ frameSize = alignTo(frameSize, 16);
} else {
// Emit the correct save instruction based on the number of bytes in
// the frame. Minimum stack frame size according to V8 ABI is:
@@ -77,7 +77,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
// Round up to next doubleword boundary -- a double-word boundary
// is required by the ABI.
- frameSize = RoundUpToAlignment(frameSize, 8);
+ frameSize = alignTo(frameSize, 8);
}
return frameSize;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
index d570d426611..d00ae703951 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyPEI.cpp
@@ -528,7 +528,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
@@ -612,7 +612,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
@@ -621,7 +621,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
@@ -654,7 +654,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
@@ -773,7 +773,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
- Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
+ Offset = alignTo(Offset, StackAlign, Skew);
}
// Update frame info to pretend that this is part of the stack...
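
The calls in this file thread a Skew argument through the alignment. A rough illustration with made-up numbers: alignTo(Offset, Align, Skew) yields the smallest N >= Offset with N % Align == Skew % Align, so with Align == 16 and Skew == 8:

//   alignTo(0, 16, 8) ->  8
//   alignTo(9, 16, 8) -> 24
// A nonzero skew aligns frame objects relative to a biased base (for
// example, an SP already displaced by pushed registers) rather than to
// absolute address 0.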
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 8632bb8254f..8f21fb49728 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1010,7 +1010,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+ NumBytes = alignTo(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -1131,7 +1131,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this function.
bool isEAXAlive = isEAXLiveIn(MF);
@@ -1430,8 +1430,7 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
- RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
@@ -1491,7 +1490,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+ NumBytes = alignTo(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -2480,7 +2479,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = RoundUpToAlignment(Amount, StackAlign);
+ Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function *Fn = MF.getFunction();
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 105b2cfb1be..38e6df06bd0 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -381,7 +381,7 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
false, false, 0);
}
// Lower to pair of consecutive word aligned loads plus some bit shifting.
- int32_t HighOffset = RoundUpToAlignment(Offset, 4);
+ int32_t HighOffset = alignTo(Offset, 4);
int32_t LowOffset = HighOffset - 4;
SDValue LowAddr, HighAddr;
if (GlobalAddressSDNode *GASD =