Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/WinException.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/BranchRelaxation.cpp | 15
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIRParser.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/PatchableFunction.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/ARC/ARCMachineFunctionInfo.h | 4
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 11
-rw-r--r--  llvm/lib/Target/Mips/MipsAsmPrinter.cpp | 3
-rw-r--r--  llvm/lib/Target/Mips/MipsConstantIslandPass.cpp | 4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchSelector.cpp | 13
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZLongBranch.cpp | 6
16 files changed, 40 insertions, 40 deletions
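
Note on the change as a whole: every file below swaps the log2-encoded alignment API on MachineFunction (getLogAlignment / setLogAlignment / ensureLogAlignment) for the byte-valued llvm::Align wrapper, converting back with Log2() only where a caller still works in the exponent domain. The following sketch is not part of the commit; it is a minimal, self-contained illustration of the relation between the two encodings, and the Align struct here is only a stand-in assumption for the real type in llvm/Support/Alignment.h.

#include <cassert>
#include <cstdint>

// Stand-in for llvm::Align: stores a power-of-two byte alignment, where
// Align(1) means "no extra alignment".
struct Align {
  uint64_t A;
  explicit Align(uint64_t V) : A(V) { assert(V && !(V & (V - 1))); }
  uint64_t value() const { return A; }
};

// Log2() recovers the old log2-based encoding from an Align value.
static unsigned Log2(Align A) {
  unsigned L = 0;
  while ((uint64_t(1) << L) < A.value())
    ++L;
  return L;
}

int main() {
  // Old API: setLogAlignment(8);   new API: setAlignment(Align(256)).
  assert(Log2(Align(256)) == 8);
  // Old API: ensureLogAlignment(4); new API: ensureAlignment(Align(16)).
  assert(Log2(Align(16)) == 4);
  // Old API: setLogAlignment(2);   new API: setAlignment(Align(4)).
  assert(Log2(Align(4)) == 2);
  return 0;
}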
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index bf0be8ecee9..077f4ac73ca 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -667,7 +667,7 @@ void AsmPrinter::EmitFunctionHeader() {
EmitLinkage(&F, CurrentFnSym);
if (MAI->hasFunctionAlignment())
- EmitAlignment(MF->getLogAlignment(), &F);
+ EmitAlignment(Log2(MF->getAlignment()), &F);
if (MAI->hasDotTypeDotSizeDirective())
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
index ef5aa0499e1..4f561771723 100644
--- a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -204,7 +204,7 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
// We want our funclet's entry point to be aligned such that no nops will be
// present after the label.
Asm->EmitAlignment(
- std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F);
+ Log2(std::max(Asm->MF->getAlignment(), MBB.getAlignment())), &F);
// Now that we've emitted the alignment directive, point at our funclet.
Asm->OutStreamer->EmitLabel(Sym);
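
Side note on the funclet hunk above: the old code took the max of two log2 values, the new code takes the max of two byte alignments and applies Log2() afterwards. For power-of-two alignments the two orders agree, because log2 is monotonic; a tiny self-contained check (hypothetical values, local log2u helper) follows.

#include <algorithm>
#include <cassert>

static unsigned log2u(unsigned V) {  // helper for this sketch only
  unsigned L = 0;
  while ((1u << L) < V) ++L;
  return L;
}

int main() {
  unsigned FuncAlign = 16, BlockAlign = 32;  // hypothetical byte alignments
  // max-then-log2 equals log2-then-max; both sides evaluate to 5 here.
  assert(log2u(std::max(FuncAlign, BlockAlign)) ==
         std::max(log2u(FuncAlign), log2u(BlockAlign)));
  return 0;
}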
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
index 4ee61cff4b4..d027d5b154e 100644
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -64,19 +64,18 @@ class BranchRelaxation : public MachineFunctionPass {
/// Compute the offset immediately following this block. \p MBB is the next
/// block.
unsigned postOffset(const MachineBasicBlock &MBB) const {
- unsigned PO = Offset + Size;
- unsigned LogAlign = MBB.getLogAlignment();
- if (LogAlign == 0)
+ const unsigned PO = Offset + Size;
+ const llvm::Align Align = MBB.getAlignment();
+ if (Align == 1)
return PO;
- unsigned AlignAmt = 1 << LogAlign;
- unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
- if (LogAlign <= ParentLogAlign)
- return PO + OffsetToAlignment(PO, AlignAmt);
+ const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
+ if (Align <= ParentAlign)
+ return PO + OffsetToAlignment(PO, Align.value());
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
- return PO + AlignAmt + OffsetToAlignment(PO, AlignAmt);
+ return PO + Align.value() + OffsetToAlignment(PO, Align.value());
}
};
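
Illustration of the postOffset() arithmetic above, under the assumption that llvm::OffsetToAlignment(Value, Alignment) returns the number of padding bytes needed to round Value up to the next multiple of Alignment (a local reimplementation is used here so the sketch is self-contained; offsets are hypothetical).

#include <cassert>
#include <cstdint>

static uint64_t offsetToAlignment(uint64_t Value, uint64_t Alignment) {
  return (Alignment - (Value % Alignment)) % Alignment;
}

int main() {
  const uint64_t PO = 0x46;  // hypothetical end offset of the previous block
  const uint64_t A  = 8;     // MBB alignment in bytes (old LogAlign == 3)

  // Block alignment <= function alignment: padding is exactly the distance
  // to the next aligned boundary.
  assert(PO + offsetToAlignment(PO, A) == 0x48);

  // Block over-aligned relative to the function: the pass cannot know the
  // block's start modulo A, so it pessimistically adds a full A bytes of
  // possible nops on top of the worst-case padding.
  assert(PO + A + offsetToAlignment(PO, A) == 0x50);
  return 0;
}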
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 2dd4fd3b9b7..72d3d1d14e9 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
}
if (YamlMF.Alignment)
- MF.setLogAlignment(Log2_32(YamlMF.Alignment));
+ MF.setAlignment(llvm::Align(YamlMF.Alignment));
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasWinCFI(YamlMF.HasWinCFI);
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 18efe1f80eb..415f28b094b 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -197,7 +197,7 @@ void MIRPrinter::print(const MachineFunction &MF) {
yaml::MachineFunction YamlMF;
YamlMF.Name = MF.getName();
- YamlMF.Alignment = 1UL << MF.getLogAlignment();
+ YamlMF.Alignment = MF.getAlignment().value();
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
YamlMF.HasWinCFI = MF.hasWinCFI();
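
The two MIR hunks above keep the serialized form unchanged: the YAML alignment field still holds a byte count (previously computed as 1UL << getLogAlignment()), so printing value() and re-parsing with Align(N) round-trips losslessly. A small sketch of that round trip, again using a stand-in Align type as an assumption rather than the real llvm::Align.

#include <cassert>
#include <cstdint>

struct Align {
  uint64_t A;
  explicit Align(uint64_t V) : A(V) { assert(V && !(V & (V - 1))); }
  uint64_t value() const { return A; }
};

int main() {
  Align FnAlign(16);                         // what MachineFunction now stores
  uint64_t YamlAlignment = FnAlign.value();  // MIRPrinter: YamlMF.Alignment = ...value()
  Align Parsed(YamlAlignment);               // MIRParser: setAlignment(Align(YamlMF.Alignment))
  assert(Parsed.value() == FnAlign.value()); // lossless round trip
  return 0;
}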
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index d136ebd437f..832895e2c92 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -173,16 +173,16 @@ void MachineFunction::init() {
FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
- LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment();
+ Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
// FIXME: Use Function::hasOptSize().
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
- LogAlignment = std::max(
- LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment());
+ Alignment = std::max(Alignment,
+ STI->getTargetLowering()->getPrefFunctionAlignment());
if (AlignAllFunctions)
- LogAlignment = AlignAllFunctions;
+ Alignment = llvm::Align(1ULL << AlignAllFunctions);
JumpTableInfo = nullptr;
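
One detail worth noting in the MachineFunction::init() hunk: the -align-all-functions option still carries a log2 value, which is why the new Align-valued field is built with a shift rather than Align(AlignAllFunctions) directly. A trivial check with a hypothetical option value:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned AlignAllFunctions = 4;                 // user asked for log2 == 4
  const uint64_t AlignmentInBytes = 1ULL << AlignAllFunctions;
  assert(AlignmentInBytes == 16);                       // i.e. llvm::Align(16)
  return 0;
}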
diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp
index 07f88597fe6..9d7605f078f 100644
--- a/llvm/lib/CodeGen/PatchableFunction.cpp
+++ b/llvm/lib/CodeGen/PatchableFunction.cpp
@@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
MIB.add(MO);
FirstActualI->eraseFromParent();
- MF.ensureLogAlignment(4);
+ MF.ensureAlignment(llvm::Align(16));
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 12118e6f5b3..1e3c7edc63f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// The starting address of all shader programs must be 256 bytes aligned.
// Regular functions just need the basic required instruction alignment.
- MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2);
+ MF.setAlignment(MFI->isEntryFunction() ? llvm::Align(256) : llvm::Align(4));
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
index 7918f6bd57c..42158151b64 100644
--- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
@@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Functions needs to be cacheline (256B) aligned.
- MF.ensureLogAlignment(8);
+ MF.ensureAlignment(llvm::Align(256));
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
index f59b0ae65db..997327fd1b8 100644
--- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
+++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
@@ -34,8 +34,8 @@ public:
explicit ARCFunctionInfo(MachineFunction &MF)
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
ReturnStackOffset(-1U), MaxCallStackReq(0) {
- // Functions are 4-byte (2**2) aligned.
- MF.setLogAlignment(2);
+ // Functions are 4-byte aligned.
+ MF.setAlignment(llvm::Align(4));
}
~ARCFunctionInfo() {}
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
index d8ca5fdda80..a8bdefa9193 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -63,7 +63,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
BBI.PostAlign = 2;
- MBB->getParent()->ensureLogAlignment(2);
+ MBB->getParent()->ensureAlignment(llvm::Align(4));
}
}
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index ae62d9789bb..9d3e820f96c 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureLogAlignment(2);
+ MF->ensureAlignment(llvm::Align(4));
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -493,7 +493,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->ensureLogAlignment(BB->getLogAlignment());
+ MF->ensureAlignment(BB->getAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
@@ -686,7 +686,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
// The known bits of the entry block offset are determined by the function
// alignment.
- BBInfo.front().KnownBits = MF->getLogAlignment();
+ BBInfo.front().KnownBits = Log2(MF->getAlignment());
// Compute block offsets and known bits.
BBUtils->adjustBBOffsetsAfter(&MF->front());
@@ -1041,7 +1041,8 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
// the offset of the instruction. Also account for unknown alignment padding
// in blocks between CPE and the user.
if (CPEOffset < UserOffset)
- UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign);
+ UserOffset +=
+ Growth + UnknownPadding(Log2(MF->getAlignment()), CPELogAlign);
} else
// CPE fits in existing padding.
Growth = 0;
@@ -1316,7 +1317,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getLogAlignment();
+ unsigned LogAlign = Log2(MF->getAlignment());
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
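
The ARM constant-island pass keeps working in the log2 domain for its padding estimates, so the Align value is converted back with Log2() before being passed to UnknownPadding(). The sketch below assumes UnknownPadding(LogAlign, KnownBits) returns the worst-case padding bytes when only the low KnownBits of an offset are known and the required alignment is 1 << LogAlign; the local reimplementation is for illustration only.

#include <cassert>

static unsigned unknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}

int main() {
  // Function aligned to 16 bytes (Log2(Align(16)) == 4) but only the low
  // 2 bits of an offset are known: up to 12 bytes of padding may appear.
  assert(unknownPadding(4, 2) == 12);
  // Offset known up to the alignment: no hidden padding.
  assert(unknownPadding(2, 2) == 0);
  return 0;
}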
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 750d0c5d463..8840a4938c9 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -400,7 +400,8 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
// NaCl sandboxing requires that indirect call instructions are masked.
// This means that function entry points should be bundle-aligned.
if (Subtarget->isTargetNaCl())
- EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN));
+ EmitAlignment(
+ std::max(Log2(MF->getAlignment()), MIPS_NACL_BUNDLE_LOG_ALIGN));
if (Subtarget->inMicroMipsMode()) {
TS.emitDirectiveSetMicroMips();
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index 8907a72ac87..4cd64c67239 100644
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -542,7 +542,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->ensureLogAlignment(BB->getLogAlignment());
+ MF->ensureAlignment(BB->getAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
@@ -1259,7 +1259,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getLogAlignment();
+ unsigned LogAlign = Log2(MF->getAlignment());
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 353a3481132..6b95d0d0ce7 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -81,21 +81,20 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
/// original Offset.
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
unsigned Offset) {
- unsigned LogAlign = MBB.getLogAlignment();
- if (!LogAlign)
+ const llvm::Align Align = MBB.getAlignment();
+ if (Align == 1)
return 0;
- unsigned AlignAmt = 1 << LogAlign;
- unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
+ const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
- if (LogAlign <= ParentLogAlign)
- return OffsetToAlignment(Offset, AlignAmt);
+ if (Align <= ParentAlign)
+ return OffsetToAlignment(Offset, Align.value());
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
if (FirstImpreciseBlock < 0)
FirstImpreciseBlock = MBB.getNumber();
- return AlignAmt + OffsetToAlignment(Offset, AlignAmt);
+ return Align.value() + OffsetToAlignment(Offset, Align.value());
}
/// We need to be careful about the offset of the first block in the function
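
In the PPC hunk above, the old "LogAlign == 0" test becomes "Align == 1" (a single-byte alignment imposes no adjustment), and ordering two Align values by byte count is equivalent to ordering their exponents for powers of two. A stand-in sketch of those comparisons; the operators here are assumptions standing in for what llvm::Align provides.

#include <cassert>
#include <cstdint>

struct Align {
  uint64_t A;
  explicit Align(uint64_t V) : A(V) { assert(V && !(V & (V - 1))); }
  uint64_t value() const { return A; }
};
static bool operator==(Align L, uint64_t R) { return L.value() == R; }
static bool operator<=(Align L, Align R) { return L.value() <= R.value(); }

int main() {
  assert(Align(1) == 1);          // old check: LogAlign == 0 -> no adjustment
  assert(Align(8) <= Align(16));  // old check: LogAlign (3) <= ParentLogAlign (4)
  return 0;
}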
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index 2b2c80cdd2e..452c439e668 100644
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -276,7 +276,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
Terminators.clear();
Terminators.reserve(NumBlocks);
- BlockPosition Position(MF->getLogAlignment());
+ BlockPosition Position(Log2(MF->getAlignment()));
for (unsigned I = 0; I < NumBlocks; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
MBBInfo &Block = MBBs[I];
@@ -340,7 +340,7 @@ bool SystemZLongBranch::mustRelaxABranch() {
// must be long.
void SystemZLongBranch::setWorstCaseAddresses() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
- BlockPosition Position(MF->getLogAlignment());
+ BlockPosition Position(Log2(MF->getAlignment()));
for (auto &Block : MBBs) {
skipNonTerminators(Position, Block);
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
@@ -441,7 +441,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
// Run a shortening pass and relax any branches that need to be relaxed.
void SystemZLongBranch::relaxBranches() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
- BlockPosition Position(MF->getLogAlignment());
+ BlockPosition Position(Log2(MF->getAlignment()));
for (auto &Block : MBBs) {
skipNonTerminators(Position, Block);
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {