Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp  6
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/WinException.cpp  4
-rw-r--r--  llvm/lib/CodeGen/BranchRelaxation.cpp  14
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIParser.cpp  2
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIRParser.cpp  2
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp  7
-rw-r--r--  llvm/lib/CodeGen/MachineBasicBlock.cpp  4
-rw-r--r--  llvm/lib/CodeGen/MachineBlockPlacement.cpp  27
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp  17
-rw-r--r--  llvm/lib/CodeGen/PatchableFunction.cpp  2
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.cpp  32
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.h  10
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp  24
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.h  3
-rw-r--r--  llvm/lib/Target/ARC/ARCMachineFunctionInfo.h  2
-rw-r--r--  llvm/lib/Target/ARM/ARM.td  2
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp  4
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp  39
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.h  6
-rw-r--r--  llvm/lib/Target/AVR/AVRISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/BPF/BPFISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp  4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp  4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  6
-rw-r--r--  llvm/lib/Target/Lanai/LanaiISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h  2
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp  2
-rw-r--r--  llvm/lib/Target/Mips/MipsAsmPrinter.cpp  6
-rw-r--r--  llvm/lib/Target/Mips/MipsBranchExpansion.cpp  2
-rw-r--r--  llvm/lib/Target/Mips/MipsConstantIslandPass.cpp  29
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchSelector.cpp  26
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp  12
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.h  2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZLongBranch.cpp  21
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/X86/X86RetpolineThunks.cpp  2
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp  4
48 files changed, 193 insertions, 188 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 220c4758956..bf0be8ecee9 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -667,7 +667,7 @@ void AsmPrinter::EmitFunctionHeader() {
EmitLinkage(&F, CurrentFnSym);
if (MAI->hasFunctionAlignment())
- EmitAlignment(MF->getAlignment(), &F);
+ EmitAlignment(MF->getLogAlignment(), &F);
if (MAI->hasDotTypeDotSizeDirective())
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
@@ -2905,8 +2905,8 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) {
}
// Emit an alignment directive for this block, if needed.
- if (unsigned Align = MBB.getAlignment())
- EmitAlignment(Align);
+ if (unsigned LogAlign = MBB.getLogAlignment())
+ EmitAlignment(LogAlign);
MCCodePaddingContext Context;
setupCodePaddingContext(MBB, Context);
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
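Throughout this patch the convention is that EmitAlignment and the renamed getters traffic in log2(bytes): a value of N requests a 1 << N byte boundary. A minimal sketch of that relationship, in plain C++ with illustrative names:

    #include <cassert>

    int main() {
      unsigned LogAlignment = 4;                   // what getLogAlignment() returns
      unsigned BoundaryBytes = 1u << LogAlignment; // the boundary the directive requests
      assert(BoundaryBytes == 16);                 // log2 value 4 means 16-byte alignment
      return 0;
    }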
diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
index 155e91ce61a..ef5aa0499e1 100644
--- a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -203,8 +203,8 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
// We want our funclet's entry point to be aligned such that no nops will be
// present after the label.
- Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()),
- &F);
+ Asm->EmitAlignment(
+ std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F);
// Now that we've emitted the alignment directive, point at our funclet.
Asm->OutStreamer->EmitLabel(Sym);
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
index 55fbf0163dd..4ee61cff4b4 100644
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -65,13 +65,13 @@ class BranchRelaxation : public MachineFunctionPass {
/// block.
unsigned postOffset(const MachineBasicBlock &MBB) const {
unsigned PO = Offset + Size;
- unsigned Align = MBB.getAlignment();
- if (Align == 0)
+ unsigned LogAlign = MBB.getLogAlignment();
+ if (LogAlign == 0)
return PO;
- unsigned AlignAmt = 1 << Align;
- unsigned ParentAlign = MBB.getParent()->getAlignment();
- if (Align <= ParentAlign)
+ unsigned AlignAmt = 1 << LogAlign;
+ unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
+ if (LogAlign <= ParentLogAlign)
return PO + OffsetToAlignment(PO, AlignAmt);
// The alignment of this MBB is larger than the function's alignment, so we
@@ -128,9 +128,9 @@ void BranchRelaxation::verify() {
#ifndef NDEBUG
unsigned PrevNum = MF->begin()->getNumber();
for (MachineBasicBlock &MBB : *MF) {
- unsigned Align = MBB.getAlignment();
+ unsigned LogAlign = MBB.getLogAlignment();
unsigned Num = MBB.getNumber();
- assert(BlockInfo[Num].Offset % (1u << Align) == 0);
+ assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0);
assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset);
assert(BlockInfo[Num].Size == computeBlockSize(MBB));
PrevNum = Num;
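The padding that postOffset() adds is the distance from the current offset to the next multiple of 1 << LogAlign, which is what OffsetToAlignment computes. A standalone sketch of that arithmetic, assuming only standard C++ (paddingToLogAlign is an illustrative name, not LLVM's API):

    #include <cassert>
    #include <cstdint>

    // Bytes needed to raise Offset to the next multiple of 1 << LogAlign.
    static uint64_t paddingToLogAlign(uint64_t Offset, unsigned LogAlign) {
      uint64_t AlignAmt = uint64_t(1) << LogAlign;
      return (AlignAmt - Offset % AlignAmt) % AlignAmt;
    }

    int main() {
      assert(paddingToLogAlign(10, 2) == 2); // next 4-byte boundary is at 12
      assert(paddingToLogAlign(16, 4) == 0); // already on a 16-byte boundary
      return 0;
    }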
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index a8fa2f1195d..f8c4dd66559 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition(
return error(Loc, Twine("redefinition of machine basic block with id #") +
Twine(ID));
if (Alignment)
- MBB->setAlignment(Alignment);
+ MBB->setLogAlignment(Log2_32(Alignment));
if (HasAddressTaken)
MBB->setHasAddressTaken();
MBB->setIsEHPad(IsLandingPad);
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 48ec0b2d6cf..2dd4fd3b9b7 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
}
if (YamlMF.Alignment)
- MF.setAlignment(YamlMF.Alignment);
+ MF.setLogAlignment(Log2_32(YamlMF.Alignment));
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasWinCFI(YamlMF.HasWinCFI);
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 7febb11dcb8..18efe1f80eb 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -197,7 +197,7 @@ void MIRPrinter::print(const MachineFunction &MF) {
yaml::MachineFunction YamlMF;
YamlMF.Name = MF.getName();
- YamlMF.Alignment = MF.getAlignment();
+ YamlMF.Alignment = 1UL << MF.getLogAlignment();
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
YamlMF.HasWinCFI = MF.hasWinCFI();
@@ -629,9 +629,10 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
OS << "landing-pad";
HasAttributes = true;
}
- if (MBB.getAlignment()) {
+ if (MBB.getLogAlignment()) {
OS << (HasAttributes ? ", " : " (");
- OS << "align " << MBB.getAlignment();
+ OS << "align "
+ << (1UL << MBB.getLogAlignment());
HasAttributes = true;
}
if (HasAttributes)
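The parser and printer keep serialized MIR in bytes while the in-memory field is now a log2 value, so the two sides must stay inverses of each other. A sketch of the round trip, assuming a power-of-two input (Log2_32 is LLVM's existing helper from llvm/Support/MathExtras.h):

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    int main() {
      unsigned YamlAlignment = 16;                          // bytes, as serialized in MIR
      unsigned LogAlignment = llvm::Log2_32(YamlAlignment); // stored by setLogAlignment()
      assert(LogAlignment == 4);
      assert((1UL << LogAlignment) == YamlAlignment);       // what MIRPrinter writes back
      return 0;
    }

Note that Log2_32 truncates, so the round trip is only exact when the serialized alignment is a power of two, as MIR alignments are expected to be.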
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 050934daa5a..bd2ab45f669 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,9 +326,9 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "landing-pad";
HasAttributes = true;
}
- if (getAlignment()) {
+ if (getLogAlignment()) {
OS << (HasAttributes ? ", " : " (");
- OS << "align " << getAlignment();
+ OS << "align " << getLogAlignment();
HasAttributes = true;
}
if (HasAttributes)
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 641f14d617c..f2a64faab2e 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -79,16 +79,17 @@ STATISTIC(CondBranchTakenFreq,
STATISTIC(UncondBranchTakenFreq,
"Potential frequency of taking unconditional branches");
-static cl::opt<unsigned> AlignAllBlock("align-all-blocks",
- cl::desc("Force the alignment of all "
- "blocks in the function."),
- cl::init(0), cl::Hidden);
+static cl::opt<unsigned> AlignAllBlock(
+ "align-all-blocks",
+ cl::desc("Force the alignment of all blocks in the function in log2 format "
+ "(e.g 4 means align on 16B boundaries)."),
+ cl::init(0), cl::Hidden);
static cl::opt<unsigned> AlignAllNonFallThruBlocks(
"align-all-nofallthru-blocks",
- cl::desc("Force the alignment of all "
- "blocks that have no fall-through predecessors (i.e. don't add "
- "nops that are executed)."),
+ cl::desc("Force the alignment of all blocks that have no fall-through "
+ "predecessors (i.e. don't add nops that are executed). In log2 "
+ "format (e.g 4 means align on 16B boundaries)."),
cl::init(0), cl::Hidden);
// FIXME: Find a good default for this flag and remove the flag.
@@ -2763,8 +2764,8 @@ void MachineBlockPlacement::alignBlocks() {
if (!L)
continue;
- unsigned Align = TLI->getPrefLoopAlignment(L);
- if (!Align)
+ unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
+ if (!LogAlign)
continue; // Don't care about loop alignment.
// If the block is cold relative to the function entry don't waste space
@@ -2788,7 +2789,7 @@ void MachineBlockPlacement::alignBlocks() {
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
if (!LayoutPred->isSuccessor(ChainBB)) {
- ChainBB->setAlignment(Align);
+ ChainBB->setLogAlignment(LogAlign);
continue;
}
@@ -2800,7 +2801,7 @@ void MachineBlockPlacement::alignBlocks() {
MBPI->getEdgeProbability(LayoutPred, ChainBB);
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
if (LayoutEdgeFreq <= (Freq * ColdProb))
- ChainBB->setAlignment(Align);
+ ChainBB->setLogAlignment(LogAlign);
}
}
@@ -3062,14 +3063,14 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
for (MachineBasicBlock &MBB : MF)
- MBB.setAlignment(AlignAllBlock);
+ MBB.setLogAlignment(AlignAllBlock);
else if (AlignAllNonFallThruBlocks) {
// Align all of the blocks that have no fall-through predecessors to a
// specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
auto LayoutPred = std::prev(MBI);
if (!LayoutPred->isSuccessor(&*MBI))
- MBI->setAlignment(AlignAllNonFallThruBlocks);
+ MBI->setLogAlignment(AlignAllNonFallThruBlocks);
}
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index b771dd1a351..d136ebd437f 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -78,10 +78,11 @@ using namespace llvm;
#define DEBUG_TYPE "codegen"
-static cl::opt<unsigned>
-AlignAllFunctions("align-all-functions",
- cl::desc("Force the alignment of all functions."),
- cl::init(0), cl::Hidden);
+static cl::opt<unsigned> AlignAllFunctions(
+ "align-all-functions",
+ cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
+ "means align on 16B boundaries)."),
+ cl::init(0), cl::Hidden);
static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
using P = MachineFunctionProperties::Property;
@@ -172,16 +173,16 @@ void MachineFunction::init() {
FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
- Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
+ LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
// FIXME: Use Function::hasOptSize().
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
- Alignment = std::max(Alignment,
- STI->getTargetLowering()->getPrefFunctionAlignment());
+ LogAlignment = std::max(
+ LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment());
if (AlignAllFunctions)
- Alignment = AlignAllFunctions;
+ LogAlignment = AlignAllFunctions;
JumpTableInfo = nullptr;
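The effective function alignment is a policy decision layered over the log2 values: start from the required minimum, raise it to the preferred value unless the function is optimized for size, then let the command-line override win. A condensed sketch of that selection order (illustrative function, not LLVM code):

    #include <algorithm>
    #include <cassert>

    static unsigned chooseLogAlignment(unsigned MinLogAlign, unsigned PrefLogAlign,
                                       bool OptimizeForSize, unsigned ForcedLogAlign) {
      unsigned LogAlign = MinLogAlign;
      if (!OptimizeForSize)
        LogAlign = std::max(LogAlign, PrefLogAlign);
      if (ForcedLogAlign) // -align-all-functions=N
        LogAlign = ForcedLogAlign;
      return LogAlign;
    }

    int main() {
      assert(chooseLogAlignment(2, 4, false, 0) == 4); // preferred wins at -O2
      assert(chooseLogAlignment(2, 4, true, 0) == 2);  // size-optimized keeps the minimum
      assert(chooseLogAlignment(2, 4, true, 5) == 5);  // the flag overrides both
      return 0;
    }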
diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp
index a3fa1b0ad8e..07f88597fe6 100644
--- a/llvm/lib/CodeGen/PatchableFunction.cpp
+++ b/llvm/lib/CodeGen/PatchableFunction.cpp
@@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
MIB.add(MO);
FirstActualI->eraseFromParent();
- MF.ensureAlignment(4);
+ MF.ensureLogAlignment(4);
return true;
}
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index e9a7d4b5252..970b2067d42 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -583,9 +583,9 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
BooleanFloatContents = UndefinedBooleanContent;
BooleanVectorContents = UndefinedBooleanContent;
SchedPreferenceInfo = Sched::ILP;
- MinFunctionAlignment = 0;
- PrefFunctionAlignment = 0;
- PrefLoopAlignment = 0;
+ MinFunctionLogAlignment = 0;
+ PrefFunctionLogAlignment = 0;
+ PrefLoopLogAlignment = 0;
GatherAllAliasesMaxDepth = 18;
MinStackArgumentAlignment = 1;
// TODO: the default will be switched to 0 in the next commit, along
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 00b86eea3cf..9eb7047cc6b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -640,10 +640,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
EnableExtLdPromotion = true;
// Set required alignment.
- setMinFunctionAlignment(2);
+ setMinFunctionLogAlignment(2);
// Set preferred alignments.
- setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
- setPrefLoopAlignment(STI.getPrefLoopAlignment());
+ setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment());
+ setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment());
// Only change the limit for entries in a jump table if specified by
// the sub target, but not at the command line.
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 05112afb417..558bea368ef 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -71,22 +71,22 @@ void AArch64Subtarget::initializeProperties() {
case CortexA35:
break;
case CortexA53:
- PrefFunctionAlignment = 3;
+ PrefFunctionLogAlignment = 3;
break;
case CortexA55:
break;
case CortexA57:
MaxInterleaveFactor = 4;
- PrefFunctionAlignment = 4;
+ PrefFunctionLogAlignment = 4;
break;
case CortexA65:
- PrefFunctionAlignment = 3;
+ PrefFunctionLogAlignment = 3;
break;
case CortexA72:
case CortexA73:
case CortexA75:
case CortexA76:
- PrefFunctionAlignment = 4;
+ PrefFunctionLogAlignment = 4;
break;
case Cyclone:
CacheLineSize = 64;
@@ -97,14 +97,14 @@ void AArch64Subtarget::initializeProperties() {
case ExynosM1:
MaxInterleaveFactor = 4;
MaxJumpTableSize = 8;
- PrefFunctionAlignment = 4;
- PrefLoopAlignment = 3;
+ PrefFunctionLogAlignment = 4;
+ PrefLoopLogAlignment = 3;
break;
case ExynosM3:
MaxInterleaveFactor = 4;
MaxJumpTableSize = 20;
- PrefFunctionAlignment = 5;
- PrefLoopAlignment = 4;
+ PrefFunctionLogAlignment = 5;
+ PrefLoopLogAlignment = 4;
break;
case Falkor:
MaxInterleaveFactor = 4;
@@ -126,10 +126,10 @@ void AArch64Subtarget::initializeProperties() {
MinVectorRegisterBitWidth = 128;
break;
case NeoverseE1:
- PrefFunctionAlignment = 3;
+ PrefFunctionLogAlignment = 3;
break;
case NeoverseN1:
- PrefFunctionAlignment = 4;
+ PrefFunctionLogAlignment = 4;
break;
case Saphira:
MaxInterleaveFactor = 4;
@@ -138,8 +138,8 @@ void AArch64Subtarget::initializeProperties() {
break;
case ThunderX2T99:
CacheLineSize = 64;
- PrefFunctionAlignment = 3;
- PrefLoopAlignment = 2;
+ PrefFunctionLogAlignment = 3;
+ PrefLoopLogAlignment = 2;
MaxInterleaveFactor = 4;
PrefetchDistance = 128;
MinPrefetchStride = 1024;
@@ -152,15 +152,15 @@ void AArch64Subtarget::initializeProperties() {
case ThunderXT81:
case ThunderXT83:
CacheLineSize = 128;
- PrefFunctionAlignment = 3;
- PrefLoopAlignment = 2;
+ PrefFunctionLogAlignment = 3;
+ PrefLoopLogAlignment = 2;
// FIXME: remove this to enable 64-bit SLP if performance looks good.
MinVectorRegisterBitWidth = 128;
break;
case TSV110:
CacheLineSize = 64;
- PrefFunctionAlignment = 4;
- PrefLoopAlignment = 2;
+ PrefFunctionLogAlignment = 4;
+ PrefLoopLogAlignment = 2;
break;
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 3f9d9f5a3b2..ef360926aa9 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -198,8 +198,8 @@ protected:
uint16_t PrefetchDistance = 0;
uint16_t MinPrefetchStride = 1;
unsigned MaxPrefetchIterationsAhead = UINT_MAX;
- unsigned PrefFunctionAlignment = 0;
- unsigned PrefLoopAlignment = 0;
+ unsigned PrefFunctionLogAlignment = 0;
+ unsigned PrefLoopLogAlignment = 0;
unsigned MaxJumpTableSize = 0;
unsigned WideningBaseCost = 0;
@@ -359,8 +359,10 @@ public:
unsigned getMaxPrefetchIterationsAhead() const {
return MaxPrefetchIterationsAhead;
}
- unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
- unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; }
+ unsigned getPrefFunctionLogAlignment() const {
+ return PrefFunctionLogAlignment;
+ }
+ unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 68792a8b55e..12118e6f5b3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// The starting address of all shader programs must be 256-byte aligned.
// Regular functions just need the basic required instruction alignment.
- MF.setAlignment(MFI->isEntryFunction() ? 8 : 2);
+ MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2);
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
index 3fb18862fca..7918f6bd57c 100644
--- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
@@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Functions need to be cacheline (256B) aligned.
- MF.ensureAlignment(8);
+ MF.ensureLogAlignment(8);
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5516f3742dd..7430e878a09 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10681,15 +10681,15 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
-unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
- const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
- const unsigned CacheLineAlign = 6; // log2(64)
+unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
+ const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML);
+ const unsigned CacheLineLogAlign = 6; // log2(64)
// Pre-GFX10 targets did not benefit from loop alignment
if (!ML || DisableLoopAlignment ||
(getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
getSubtarget()->hasInstFwdPrefetchBug())
- return PrefAlign;
+ return PrefLogAlign;
// On GFX10 I$ is 4 x 64 bytes cache lines.
// By default prefetcher keeps one cache line behind and reads two ahead.
@@ -10703,28 +10703,28 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const MachineBasicBlock *Header = ML->getHeader();
- if (Header->getAlignment() != PrefAlign)
- return Header->getAlignment(); // Already processed.
+ if (Header->getLogAlignment() != PrefLogAlign)
+ return Header->getLogAlignment(); // Already processed.
unsigned LoopSize = 0;
for (const MachineBasicBlock *MBB : ML->blocks()) {
// If an inner loop block is aligned, assume on average half of the alignment
// size is added as nops.
if (MBB != Header)
- LoopSize += (1 << MBB->getAlignment()) / 2;
+ LoopSize += (1 << MBB->getLogAlignment()) / 2;
for (const MachineInstr &MI : *MBB) {
LoopSize += TII->getInstSizeInBytes(MI);
if (LoopSize > 192)
- return PrefAlign;
+ return PrefLogAlign;
}
}
if (LoopSize <= 64)
- return PrefAlign;
+ return PrefLogAlign;
if (LoopSize <= 128)
- return CacheLineAlign;
+ return CacheLineLogAlign;
// If any of the parent loops is surrounded by prefetch instructions, do not
// insert a new one for the inner loop, as that would reset the parent's settings.
@@ -10732,7 +10732,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
if (MachineBasicBlock *Exit = P->getExitBlock()) {
auto I = Exit->getFirstNonDebugInstr();
if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
- return CacheLineAlign;
+ return CacheLineLogAlign;
}
}
@@ -10749,7 +10749,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
.addImm(2); // prefetch 1 line behind PC
}
- return CacheLineAlign;
+ return CacheLineLogAlign;
}
LLVM_ATTRIBUTE_UNUSED
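Stripped of the prefetch bookkeeping, the GFX10 heuristic above buckets the loop by its byte size: up to one 64-byte I$ line, keep the generic preference; up to three lines, align the header to a cache line (with prefetch instructions inserted for the 128-192 byte case); beyond 192 bytes, alignment no longer pays. A sketch of just those size thresholds (illustrative; the real function also walks parent loops):

    #include <cassert>

    static unsigned pickLoopLogAlign(unsigned LoopSizeBytes, unsigned PrefLogAlign) {
      const unsigned CacheLineLogAlign = 6; // log2(64)
      if (LoopSizeBytes <= 64 || LoopSizeBytes > 192)
        return PrefLogAlign;    // too small to matter or too big to help
      return CacheLineLogAlign; // fits in two or three cache lines
    }

    int main() {
      assert(pickLoopLogAlign(48, 0) == 0);
      assert(pickLoopLogAlign(100, 0) == 6); // 64-byte-align the loop header
      return 0;
    }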
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 4e98b2aab3a..217152f78f2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -379,8 +379,7 @@ public:
unsigned Depth = 0) const override;
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
- unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
-
+ unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
void allocateHSAUserSGPRs(CCState &CCInfo,
MachineFunction &MF,
diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
index 31aa5b93246..f59b0ae65db 100644
--- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
+++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
@@ -35,7 +35,7 @@ public:
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
ReturnStackOffset(-1U), MaxCallStackReq(0) {
// Functions are 4-byte (2**2) aligned.
- MF.setAlignment(2);
+ MF.setLogAlignment(2);
}
~ARCFunctionInfo() {}
diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td
index 8dcddd25429..6616ae5d160 100644
--- a/llvm/lib/Target/ARM/ARM.td
+++ b/llvm/lib/Target/ARM/ARM.td
@@ -302,7 +302,7 @@ def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
"Prefer 32-bit Thumb instrs">;
-def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2",
+def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2",
"Prefer 32-bit alignment for loops">;
def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1",
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
index 2de90e816b3..d8ca5fdda80 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -63,7 +63,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
BBI.PostAlign = 2;
- MBB->getParent()->ensureAlignment(2);
+ MBB->getParent()->ensureLogAlignment(2);
}
}
@@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
- unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment();
+ unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment();
unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 5283bb52ee4..ae62d9789bb 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureAlignment(2);
+ MF->ensureLogAlignment(2);
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -486,20 +486,21 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
- unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+ unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
- BB->setAlignment(MaxAlign);
+ BB->setLogAlignment(MaxLogAlign);
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->ensureAlignment(BB->getAlignment());
+ MF->ensureLogAlignment(BB->getLogAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
// track of the insertion point for each alignment. We are going to bucket
// sort the entries as they are created.
- SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
+ BB->end());
// Add all of the constants from the constant pool to the end block, use an
// identity mapping of CPI's to CPE's.
@@ -524,7 +525,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
// Ensure that future entries with higher alignment get inserted before
// CPEMI. This is bucket sort with iterators.
- for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
if (InsPoint[a] == InsAt)
InsPoint[a] = CPEMI;
@@ -685,7 +686,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
// The known bits of the entry block offset are determined by the function
// alignment.
- BBInfo.front().KnownBits = MF->getAlignment();
+ BBInfo.front().KnownBits = MF->getLogAlignment();
// Compute block offsets and known bits.
BBUtils->adjustBBOffsetsAfter(&MF->front());
@@ -1015,14 +1016,14 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
BBInfoVector &BBInfo = BBUtils->getBBInfo();
unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
- unsigned NextBlockOffset, NextBlockAlignment;
+ unsigned NextBlockOffset, NextBlockLogAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
if (++NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = 0;
+ NextBlockLogAlignment = 0;
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
- NextBlockAlignment = NextBlock->getAlignment();
+ NextBlockLogAlignment = NextBlock->getLogAlignment();
}
unsigned Size = U.CPEMI->getOperand(2).getImm();
unsigned CPEEnd = CPEOffset + Size;
@@ -1034,13 +1035,13 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
+ Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment);
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
// in blocks between CPE and the user.
if (CPEOffset < UserOffset)
- UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
+ UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign);
} else
// CPE fits in existing padding.
Growth = 0;
@@ -1315,7 +1316,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getAlignment();
+ unsigned LogAlign = MF->getLogAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
@@ -1493,9 +1494,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
- const unsigned Align = isThumb ? 1 : 2;
- if (NewMBB->getAlignment() < Align)
- NewMBB->setAlignment(Align);
+ const unsigned LogAlign = isThumb ? 1 : 2;
+ if (NewMBB->getLogAlignment() < LogAlign)
+ NewMBB->setLogAlignment(LogAlign);
// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1524,7 +1525,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
decrementCPEReferenceCount(CPI, CPEMI);
// Mark the basic block as aligned as required by the const-pool entry.
- NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+ NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI));
// Increase the size of the island block to account for the new entry.
BBUtils->adjustBBSize(NewIsland, Size);
@@ -1558,10 +1559,10 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(0);
+ CPEBB->setLogAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
+ CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin()));
BBUtils->adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 907517461e7..bbaa9431dec 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1419,9 +1419,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopAlignment(Subtarget->getPrefLoopAlignment());
+ setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment());
- setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
+ setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2);
if (Subtarget->isThumb() || Subtarget->isThumb2())
setTargetDAGCombine(ISD::ABS);
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 54443d2126f..155fbce98a8 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -300,7 +300,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
LdStMultipleTiming = SingleIssuePlusExtras;
MaxInterleaveFactor = 4;
if (!isThumb())
- PrefLoopAlignment = 3;
+ PrefLoopLogAlignment = 3;
break;
case Kryo:
break;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index dde9dcbdb1c..0491420c51f 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -470,7 +470,7 @@ protected:
int PreISelOperandLatencyAdjustment = 2;
/// What alignment is preferred for loop bodies, in log2(bytes).
- unsigned PrefLoopAlignment = 0;
+ unsigned PrefLoopLogAlignment = 0;
/// The cost factor for MVE instructions, representing the multiple beats an
// instruction can take. The default is 2, (set in initSubtargetFeatures so
@@ -859,9 +859,7 @@ public:
return isROPI() || !isTargetELF();
}
- unsigned getPrefLoopAlignment() const {
- return PrefLoopAlignment;
- }
+ unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; }
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 3d5c481b506..12b08e8ece5 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -236,7 +236,7 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
setLibcallName(RTLIB::SIN_F32, "sin");
setLibcallName(RTLIB::COS_F32, "cos");
- setMinFunctionAlignment(1);
+ setMinFunctionLogAlignment(1);
setMinimumJumpTableEntries(UINT_MAX);
}
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 716bef461a9..4521c6de4ee 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -133,8 +133,8 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
setBooleanContents(ZeroOrOneBooleanContent);
// Function alignments (log2)
- setMinFunctionAlignment(3);
- setPrefFunctionAlignment(3);
+ setMinFunctionLogAlignment(3);
+ setPrefFunctionLogAlignment(3);
if (BPFExpandMemcpyInOrder) {
// LLVM generic code will try to expand memcpy into load/store pairs at this
diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
index ee93739b2c7..fec081d324e 100644
--- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -105,11 +105,11 @@ void HexagonBranchRelaxation::computeOffset(MachineFunction &MF,
// offset of the current instruction from the start.
unsigned InstOffset = 0;
for (auto &B : MF) {
- if (B.getAlignment()) {
+ if (B.getLogAlignment()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
- int ByteAlign = (1u << B.getAlignment()) - 1;
+ int ByteAlign = (1u << B.getLogAlignment()) - 1;
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
}
OffsetMap[&B] = InstOffset;
diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index f7edc168de4..12eda8af363 100644
--- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -114,11 +114,11 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
// First pass - compute the offset of each basic block.
for (const MachineBasicBlock &MBB : MF) {
- if (MBB.getAlignment()) {
+ if (MBB.getLogAlignment()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
- int ByteAlign = (1u << MBB.getAlignment()) - 1;
+ int ByteAlign = (1u << MBB.getLogAlignment()) - 1;
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
}
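Both Hexagon passes round offsets up with the same mask trick: with Mask = (1u << LogAlign) - 1, adding the mask and clearing the low bits lands on the next boundary (equivalent to the modulo form used in BranchRelaxation). A minimal check, with illustrative names:

    #include <cassert>

    static unsigned roundUpToLogAlign(unsigned Offset, unsigned LogAlign) {
      unsigned Mask = (1u << LogAlign) - 1;
      return (Offset + Mask) & ~Mask;
    }

    int main() {
      assert(roundUpToLogAlign(10, 2) == 12); // next 4-byte boundary
      assert(roundUpToLogAlign(12, 2) == 12); // already aligned, no padding
      return 0;
    }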
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index a443a29e061..8cdf06252ad 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1235,9 +1235,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
Subtarget(ST) {
auto &HRI = *Subtarget.getRegisterInfo();
- setPrefLoopAlignment(4);
- setPrefFunctionAlignment(4);
- setMinFunctionAlignment(2);
+ setPrefLoopLogAlignment(4);
+ setPrefFunctionLogAlignment(4);
+ setMinFunctionLogAlignment(2);
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 86e7769caf7..4a2f6dac6e3 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -145,8 +145,8 @@ LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::XOR);
// Function alignments (log2)
- setMinFunctionAlignment(2);
- setPrefFunctionAlignment(2);
+ setMinFunctionLogAlignment(2);
+ setPrefFunctionLogAlignment(2);
setJumpIsExpensive(true);
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index c400fa5e45f..2701ca57cfd 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -327,8 +327,8 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
- setMinFunctionAlignment(1);
- setPrefFunctionAlignment(1);
+ setMinFunctionLogAlignment(1);
+ setPrefFunctionLogAlignment(1);
}
SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
index ad5aff6552f..94265250e9b 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -14,7 +14,7 @@
namespace llvm {
// Log2 of the NaCl MIPS sandbox's instruction bundle size.
-static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u;
+static const unsigned MIPS_NACL_BUNDLE_LOG_ALIGN = 4u;
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore = nullptr);
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
index c050db8a17f..191ac823641 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
@@ -270,7 +270,7 @@ MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context,
S->getAssembler().setRelaxAll(true);
// Set bundle-alignment as required by the NaCl ABI for the target.
- S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);
+ S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_LOG_ALIGN);
return S;
}
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index b9e67bb470b..750d0c5d463 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -400,7 +400,7 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
// NaCl sandboxing requires that indirect call instructions are masked.
// This means that function entry points should be bundle-aligned.
if (Subtarget->isTargetNaCl())
- EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN));
+ EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN));
if (Subtarget->inMicroMipsMode()) {
TS.emitDirectiveSetMicroMips();
@@ -1278,14 +1278,14 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;
for (unsigned J = 0; J < MBBs.size(); ++J)
- MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ MBBs[J]->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
}
}
// If basic block address is taken, block can be target of indirect branch.
for (auto &MBB : MF) {
if (MBB.hasAddressTaken())
- MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ MBB.setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
}
}
diff --git a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
index 1523a6c020a..8b558ce4039 100644
--- a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
+++ b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
@@ -507,7 +507,7 @@ void MipsBranchExpansion::expandToLongBranch(MBBInfo &I) {
.addImm(0);
if (STI->isTargetNaCl())
// Bundle-align the target of indirect branch JR.
- TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ TgtMBB->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
// In NaCl, modifying the sp is not allowed in branch delay slot.
// For MIPS32R6, we can skip using a delay slot branch.
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index eea28df7eda..8907a72ac87 100644
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -534,21 +534,22 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
- unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+ unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
- BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
+ BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2);
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->ensureAlignment(BB->getAlignment());
+ MF->ensureLogAlignment(BB->getLogAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
// track of the insertion point for each alignment. We are going to bucket
// sort the entries as they are created.
- SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
+ BB->end());
// Add all of the constants from the constant pool to the end block, use an
// identity mapping of CPI's to CPE's.
@@ -576,7 +577,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// Ensure that future entries with higher alignment get inserted before
// CPEMI. This is bucket sort with iterators.
- for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
if (InsPoint[a] == InsAt)
InsPoint[a] = CPEMI;
// Add a new CPEntry, but no corresponding CPUser yet.
@@ -942,14 +943,14 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
unsigned &Growth) {
unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
- unsigned NextBlockOffset, NextBlockAlignment;
+ unsigned NextBlockOffset, NextBlockLogAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
if (NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = 0;
+ NextBlockLogAlignment = 0;
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
- NextBlockAlignment = NextBlock->getAlignment();
+ NextBlockLogAlignment = NextBlock->getLogAlignment();
}
unsigned Size = U.CPEMI->getOperand(2).getImm();
unsigned CPEEnd = CPEOffset + Size;
@@ -961,7 +962,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
+ Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment);
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
@@ -1258,7 +1259,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getAlignment();
+ unsigned LogAlign = MF->getLogAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
@@ -1399,7 +1400,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
++NumCPEs;
// Mark the basic block as aligned as required by the const-pool entry.
- NewIsland->setAlignment(getCPELogAlign(*U.CPEMI));
+ NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI));
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
@@ -1431,10 +1432,10 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(0);
+ CPEBB->setLogAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin()));
+ CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin()));
adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -1529,7 +1530,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
// We should have a way to back out this alignment restriction later if we can,
// but it is not harmful.
//
- DestBB->setAlignment(2);
+ DestBB->setLogAlignment(2);
Br.MaxDisp = ((1<<24)-1) * 2;
MI->setDesc(TII->get(Mips::JalB16));
}
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index e556aa5fda1..c1df9a63b40 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -518,7 +518,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setLibcallName(RTLIB::SRA_I128, nullptr);
}
- setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
+ setMinFunctionLogAlignment(Subtarget.isGP64bit() ? 3 : 2);
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 6d1f09278b7..353a3481132 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -81,14 +81,14 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
/// original Offset.
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
unsigned Offset) {
- unsigned Align = MBB.getAlignment();
- if (!Align)
+ unsigned LogAlign = MBB.getLogAlignment();
+ if (!LogAlign)
return 0;
- unsigned AlignAmt = 1 << Align;
- unsigned ParentAlign = MBB.getParent()->getAlignment();
+ unsigned AlignAmt = 1 << LogAlign;
+ unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
- if (Align <= ParentAlign)
+ if (LogAlign <= ParentLogAlign)
return OffsetToAlignment(Offset, AlignAmt);
// The alignment of this MBB is larger than the function's alignment, so we
@@ -179,21 +179,21 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
const MachineBasicBlock *Dest,
unsigned BrOffset) {
int BranchSize;
- unsigned MaxAlign = 2;
+ unsigned MaxLogAlign = 2;
bool NeedExtraAdjustment = false;
if (Dest->getNumber() <= Src->getNumber()) {
// If this is a backwards branch, the delta is the offset from the
// start of this block to this branch, plus the sizes of all blocks
// from this block to the dest.
BranchSize = BrOffset;
- MaxAlign = std::max(MaxAlign, Src->getAlignment());
+ MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment());
int DestBlock = Dest->getNumber();
BranchSize += BlockSizes[DestBlock].first;
for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) {
BranchSize += BlockSizes[i].first;
- MaxAlign = std::max(MaxAlign,
- Fn.getBlockNumbered(i)->getAlignment());
+ MaxLogAlign =
+ std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
}
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -204,11 +204,11 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
unsigned StartBlock = Src->getNumber();
BranchSize = BlockSizes[StartBlock].first - BrOffset;
- MaxAlign = std::max(MaxAlign, Dest->getAlignment());
+ MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment());
for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) {
BranchSize += BlockSizes[i].first;
- MaxAlign = std::max(MaxAlign,
- Fn.getBlockNumbered(i)->getAlignment());
+ MaxLogAlign =
+ std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
}
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -258,7 +258,7 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
// The computed offset is at most ((1 << alignment) - 4) bytes smaller
// than the actual offset.
if (NeedExtraAdjustment)
- BranchSize += (1 << MaxAlign) - 4;
+ BranchSize += (1 << MaxLogAlign) - 4;
return BranchSize;
}
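The safety margin in the last hunk exists because the summed block sizes count 4-byte instructions, while an aligned block on the path may additionally absorb up to (1 << MaxLogAlign) - 4 bytes of padding. A worked instance under that assumption:

    #include <cassert>

    int main() {
      unsigned MaxLogAlign = 4;                          // largest alignment on the path
      unsigned WorstCaseSlack = (1u << MaxLogAlign) - 4; // PPC instructions are 4 bytes
      assert(WorstCaseSlack == 12); // the estimate may undershoot by up to 12 bytes
      return 0;
    }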
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 06df60c0266..c7fc7d87ad4 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1180,9 +1180,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setJumpIsExpensive();
}
- setMinFunctionAlignment(2);
+ setMinFunctionLogAlignment(2);
if (Subtarget.isDarwin())
- setPrefFunctionAlignment(4);
+ setPrefFunctionLogAlignment(4);
switch (Subtarget.getDarwinDirective()) {
default: break;
@@ -1199,8 +1199,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
- setPrefFunctionAlignment(4);
- setPrefLoopAlignment(4);
+ setPrefFunctionLogAlignment(4);
+ setPrefLoopLogAlignment(4);
break;
}
@@ -14007,7 +14007,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}
-unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
switch (Subtarget.getDarwinDirective()) {
default: break;
case PPC::DIR_970:
@@ -14050,7 +14050,7 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
}
}
- return TargetLowering::getPrefLoopAlignment(ML);
+ return TargetLowering::getPrefLoopLogAlignment(ML);
}
/// getConstraintType - Given a constraint, return the type of
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 757e2d36da8..249d7e48f85 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -735,7 +735,7 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
- unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
+ unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 86add6ecccf..8bf291f8d86 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -199,8 +199,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// Function alignments (log2).
unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
- setMinFunctionAlignment(FunctionAlignment);
- setPrefFunctionAlignment(FunctionAlignment);
+ setMinFunctionLogAlignment(FunctionAlignment);
+ setPrefFunctionLogAlignment(FunctionAlignment);
// Effectively disable jump table generation.
setMinimumJumpTableEntries(INT_MAX);
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 99e555a0626..cfd6a72d364 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1805,7 +1805,7 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setMinFunctionAlignment(2);
+ setMinFunctionLogAlignment(2);
computeRegisterProperties(Subtarget->getRegisterInfo());
}
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 7605a7e8034..bc0ebe6f301 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Instructions are strings of 2-byte aligned 2-byte values.
- setMinFunctionAlignment(2);
+ setMinFunctionLogAlignment(2);
// For performance reasons we prefer 16-byte alignment.
- setPrefFunctionAlignment(4);
+ setPrefFunctionLogAlignment(4);
// Handle operations that are handled in a similar way for all types.
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index 95d7e22dec3..955a9fd951f 100644
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -87,7 +87,7 @@ struct MBBInfo {
// The minimum alignment of the block, as a log2 value.
// This value never changes.
- unsigned Alignment = 0;
+ unsigned LogAlignment = 0;
// The number of terminators in this block. This value never changes.
unsigned NumTerminators = 0;
@@ -127,7 +127,8 @@ struct BlockPosition {
// as the runtime address.
unsigned KnownBits;
- BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {}
+ BlockPosition(unsigned InitialLogAlignment)
+ : KnownBits(InitialLogAlignment) {}
};
class SystemZLongBranch : public MachineFunctionPass {
@@ -178,16 +179,16 @@ const uint64_t MaxForwardRange = 0xfffe;
// instructions.
void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
MBBInfo &Block) {
- if (Block.Alignment > Position.KnownBits) {
+ if (Block.LogAlignment > Position.KnownBits) {
// When calculating the address of Block, we need to conservatively
// assume that Block had the worst possible misalignment.
- Position.Address += ((uint64_t(1) << Block.Alignment) -
+ Position.Address += ((uint64_t(1) << Block.LogAlignment) -
(uint64_t(1) << Position.KnownBits));
- Position.KnownBits = Block.Alignment;
+ Position.KnownBits = Block.LogAlignment;
}
// Align the addresses.
- uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1;
+ uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1;
Position.Address = (Position.Address + AlignMask) & ~AlignMask;
// Record the block's position.
@@ -275,13 +276,13 @@ uint64_t SystemZLongBranch::initMBBInfo() {
Terminators.clear();
Terminators.reserve(NumBlocks);
- BlockPosition Position(MF->getAlignment());
+ BlockPosition Position(MF->getLogAlignment());
for (unsigned I = 0; I < NumBlocks; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
MBBInfo &Block = MBBs[I];
// Record the alignment, for quick access.
- Block.Alignment = MBB->getAlignment();
+ Block.LogAlignment = MBB->getLogAlignment();
// Calculate the size of the fixed part of the block.
MachineBasicBlock::iterator MI = MBB->begin();
@@ -339,7 +340,7 @@ bool SystemZLongBranch::mustRelaxABranch() {
// must be long.
void SystemZLongBranch::setWorstCaseAddresses() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
- BlockPosition Position(MF->getAlignment());
+ BlockPosition Position(MF->getLogAlignment());
for (auto &Block : MBBs) {
skipNonTerminators(Position, Block);
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
@@ -440,7 +441,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
// Run a shortening pass and relax any branches that need to be relaxed.
void SystemZLongBranch::relaxBranches() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
- BlockPosition Position(MF->getAlignment());
+ BlockPosition Position(MF->getLogAlignment());
for (auto &Block : MBBs) {
skipNonTerminators(Position, Block);
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
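The address bookkeeping in skipNonTerminators is easiest to follow with numbers: when a block demands more alignment than the position's known low bits, the worst-case address grows by (1 << LogAlignment) - (1 << KnownBits) before the usual rounding. A hedged sketch of that step (names are illustrative):

    #include <cassert>
    #include <cstdint>

    static void skipToBlock(uint64_t &Address, unsigned &KnownBits,
                            unsigned BlockLogAlign) {
      if (BlockLogAlign > KnownBits) {
        // Conservatively assume the worst possible misalignment.
        Address += (uint64_t(1) << BlockLogAlign) - (uint64_t(1) << KnownBits);
        KnownBits = BlockLogAlign;
      }
      uint64_t Mask = (uint64_t(1) << BlockLogAlign) - 1;
      Address = (Address + Mask) & ~Mask; // align the address itself
    }

    int main() {
      uint64_t Address = 6;   // bytes emitted so far
      unsigned KnownBits = 1; // address known to be 2-byte aligned
      skipToBlock(Address, KnownBits, 3); // next block wants 8-byte alignment
      assert(Address == 16 && KnownBits == 3);
      return 0;
    }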
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3cc45a7add1..dc031459a27 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1892,13 +1892,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
- setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
+ setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment);
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
- setPrefFunctionAlignment(4); // 2^4 bytes.
+ setPrefFunctionLogAlignment(4); // 2^4 bytes.
verifyIntrinsicTables();
}
diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
index b435b22e8ac..1a0f6ecb01e 100644
--- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
@@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,
CallTarget->addLiveIn(Reg);
CallTarget->setHasAddressTaken();
- CallTarget->setAlignment(4);
+ CallTarget->setLogAlignment(4);
insertRegReturnAddrClobber(*CallTarget, Reg);
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 34308904dba..924744343eb 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -171,8 +171,8 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
- setMinFunctionAlignment(1);
- setPrefFunctionAlignment(2);
+ setMinFunctionLogAlignment(1);
+ setPrefFunctionLogAlignment(2);
}
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {