Diffstat (limited to 'llvm/lib/Target/ARM')
-rw-r--r--  llvm/lib/Target/ARM/ARM.td                     |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp      |  4
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp  | 39
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp        |  4
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.cpp           |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.h             |  6
6 files changed, 28 insertions, 29 deletions
diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td
index 8dcddd25429..6616ae5d160 100644
--- a/llvm/lib/Target/ARM/ARM.td
+++ b/llvm/lib/Target/ARM/ARM.td
@@ -302,7 +302,7 @@ def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
"Prefer 32-bit Thumb instrs">;
-def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2",
+def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2",
"Prefer 32-bit alignment for loops">;
def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1",
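
The feature value is stored in log2(bytes), so the rename only makes the
units explicit: "loop-align" still writes 2 into the field, requesting
1 << 2 = 4-byte (32-bit) loop alignment. A minimal worked check of that
encoding (a sketch, not part of the patch):

    // The log2(bytes) encoding assumed throughout this patch:
    // a stored value of 2 means 1 << 2 == 4-byte (32-bit) alignment.
    static_assert((1u << 2) == 4, "log alignment 2 is 32-bit alignment");
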
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
index 2de90e816b3..d8ca5fdda80 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -63,7 +63,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
BBI.PostAlign = 2;
- MBB->getParent()->ensureAlignment(2);
+ MBB->getParent()->ensureLogAlignment(2);
}
}
@@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
- unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment();
+ unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment();
unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
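
The known-bits bookkeeping above exists to bound worst-case alignment
padding: when only the low KnownBits bits of an offset are known to be
zero, aligning to 1 << LogAlign bytes may insert padding. A sketch of
that bound, assumed to mirror the UnknownPadding helper these passes
share (it appears again in ARMConstantIslandPass.cpp below):

    // Worst-case padding needed to reach 1 << LogAlign alignment when
    // only the low KnownBits bits of the offset are known to be zero.
    unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
      if (KnownBits < LogAlign)
        return (1u << LogAlign) - (1u << KnownBits);
      return 0;
    }
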
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 5283bb52ee4..ae62d9789bb 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureAlignment(2);
+ MF->ensureLogAlignment(2);
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -486,20 +486,21 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
- unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+ unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
- BB->setAlignment(MaxAlign);
+ BB->setLogAlignment(MaxLogAlign);
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->ensureAlignment(BB->getAlignment());
+ MF->ensureLogAlignment(BB->getLogAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
// track of the insertion point for each alignment. We are going to bucket
// sort the entries as they are created.
- SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
+ BB->end());
// Add all of the constants from the constant pool to the end block, use an
// identity mapping of CPI's to CPE's.
@@ -524,7 +525,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
// Ensure that future entries with higher alignment get inserted before
// CPEMI. This is bucket sort with iterators.
- for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
if (InsPoint[a] == InsAt)
InsPoint[a] = CPEMI;
@@ -685,7 +686,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
// The known bits of the entry block offset are determined by the function
// alignment.
- BBInfo.front().KnownBits = MF->getAlignment();
+ BBInfo.front().KnownBits = MF->getLogAlignment();
// Compute block offsets and known bits.
BBUtils->adjustBBOffsetsAfter(&MF->front());
@@ -1015,14 +1016,14 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
BBInfoVector &BBInfo = BBUtils->getBBInfo();
unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
- unsigned NextBlockOffset, NextBlockAlignment;
+ unsigned NextBlockOffset, NextBlockLogAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
if (++NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = 0;
+ NextBlockLogAlignment = 0;
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
- NextBlockAlignment = NextBlock->getAlignment();
+ NextBlockLogAlignment = NextBlock->getLogAlignment();
}
unsigned Size = U.CPEMI->getOperand(2).getImm();
unsigned CPEEnd = CPEOffset + Size;
@@ -1034,13 +1035,13 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
+ Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment);
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
// in blocks between CPE and the user.
if (CPEOffset < UserOffset)
- UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
+ UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign);
} else
// CPE fits in existing padding.
Growth = 0;
@@ -1315,7 +1316,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getAlignment();
+ unsigned LogAlign = MF->getLogAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
@@ -1493,9 +1494,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
- const unsigned Align = isThumb ? 1 : 2;
- if (NewMBB->getAlignment() < Align)
- NewMBB->setAlignment(Align);
+ const unsigned LogAlign = isThumb ? 1 : 2;
+ if (NewMBB->getLogAlignment() < LogAlign)
+ NewMBB->setLogAlignment(LogAlign);
// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1524,7 +1525,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
decrementCPEReferenceCount(CPI, CPEMI);
// Mark the basic block as aligned as required by the const-pool entry.
- NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+ NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI));
// Increase the size of the island block to account for the new entry.
BBUtils->adjustBBSize(NewIsland, Size);
@@ -1558,10 +1559,10 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(0);
+ CPEBB->setLogAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
+ CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin()));
BBUtils->adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
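
The bucket sort in doInitialConstPlacement (above) keeps constant-pool
entries ordered by descending alignment using one insertion point per
log-alignment: a new entry goes in at InsPoint[LogAlign], later entries
of the same alignment land after it, and any higher-alignment bucket
still pointing at the old position is redirected so its entries land
before it. A simplified standalone sketch of the idea, with plain ints
standing in for the pass's machine-IR types:

    #include <list>
    #include <vector>

    // Insert Entry, which needs log-alignment LogAlign, keeping Block
    // sorted by descending alignment. InsPoint[a] is where the next
    // entry of log-alignment a must be inserted.
    void insertEntry(std::list<int> &Block,
                     std::vector<std::list<int>::iterator> &InsPoint,
                     unsigned LogAlign, int Entry) {
      auto InsAt = InsPoint[LogAlign];
      auto New = Block.insert(InsAt, Entry);
      // Future entries with strictly higher alignment must precede New.
      for (unsigned a = LogAlign + 1; a < InsPoint.size(); ++a)
        if (InsPoint[a] == InsAt)
          InsPoint[a] = New;
    }
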
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 907517461e7..bbaa9431dec 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1419,9 +1419,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopAlignment(Subtarget->getPrefLoopAlignment());
+ setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment());
- setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
+ setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2);
if (Subtarget->isThumb() || Subtarget->isThumb2())
setTargetDAGCombine(ISD::ABS);
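
Both setters take log2(bytes): Thumb functions need only 2-byte
alignment (log2 = 1) while ARM functions need 4-byte alignment
(log2 = 2). A small sketch of the resulting byte values (hypothetical
helper, not in the patch):

    // Hypothetical helper: minimum function alignment in bytes.
    unsigned minFunctionAlignBytes(bool IsThumb) {
      return 1u << (IsThumb ? 1 : 2); // Thumb: 2 bytes, ARM: 4 bytes
    }
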
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 54443d2126f..155fbce98a8 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -300,7 +300,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
LdStMultipleTiming = SingleIssuePlusExtras;
MaxInterleaveFactor = 4;
if (!isThumb())
- PrefLoopAlignment = 3;
+ PrefLoopLogAlignment = 3;
break;
case Kryo:
break;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index dde9dcbdb1c..0491420c51f 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -470,7 +470,7 @@ protected:
int PreISelOperandLatencyAdjustment = 2;
/// What alignment is preferred for loop bodies, in log2(bytes).
- unsigned PrefLoopAlignment = 0;
+ unsigned PrefLoopLogAlignment = 0;
/// The cost factor for MVE instructions, representing the multiple beats an
// instruction can take. The default is 2, (set in initSubtargetFeatures so
@@ -859,9 +859,7 @@ public:
return isROPI() || !isTargetELF();
}
- unsigned getPrefLoopAlignment() const {
- return PrefLoopAlignment;
- }
+ unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; }