Diffstat (limited to 'llvm/lib/Target/ARM')
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.cpp          | 14
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp      |  8
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.h        | 18
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp  | 36
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp        |  8
5 files changed, 41 insertions(+), 43 deletions(-)
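
The change is mechanical: these files already do name lookup inside the llvm namespace, so the llvm:: qualifier on the Align type is redundant and is dropped; in a few places a local or parameter is also renamed (Align -> Alignment) where the old name would shadow the now-unqualified type. A minimal before/after sketch of the pattern (illustrative, not a line from the patch):

    // Before: fully qualified, although unqualified lookup already finds the type.
    EmitAlignment(llvm::Align(4));
    // After: same type, shorter spelling.
    EmitAlignment(Align(4));
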
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 9fcdb2fb75a..c8c91e53c44 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -168,7 +168,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// relatively easy to exceed the thumb branch range within a TU.
if (! ThumbIndirectPads.empty()) {
OutStreamer->EmitAssemblerFlag(MCAF_Code16);
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
for (std::pair<unsigned, MCSymbol *> &TIP : ThumbIndirectPads) {
OutStreamer->EmitLabel(TIP.second);
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBX)
@@ -526,7 +526,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -539,7 +539,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getThreadLocalPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -940,7 +940,7 @@ void ARMAsmPrinter::EmitJumpTableAddrs(const MachineInstr *MI) {
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -986,7 +986,7 @@ void ARMAsmPrinter::EmitJumpTableInsts(const MachineInstr *MI) {
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -1015,7 +1015,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
unsigned JTI = MO1.getIndex();
if (Subtarget->isThumb1Only())
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
OutStreamer->EmitLabel(JTISymbol);
@@ -1058,7 +1058,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
// Make sure the next instruction is 2-byte aligned.
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
}
void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
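
For context on the EmitAlignment calls above: a 4-byte alignment request only emits padding when the current location is not already a multiple of four, which is why the jump-table comments call it a nop for ARM mode (every ARM-mode instruction is 4 bytes). A standalone sketch using the alignTo helper from llvm/Support/Alignment.h, with illustrative offsets that are not part of the patch:

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    int main() {
      // After a 2-byte Thumb instruction at offset 4, the location is 6:
      assert(llvm::alignTo(6, llvm::Align(4)) == 8); // 2 bytes of padding emitted
      // In ARM mode the location is already a multiple of 4:
      assert(llvm::alignTo(8, llvm::Align(4)) == 8); // no padding, a nop
      return 0;
    }
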
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
index 4bf32be686d..2b34b1d8548 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -47,7 +47,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
BBI.Size = 0;
BBI.Unalign = 0;
- BBI.PostAlign = llvm::Align::None();
+ BBI.PostAlign = Align::None();
for (MachineInstr &I : *MBB) {
BBI.Size += TII->getInstSizeInBytes(I);
@@ -62,8 +62,8 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
- BBI.PostAlign = llvm::Align(4);
- MBB->getParent()->ensureAlignment(llvm::Align(4));
+ BBI.PostAlign = Align(4);
+ MBB->getParent()->ensureAlignment(Align(4));
}
}
@@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
- const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment();
+ const Align Align = MF.getBlockNumbered(i)->getAlignment();
const unsigned Offset = BBInfo[i - 1].postOffset(Align);
const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align);
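
Two things happen in this file. computeBlockSize records PostAlign = Align(4) for blocks ending in tBR_JTr, because that pseudo expands with a trailing .align 2 (2^2 = 4 byte) directive, and the function itself must then be at least 4-byte aligned for that to be meaningful. adjustBBOffsetsAfter then re-derives each later block's start offset from its predecessor's postOffset(). A worked example with illustrative numbers only:

    // A Thumb block ending in tBR_JTr, finishing at byte offset 10:
    unsigned End = 10;
    // With PostAlign = Align(4) and a fully known offset, the inline jump
    // table that follows lands on the next 4-byte boundary:
    unsigned TableStart = llvm::alignTo(End, llvm::Align(4)); // == 12
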
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
index 18e7195e1a9..d0f4a02463b 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
@@ -27,11 +27,11 @@ using BBInfoVector = SmallVectorImpl<BasicBlockInfo>;
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
-/// @param Align alignment
+/// @param Alignment alignment
/// @param KnownBits Number of known low offset bits.
-inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
- if (KnownBits < Log2(Align))
- return Align.value() - (1ull << KnownBits);
+inline unsigned UnknownPadding(Align Alignment, unsigned KnownBits) {
+ if (KnownBits < Log2(Alignment))
+ return Alignment.value() - (1ull << KnownBits);
return 0;
}
@@ -67,7 +67,7 @@ struct BasicBlockInfo {
/// PostAlign - When > 1, the block terminator contains a .align
/// directive, so the end of the block is aligned to PostAlign bytes.
- llvm::Align PostAlign;
+ Align PostAlign;
BasicBlockInfo() = default;
@@ -86,10 +86,10 @@ struct BasicBlockInfo {
/// Compute the offset immediately following this block. If Align is
/// specified, return the offset the successor block will get if it has
/// this alignment.
- unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postOffset(Align Alignment = Align::None()) const {
unsigned PO = Offset + Size;
- const llvm::Align PA = std::max(PostAlign, Align);
- if (PA == llvm::Align::None())
+ const Align PA = std::max(PostAlign, Alignment);
+ if (PA == Align::None())
return PO;
// Add alignment padding from the terminator.
return PO + UnknownPadding(PA, internalKnownBits());
@@ -100,7 +100,7 @@ struct BasicBlockInfo {
/// instruction alignment. An aligned terminator may increase the number
/// of know bits.
/// If LogAlign is given, also consider the alignment of the next block.
- unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postKnownBits(Align Align = Align::None()) const {
return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits());
}
};
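
UnknownPadding above is a worst-case bound, not the actual padding: if only the low KnownBits bits of an offset are known to be zero, the offset may sit one 2^KnownBits granule past an alignment boundary, and reaching the next boundary then costs Alignment.value() - (1 << KnownBits) bytes. Two illustrative evaluations (assertions for exposition, not patch code):

    // KnownBits = 1 < Log2(Align(8)) = 3: worst case is 8 - (1 << 1) = 6 bytes.
    assert(UnknownPadding(llvm::Align(8), 1) == 6);
    // KnownBits = 3: the offset is provably 8-byte aligned, so no padding.
    assert(UnknownPadding(llvm::Align(8), 3) == 0);
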
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 874ae7862b6..24ca25f73e9 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -247,7 +247,7 @@ namespace {
void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
bool BBHasFallthrough(MachineBasicBlock *MBB);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- llvm::Align getCPEAlign(const MachineInstr *CPEMI);
+ Align getCPEAlign(const MachineInstr *CPEMI);
void scanFunctionJumpTables();
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
@@ -404,7 +404,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureAlignment(llvm::Align(4));
+ MF->ensureAlignment(Align(4));
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -494,7 +494,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes.
- const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
const unsigned MaxLogAlign = Log2(MaxAlign);
// Mark the basic block as required by the const-pool.
@@ -650,25 +650,25 @@ ARMConstantIslands::findConstPoolEntry(unsigned CPI,
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
-llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
+Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
switch (CPEMI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
break;
case ARM::JUMPTABLE_TBB:
- return isThumb1 ? llvm::Align(4) : llvm::Align(1);
+ return isThumb1 ? Align(4) : Align(1);
case ARM::JUMPTABLE_TBH:
- return isThumb1 ? llvm::Align(4) : llvm::Align(2);
+ return isThumb1 ? Align(4) : Align(2);
case ARM::JUMPTABLE_INSTS:
- return llvm::Align(2);
+ return Align(2);
case ARM::JUMPTABLE_ADDRS:
- return llvm::Align(4);
+ return Align(4);
default:
llvm_unreachable("unknown constpool entry kind");
}
unsigned CPI = getCombinedIndex(CPEMI);
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- return llvm::Align(MCP->getConstants()[CPI].getAlignment());
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// scanFunctionJumpTables - Do a scan of the function, building up
@@ -1021,10 +1021,10 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
unsigned NextBlockOffset;
- llvm::Align NextBlockAlignment;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
if (++NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
@@ -1214,7 +1214,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// inserting islands between BB0 and BB1 makes other accesses out of range.
MachineBasicBlock *UserBB = U.MI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
return false;
@@ -1268,7 +1268,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
- const llvm::Align CPEAlign = getCPEAlign(CPEMI);
+ const Align CPEAlign = getCPEAlign(CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1323,7 +1323,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// Align which is the largest possible alignment in the function.
- const llvm::Align Align = MF->getAlignment();
+ const Align Align = MF->getAlignment();
assert(Align >= CPEAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(Align, KnownBits);
@@ -1501,9 +1501,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
- const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
- if (NewMBB->getAlignment() < Align)
- NewMBB->setAlignment(Align);
+ const Align Alignment = isThumb ? Align(2) : Align(4);
+ if (NewMBB->getAlignment() < Alignment)
+ NewMBB->setAlignment(Alignment);
// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1566,7 +1566,7 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(llvm::Align::None());
+ CPEBB->setAlignment(Align::None());
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
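
Several hunks above lean on Align being totally ordered: std::max(PostAlign, Alignment) picks the stricter requirement, and the NewMBB->getAlignment() < Alignment guard ensures an existing alignment is never lowered. A standalone restatement of the idiom (a sketch, not patch code):

    #include "llvm/Support/Alignment.h"
    #include <algorithm>

    // Any address aligned to Align(8) is also aligned to Align(4), so the
    // stricter of two power-of-two requirements is simply the larger one.
    llvm::Align combine(llvm::Align A, llvm::Align B) { return std::max(A, B); }
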
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1866f794d8c..989c9477b7e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1428,16 +1428,14 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// On ARM arguments smaller than 4 bytes are extended, so all arguments
// are at least 4 bytes aligned.
- setMinStackArgumentAlignment(llvm::Align(4));
+ setMinStackArgumentAlignment(Align(4));
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopAlignment(
- llvm::Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
+ setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
- setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2)
- : llvm::Align(4));
+ setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
if (Subtarget->isThumb() || Subtarget->isThumb2())
setTargetDAGCombine(ISD::ABS);
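
The setPrefLoopAlignment hunk converts a log2 exponent stored by the subtarget into an Align by shifting; Align's constructor requires a power of two, which 1ULL << n satisfies by construction. An illustrative round trip, where the exponent value is hypothetical:

    unsigned PrefLoopLogAlignment = 2;           // hypothetical subtarget value
    llvm::Align A(1ULL << PrefLoopLogAlignment); // Align(4)
    assert(llvm::Log2(A) == PrefLoopLogAlignment);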