author     Guillaume Chatelet <gchatelet@google.com>  2019-09-27 12:54:21 +0000
committer  Guillaume Chatelet <gchatelet@google.com>  2019-09-27 12:54:21 +0000
commit     18f805a7ea5f369ef523821693f1176b40bcfc7e (patch)
tree       4304e048240695e0f4522b01e36ff6a884d83c5a
parent     7e317cab732181540fcd03000b3d3e2a5c8bc642 (diff)
[Alignment][NFC] Remove unneeded llvm:: scoping on Align types
llvm-svn: 373081
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfo.h | 12
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfoImpl.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/AsmPrinter.h | 6
-rw-r--r--  llvm/include/llvm/CodeGen/CallingConvLower.h | 12
-rw-r--r--  llvm/include/llvm/CodeGen/MachineBasicBlock.h | 6
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFrameInfo.h | 21
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFunction.h | 8
-rw-r--r--  llvm/include/llvm/CodeGen/TargetCallingConv.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/TargetLowering.h | 30
-rw-r--r--  llvm/include/llvm/IR/DataLayout.h | 47
-rw-r--r--  llvm/include/llvm/IR/Instructions.h | 12
-rw-r--r--  llvm/include/llvm/MC/MCSection.h | 4
-rw-r--r--  llvm/include/llvm/Support/Alignment.h | 10
-rw-r--r--  llvm/include/llvm/Support/OnDiskHashTable.h | 3
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp | 6
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 66
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/BranchRelaxation.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/CallingConvLower.cpp | 17
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 30
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIParser.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIRParser.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineBasicBlock.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineBlockPlacement.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/MachineFrameInfo.cpp | 39
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/PatchableFunction.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 29
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | 6
-rw-r--r--  llvm/lib/IR/DataLayout.cpp | 83
-rw-r--r--  llvm/lib/IR/Instructions.cpp | 6
-rw-r--r--  llvm/lib/IR/Value.cpp | 2
-rw-r--r--  llvm/lib/MC/ELFObjectWriter.cpp | 12
-rw-r--r--  llvm/lib/MC/MCAssembler.cpp | 2
-rw-r--r--  llvm/lib/MC/MCELFStreamer.cpp | 4
-rw-r--r--  llvm/lib/MC/MCObjectStreamer.cpp | 2
-rw-r--r--  llvm/lib/MC/MCWinCOFFStreamer.cpp | 4
-rw-r--r--  llvm/lib/MC/MachObjectWriter.cpp | 12
-rw-r--r--  llvm/lib/Object/ArchiveWriter.cpp | 10
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallingConvention.cpp | 6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 7
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.h | 2
-rw-r--r--  llvm/lib/Target/ARC/ARCMachineFunctionInfo.h | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 14
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp | 8
-rw-r--r--  llvm/lib/Target/ARM/ARMBasicBlockInfo.h | 18
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 36
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 8
-rw-r--r--  llvm/lib/Target/AVR/AVRISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/BPF/BPFISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp | 4
-rw-r--r--  llvm/lib/Target/Lanai/LanaiISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 20
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h | 2
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp | 13
-rw-r--r--  llvm/lib/Target/Mips/MipsConstantIslandPass.cpp | 22
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp | 9
-rw-r--r--  llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/MipsSERegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchSelector.cpp | 14
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.h | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZLongBranch.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86AsmPrinter.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86RetpolineThunks.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86TargetTransformInfo.h | 4
-rw-r--r--  llvm/lib/Target/XCore/XCoreAsmPrinter.cpp | 4
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp | 24
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp | 4
-rw-r--r--  llvm/tools/dsymutil/DwarfStreamer.cpp | 4
-rw-r--r--  llvm/tools/llvm-cov/TestingSupport.cpp | 2
-rw-r--r--  llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp | 2
94 files changed, 434 insertions(+), 453 deletions(-)
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 403fe355330..6da2d7f43bc 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -580,9 +580,9 @@ public:
bool isLegalMaskedLoad(Type *DataType) const;
/// Return true if the target supports nontemporal store.
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) const;
+ bool isLegalNTStore(Type *DataType, Align Alignment) const;
/// Return true if the target supports nontemporal load.
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) const;
+ bool isLegalNTLoad(Type *DataType, Align Alignment) const;
/// Return true if the target supports masked scatter.
bool isLegalMaskedScatter(Type *DataType) const;
@@ -1196,8 +1196,8 @@ public:
virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
virtual bool isLegalMaskedStore(Type *DataType) = 0;
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
- virtual bool isLegalNTStore(Type *DataType, llvm::Align Alignment) = 0;
- virtual bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) = 0;
+ virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
+ virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
@@ -1471,10 +1471,10 @@ public:
bool isLegalMaskedLoad(Type *DataType) override {
return Impl.isLegalMaskedLoad(DataType);
}
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) override {
+ bool isLegalNTStore(Type *DataType, Align Alignment) override {
return Impl.isLegalNTStore(DataType, Alignment);
}
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) override {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) override {
return Impl.isLegalNTLoad(DataType, Alignment);
}
bool isLegalMaskedScatter(Type *DataType) override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index ae2f5b8fcc9..2f1011799f1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -247,14 +247,14 @@ public:
bool isLegalMaskedLoad(Type *DataType) { return false; }
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+ bool isLegalNTStore(Type *DataType, Align Alignment) {
// By default, assume nontemporal memory stores are available for stores
// that are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) {
// By default, assume nontemporal memory loads are available for loads that
// are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
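The default implementation above says a nontemporal store (and, symmetrically, a nontemporal load) is legal exactly when the access is at least as aligned as its store size and that size is a power of two. A minimal standalone sketch of that predicate, with plain integers standing in for Align and DL.getTypeStoreSize(); the names are illustrative, not LLVM API:

#include <cstdint>

// Reimplements llvm::isPowerOf2_32 so the sketch compiles without LLVM headers.
static bool isPow2(uint32_t V) { return V && !(V & (V - 1)); }

// Mirrors the default isLegalNTStore/isLegalNTLoad check shown above.
bool isLegalNTAccessSketch(uint32_t DataSize, uint64_t Alignment) {
  return Alignment >= DataSize && isPow2(DataSize);
}

// isLegalNTAccessSketch(16, 16) -> true  (16-byte vector, 16-byte aligned)
// isLegalNTAccessSketch(12, 16) -> false (12 is not a power of two)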
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
index 137a1c3b4a5..a4580da5aec 100644
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -350,7 +350,7 @@ public:
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
/// correctness.
- void EmitAlignment(llvm::Align Align, const GlobalObject *GV = nullptr) const;
+ void EmitAlignment(Align Alignment, const GlobalObject *GV = nullptr) const;
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
@@ -643,8 +643,8 @@ public:
void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
/// Return the alignment for the specified \p GV.
- static llvm::Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
- llvm::Align InAlign = llvm::Align::None());
+ static Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+ Align InAlign = Align::None());
private:
/// Private state for PrintSpecial()
diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
index 56da9b19801..a30ca638ee6 100644
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -424,18 +424,18 @@ public:
/// AllocateStack - Allocate a chunk of stack space with the specified size
/// and alignment.
unsigned AllocateStack(unsigned Size, unsigned Alignment) {
- const llvm::Align Align(Alignment);
- StackOffset = alignTo(StackOffset, Align);
+ const Align CheckedAlignment(Alignment);
+ StackOffset = alignTo(StackOffset, CheckedAlignment);
unsigned Result = StackOffset;
StackOffset += Size;
- MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
- ensureMaxAlignment(Align);
+ MaxStackArgAlign = std::max(CheckedAlignment, MaxStackArgAlign);
+ ensureMaxAlignment(CheckedAlignment);
return Result;
}
- void ensureMaxAlignment(llvm::Align Align) {
+ void ensureMaxAlignment(Align Alignment) {
if (!AnalyzingMustTailForwardedRegs)
- MF.getFrameInfo().ensureMaxAlignment(Align.value());
+ MF.getFrameInfo().ensureMaxAlignment(Alignment.value());
}
/// Version of AllocateStack with extra register to be shadowed.
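AllocateStack above is the heart of the stack-slot bookkeeping: round the running offset up to the requested alignment, hand that offset back, advance it by Size, and remember the strictest alignment seen. A standalone sketch of the same arithmetic, assuming power-of-two alignments as Align guarantees (the struct and names are illustrative, not the CCState API):

#include <algorithm>
#include <cstdint>

// Round Value up to the next multiple of Alignment, i.e. the alignTo() above.
static uint64_t alignUp(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

struct StackBookkeepingSketch {
  uint64_t StackOffset = 0;
  uint64_t MaxStackArgAlign = 1;

  uint64_t allocateStack(uint64_t Size, uint64_t Alignment) {
    StackOffset = alignUp(StackOffset, Alignment); // pad up to the alignment
    uint64_t Result = StackOffset;                 // offset of the new slot
    StackOffset += Size;
    MaxStackArgAlign = std::max(Alignment, MaxStackArgAlign);
    return Result;
  }
};

// From offset 0: allocateStack(4, 4) returns 0 and leaves the offset at 4;
// a following allocateStack(8, 8) pads 4 -> 8 and returns 8.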
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 2055eed2aa1..ccdde78a0b2 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -105,7 +105,7 @@ private:
/// Alignment of the basic block. One if the basic block does not need to be
/// aligned.
- llvm::Align Alignment;
+ Align Alignment;
/// Indicate that this basic block is entered via an exception handler.
bool IsEHPad = false;
@@ -373,10 +373,10 @@ public:
const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
/// Return alignment of the basic block.
- llvm::Align getAlignment() const { return Alignment; }
+ Align getAlignment() const { return Alignment; }
/// Set alignment of the basic block.
- void setAlignment(llvm::Align A) { Alignment = A; }
+ void setAlignment(Align A) { Alignment = A; }
/// Returns true if the block is a landing pad. That is this basic block is
/// entered via an exception handler.
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index b4584ece52d..01fc50d14a7 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -181,7 +181,7 @@ private:
uint8_t SSPLayout;
- StackObject(uint64_t Size, llvm::Align Alignment, int64_t SPOffset,
+ StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
bool IsAliased, uint8_t StackID = 0)
: SPOffset(SPOffset), Size(Size), Alignment(Alignment),
@@ -419,7 +419,9 @@ public:
/// Required alignment of the local object blob,
/// which is the strictest alignment of any object in it.
- void setLocalFrameMaxAlign(Align Align) { LocalFrameMaxAlign = Align; }
+ void setLocalFrameMaxAlign(Align Alignment) {
+ LocalFrameMaxAlign = Alignment;
+ }
/// Return the required alignment of the local object blob.
Align getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
@@ -564,7 +566,7 @@ public:
unsigned getMaxAlignment() const { return MaxAlignment.value(); }
/// Make sure the function is at least Align bytes aligned.
- void ensureMaxAlignment(llvm::Align Align);
+ void ensureMaxAlignment(Align Alignment);
/// FIXME: Remove this once transition to Align is over.
inline void ensureMaxAlignment(unsigned Align) {
ensureMaxAlignment(assumeAligned(Align));
@@ -732,9 +734,9 @@ public:
/// Create a new statically sized stack object, returning
/// a nonnegative identifier to represent it.
- int CreateStackObject(uint64_t Size, llvm::Align Alignment, bool isSpillSlot,
+ int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ /// FIXME: Remove this function when transition to Align is over.
inline int CreateStackObject(uint64_t Size, unsigned Alignment,
bool isSpillSlot,
const AllocaInst *Alloca = nullptr,
@@ -745,8 +747,8 @@ public:
/// Create a new statically sized stack object that represents a spill slot,
/// returning a nonnegative identifier to represent it.
- int CreateSpillStackObject(uint64_t Size, llvm::Align Alignment);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ int CreateSpillStackObject(uint64_t Size, Align Alignment);
+ /// FIXME: Remove this function when transition to Align is over.
inline int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
return CreateSpillStackObject(Size, assumeAligned(Alignment));
}
@@ -760,9 +762,8 @@ public:
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be created whenever a variable sized object is
/// created, whether or not the index returned is actually used.
- int CreateVariableSizedObject(llvm::Align Alignment,
- const AllocaInst *Alloca);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);
+ /// FIXME: Remove this function when transition to Align is over.
int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca) {
return CreateVariableSizedObject(assumeAligned(Alignment), Alloca);
}
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index be685afdba0..adf3f4c0e2f 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -277,7 +277,7 @@ class MachineFunction {
unsigned FunctionNumber;
/// Alignment - The alignment of the function.
- llvm::Align Alignment;
+ Align Alignment;
/// ExposesReturnsTwice - True if the function calls setjmp or related
/// functions with attribute "returns twice", but doesn't have
@@ -509,13 +509,13 @@ public:
WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
/// getAlignment - Return the alignment of the function.
- llvm::Align getAlignment() const { return Alignment; }
+ Align getAlignment() const { return Alignment; }
/// setAlignment - Set the alignment of the function.
- void setAlignment(llvm::Align A) { Alignment = A; }
+ void setAlignment(Align A) { Alignment = A; }
/// ensureAlignment - Make sure the function is at least A bytes aligned.
- void ensureAlignment(llvm::Align A) {
+ void ensureAlignment(Align A) {
if (Alignment < A)
Alignment = A;
}
diff --git a/llvm/include/llvm/CodeGen/TargetCallingConv.h b/llvm/include/llvm/CodeGen/TargetCallingConv.h
index d25a9a24591..360fc51bd04 100644
--- a/llvm/include/llvm/CodeGen/TargetCallingConv.h
+++ b/llvm/include/llvm/CodeGen/TargetCallingConv.h
@@ -126,7 +126,7 @@ namespace ISD {
return A ? A->value() : 0;
}
void setByValAlign(unsigned A) {
- ByValAlign = encode(llvm::Align(A));
+ ByValAlign = encode(Align(A));
assert(getByValAlign() == A && "bitfield overflow");
}
@@ -135,7 +135,7 @@ namespace ISD {
return A ? A->value() : 0;
}
void setOrigAlign(unsigned A) {
- OrigAlign = encode(llvm::Align(A));
+ OrigAlign = encode(Align(A));
assert(getOrigAlign() == A && "bitfield overflow");
}
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f6ff00ff085..f325707fd0a 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1596,18 +1596,18 @@ public:
}
/// Return the minimum stack alignment of an argument.
- llvm::Align getMinStackArgumentAlignment() const {
+ Align getMinStackArgumentAlignment() const {
return MinStackArgumentAlignment;
}
/// Return the minimum function alignment.
- llvm::Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
+ Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
/// Return the preferred function alignment.
- llvm::Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+ Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
/// Return the preferred loop alignment.
- virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+ virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
return PrefLoopAlignment;
}
@@ -2120,24 +2120,24 @@ protected:
}
/// Set the target's minimum function alignment.
- void setMinFunctionAlignment(llvm::Align Align) {
- MinFunctionAlignment = Align;
+ void setMinFunctionAlignment(Align Alignment) {
+ MinFunctionAlignment = Alignment;
}
/// Set the target's preferred function alignment. This should be set if
/// there is a performance benefit to higher-than-minimum alignment
- void setPrefFunctionAlignment(llvm::Align Align) {
- PrefFunctionAlignment = Align;
+ void setPrefFunctionAlignment(Align Alignment) {
+ PrefFunctionAlignment = Alignment;
}
/// Set the target's preferred loop alignment. Default alignment is one, it
/// means the target does not care about loop alignment. The target may also
/// override getPrefLoopAlignment to provide per-loop values.
- void setPrefLoopAlignment(llvm::Align Align) { PrefLoopAlignment = Align; }
+ void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
/// Set the minimum stack alignment of an argument.
- void setMinStackArgumentAlignment(llvm::Align Align) {
- MinStackArgumentAlignment = Align;
+ void setMinStackArgumentAlignment(Align Alignment) {
+ MinStackArgumentAlignment = Alignment;
}
/// Set the maximum atomic operation size supported by the
@@ -2699,18 +2699,18 @@ private:
Sched::Preference SchedPreferenceInfo;
/// The minimum alignment that any argument on the stack needs to have.
- llvm::Align MinStackArgumentAlignment;
+ Align MinStackArgumentAlignment;
/// The minimum function alignment (used when optimizing for size, and to
/// prevent explicitly provided alignment from leading to incorrect code).
- llvm::Align MinFunctionAlignment;
+ Align MinFunctionAlignment;
/// The preferred function alignment (used when alignment unspecified and
/// optimizing for speed).
- llvm::Align PrefFunctionAlignment;
+ Align PrefFunctionAlignment;
/// The preferred loop alignment (in log2, not in bytes).
- llvm::Align PrefLoopAlignment;
+ Align PrefLoopAlignment;
/// Size in bits of the maximum atomics size the backend supports.
/// Accesses larger than this will be expanded by AtomicExpandPass.
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index 6aa5a8aa8b8..b33cb497d6e 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -72,11 +72,11 @@ struct LayoutAlignElem {
/// Alignment type from \c AlignTypeEnum
unsigned AlignType : 8;
unsigned TypeBitWidth : 24;
- llvm::Align ABIAlign;
- llvm::Align PrefAlign;
+ Align ABIAlign;
+ Align PrefAlign;
- static LayoutAlignElem get(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width);
+ static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width);
bool operator==(const LayoutAlignElem &rhs) const;
};
@@ -88,15 +88,15 @@ struct LayoutAlignElem {
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
- llvm::Align ABIAlign;
- llvm::Align PrefAlign;
+ Align ABIAlign;
+ Align PrefAlign;
uint32_t TypeByteWidth;
uint32_t AddressSpace;
uint32_t IndexWidth;
/// Initializer
- static PointerAlignElem get(uint32_t AddressSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign, uint32_t TypeByteWidth,
+ static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth);
bool operator==(const PointerAlignElem &rhs) const;
@@ -173,16 +173,15 @@ private:
/// well-defined bitwise representation.
SmallVector<unsigned, 8> NonIntegralAddressSpaces;
- void setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width);
- llvm::Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, Type *Ty) const;
- void setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign, uint32_t TypeByteWidth,
- uint32_t IndexWidth);
+ void setAlignment(AlignTypeEnum align_type, Align abi_align, Align pref_align,
+ uint32_t bit_width);
+ Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+ bool ABIAlign, Type *Ty) const;
+ void setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
+ uint32_t TypeByteWidth, uint32_t IndexWidth);
/// Internal helper method that returns requested alignment for type.
- llvm::Align getAlignment(Type *Ty, bool abi_or_pref) const;
+ Align getAlignment(Type *Ty, bool abi_or_pref) const;
/// Parses a target data specification string. Assert if the string is
/// malformed.
@@ -262,11 +261,11 @@ public:
bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
/// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(llvm::Align Align) const {
- return StackNaturalAlign && (Align > StackNaturalAlign);
+ bool exceedsNaturalStackAlignment(Align Alignment) const {
+ return StackNaturalAlign && (Alignment > StackNaturalAlign);
}
- llvm::Align getStackAlignment() const {
+ Align getStackAlignment() const {
assert(StackNaturalAlign && "StackNaturalAlign must be defined");
return *StackNaturalAlign;
}
@@ -349,12 +348,12 @@ public:
}
/// Layout pointer alignment
- llvm::Align getPointerABIAlignment(unsigned AS) const;
+ Align getPointerABIAlignment(unsigned AS) const;
/// Return target's alignment for stack-based pointers
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- llvm::Align getPointerPrefAlignment(unsigned AS = 0) const;
+ Align getPointerPrefAlignment(unsigned AS = 0) const;
/// Layout pointer size
/// FIXME: The defaults need to be removed once all of
@@ -490,7 +489,7 @@ public:
/// Returns the minimum ABI-required alignment for an integer type of
/// the specified bitwidth.
- llvm::Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
+ Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
/// Returns the preferred stack/global alignment for the specified
/// type.
@@ -562,7 +561,7 @@ inline LLVMTargetDataRef wrap(const DataLayout *P) {
/// based on the DataLayout structure.
class StructLayout {
uint64_t StructSize;
- llvm::Align StructAlignment;
+ Align StructAlignment;
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
@@ -572,7 +571,7 @@ public:
uint64_t getSizeInBits() const { return 8 * StructSize; }
- llvm::Align getAlignment() const { return StructAlignment; }
+ Align getAlignment() const { return StructAlignment; }
/// Returns whether the struct has padding or not between its fields.
/// NB: Padding in nested element is not taken into account.
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 5c9b03a4c8a..c55508fb04c 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -114,9 +114,9 @@ public:
return MA->value();
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Return true if this alloca is in the entry block of the function and is a
/// constant size. If so, the code generator will fold it into the
@@ -248,9 +248,9 @@ public:
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
@@ -378,9 +378,9 @@ public:
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index e97274a9602..d057feda87d 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -59,7 +59,7 @@ private:
MCSymbol *Begin;
MCSymbol *End = nullptr;
/// The alignment requirement of this section.
- llvm::Align Alignment;
+ Align Alignment;
/// The section index in the assemblers section list.
unsigned Ordinal = 0;
/// The index of this section in the layout order.
@@ -119,7 +119,7 @@ public:
bool hasEnded() const;
unsigned getAlignment() const { return Alignment.value(); }
- void setAlignment(llvm::Align Value) { Alignment = Value; }
+ void setAlignment(Align Value) { Alignment = Value; }
unsigned getOrdinal() const { return Ordinal; }
void setOrdinal(unsigned Value) { Ordinal = Value; }
diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h
index c2673faf317..c90d8d72d44 100644
--- a/llvm/include/llvm/Support/Alignment.h
+++ b/llvm/include/llvm/Support/Alignment.h
@@ -76,10 +76,10 @@ public:
/// Returns a default constructed Align which corresponds to no alignment.
/// This is useful to test for unalignment as it conveys clear semantics.
- /// `if (A != llvm::Align::None())`
+ /// `if (A != Align::None())`
/// would be better than
- /// `if (A > llvm::Align(1))`
- constexpr static const Align None() { return llvm::Align(); }
+ /// `if (A > Align(1))`
+ constexpr static const Align None() { return Align(); }
};
/// Treats the value 0 as a 1, so Align is always at least 1.
@@ -142,8 +142,8 @@ inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align.
-inline uint64_t offsetToAlignment(uint64_t Value, llvm::Align Align) {
- return alignTo(Value, Align) - Value;
+inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
+ return alignTo(Value, Alignment) - Value;
}
/// Returns the log2 of the alignment.
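offsetToAlignment is simply the padding needed to reach the next aligned value, alignTo(Value, Alignment) - Value. A standalone sketch with worked checks, assuming a power-of-two alignment as Align guarantees:

#include <cassert>
#include <cstdint>

// Same arithmetic as the offsetToAlignment above, plain integer for Align.
static uint64_t offsetToAlign(uint64_t Value, uint64_t Alignment) {
  uint64_t Aligned = (Value + Alignment - 1) & ~(Alignment - 1);
  return Aligned - Value; // bytes of padding required
}

int main() {
  assert(offsetToAlign(10, 8) == 6); // 10 rounds up to 16
  assert(offsetToAlign(16, 8) == 0); // already aligned
  assert(offsetToAlign(7, 1) == 0);  // Align::None(): never any padding
  return 0;
}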
diff --git a/llvm/include/llvm/Support/OnDiskHashTable.h b/llvm/include/llvm/Support/OnDiskHashTable.h
index c4c0ac97bdb..11dc0de0f35 100644
--- a/llvm/include/llvm/Support/OnDiskHashTable.h
+++ b/llvm/include/llvm/Support/OnDiskHashTable.h
@@ -208,8 +208,7 @@ public:
// Pad with zeros so that we can start the hashtable at an aligned address.
offset_type TableOff = Out.tell();
- uint64_t N =
- llvm::offsetToAlignment(TableOff, llvm::Align(alignof(offset_type)));
+ uint64_t N = offsetToAlignment(TableOff, Align(alignof(offset_type)));
TableOff += N;
while (N--)
LE.write<uint8_t>(0);
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 5acdeb64a40..172c86eb464 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -560,9 +560,9 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
-APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
- if (Options.RoundToAlign && Align)
- return APInt(IntTyBits, alignTo(Size.getZExtValue(), llvm::Align(Align)));
+APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Alignment) {
+ if (Options.RoundToAlign && Alignment)
+ return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align(Alignment)));
return Size;
}
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 0d58234630b..553f1428217 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -302,12 +302,11 @@ bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType) const {
}
bool TargetTransformInfo::isLegalNTStore(Type *DataType,
- llvm::Align Alignment) const {
+ Align Alignment) const {
return TTIImpl->isLegalNTStore(DataType, Alignment);
}
-bool TargetTransformInfo::isLegalNTLoad(Type *DataType,
- llvm::Align Alignment) const {
+bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
return TTIImpl->isLegalNTLoad(DataType, Alignment);
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 5664a47aeea..df2ec85d96c 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -163,29 +163,28 @@ static gcp_map_type &getGCMap(void *&P) {
/// getGVAlignment - Return the alignment to use for the specified global
/// value. This rounds up to the preferred alignment if possible and legal.
-llvm::Align AsmPrinter::getGVAlignment(const GlobalValue *GV,
- const DataLayout &DL,
- llvm::Align InAlign) {
- llvm::Align Align;
+Align AsmPrinter::getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+ Align InAlign) {
+ Align Alignment;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- Align = llvm::Align(DL.getPreferredAlignment(GVar));
+ Alignment = Align(DL.getPreferredAlignment(GVar));
// If InAlign is specified, round it to it.
- if (InAlign > Align)
- Align = InAlign;
+ if (InAlign > Alignment)
+ Alignment = InAlign;
// If the GV has a specified alignment, take it into account.
- const llvm::MaybeAlign GVAlign(GV->getAlignment());
+ const MaybeAlign GVAlign(GV->getAlignment());
if (!GVAlign)
- return Align;
+ return Alignment;
assert(GVAlign && "GVAlign must be set");
// If the GVAlign is larger than NumBits, or if we are required to obey
// NumBits because the GV has an assigned section, obey it.
- if (*GVAlign > Align || GV->hasSection())
- Align = *GVAlign;
- return Align;
+ if (*GVAlign > Alignment || GV->hasSection())
+ Alignment = *GVAlign;
+ return Alignment;
}
AsmPrinter::AsmPrinter(TargetMachine &tm, std::unique_ptr<MCStreamer> Streamer)
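Restating the policy of getGVAlignment: start from the preferred alignment of the global, raise it to InAlign if the caller demands more, then let an explicit alignment attribute win whenever it is larger, or unconditionally when the global is pinned to a section. A standalone sketch of that decision, with plain integers and 0 meaning "no explicit alignment" (illustrative names, not the AsmPrinter API):

#include <algorithm>
#include <cstdint>

// PreferredAlign stands for DL.getPreferredAlignment(GVar); ExplicitAlign
// is 0 where MaybeAlign(GV->getAlignment()) would be empty.
uint64_t gvAlignmentSketch(uint64_t PreferredAlign, uint64_t InAlign,
                           uint64_t ExplicitAlign, bool HasSection) {
  uint64_t Alignment = std::max(PreferredAlign, InAlign);
  if (ExplicitAlign == 0)
    return Alignment;              // nothing explicit to obey
  if (ExplicitAlign > Alignment || HasSection)
    Alignment = ExplicitAlign;     // explicit request (or section) wins
  return Alignment;
}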
@@ -507,7 +506,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// If the alignment is specified, we *must* obey it. Overaligning a global
// with a specified alignment is a prompt way to break globals emitted to
// sections and expected to be contiguous (e.g. ObjC metadata).
- const llvm::Align Align = getGVAlignment(GV, DL);
+ const Align Alignment = getGVAlignment(GV, DL);
for (const HandlerInfo &HI : Handlers) {
NamedRegionTimer T(HI.TimerName, HI.TimerDescription,
@@ -523,7 +522,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
const bool SupportsAlignment =
getObjFileLowering().getCommDirectiveSupportsAlignment();
OutStreamer->EmitCommonSymbol(GVSym, Size,
- SupportsAlignment ? Align.value() : 0);
+ SupportsAlignment ? Alignment.value() : 0);
return;
}
@@ -538,7 +537,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
Size = 1; // zerofill of 0 bytes is undefined.
EmitLinkage(GV, GVSym);
// .zerofill __DATA, __bss, _foo, 400, 5
- OutStreamer->EmitZerofill(TheSection, GVSym, Size, Align.value());
+ OutStreamer->EmitZerofill(TheSection, GVSym, Size, Alignment.value());
return;
}
@@ -557,7 +556,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// Prefer to simply fall back to .local / .comm in this case.
if (MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) {
// .lcomm _foo, 42
- OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Align.value());
+ OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Alignment.value());
return;
}
@@ -567,7 +566,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
const bool SupportsAlignment =
getObjFileLowering().getCommDirectiveSupportsAlignment();
OutStreamer->EmitCommonSymbol(GVSym, Size,
- SupportsAlignment ? Align.value() : 0);
+ SupportsAlignment ? Alignment.value() : 0);
return;
}
@@ -588,11 +587,11 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
if (GVKind.isThreadBSS()) {
TheSection = getObjFileLowering().getTLSBSSSection();
- OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Align.value());
+ OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Alignment.value());
} else if (GVKind.isThreadData()) {
OutStreamer->SwitchSection(TheSection);
- EmitAlignment(Align, GV);
+ EmitAlignment(Alignment, GV);
OutStreamer->EmitLabel(MangSym);
EmitGlobalConstant(GV->getParent()->getDataLayout(),
@@ -628,7 +627,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
OutStreamer->SwitchSection(TheSection);
EmitLinkage(GV, EmittedInitSym);
- EmitAlignment(Align, GV);
+ EmitAlignment(Alignment, GV);
OutStreamer->EmitLabel(EmittedInitSym);
@@ -1435,7 +1434,7 @@ bool AsmPrinter::doFinalization(Module &M) {
OutStreamer->SwitchSection(TLOF.getDataSection());
const DataLayout &DL = M.getDataLayout();
- EmitAlignment(llvm::Align(DL.getPointerSize()));
+ EmitAlignment(Align(DL.getPointerSize()));
for (const auto &Stub : Stubs) {
OutStreamer->EmitLabel(Stub.first);
OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1462,7 +1461,7 @@ bool AsmPrinter::doFinalization(Module &M) {
COFF::IMAGE_SCN_LNK_COMDAT,
SectionKind::getReadOnly(), Stub.first->getName(),
COFF::IMAGE_COMDAT_SELECT_ANY));
- EmitAlignment(llvm::Align(DL.getPointerSize()));
+ EmitAlignment(Align(DL.getPointerSize()));
OutStreamer->EmitSymbolAttribute(Stub.first, MCSA_Global);
OutStreamer->EmitLabel(Stub.first);
OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1763,7 +1762,7 @@ void AsmPrinter::EmitConstantPool() {
if (CurSection != CPSections[i].S) {
OutStreamer->SwitchSection(CPSections[i].S);
- EmitAlignment(llvm::Align(CPSections[i].Alignment));
+ EmitAlignment(Align(CPSections[i].Alignment));
CurSection = CPSections[i].S;
Offset = 0;
}
@@ -1810,7 +1809,7 @@ void AsmPrinter::EmitJumpTableInfo() {
OutStreamer->SwitchSection(ReadOnlySection);
}
- EmitAlignment(llvm::Align(MJTI->getEntryAlignment(DL)));
+ EmitAlignment(Align(MJTI->getEntryAlignment(DL)));
// Jump tables in code sections are marked with a data_region directive
// where that's supported.
@@ -2026,7 +2025,7 @@ void AsmPrinter::EmitXXStructorList(const DataLayout &DL, const Constant *List,
llvm::stable_sort(Structors, [](const Structor &L, const Structor &R) {
return L.Priority < R.Priority;
});
- const llvm::Align Align = DL.getPointerPrefAlignment();
+ const Align Align = DL.getPointerPrefAlignment();
for (Structor &S : Structors) {
const TargetLoweringObjectFile &Obj = getObjFileLowering();
const MCSymbol *KeySym = nullptr;
@@ -2150,18 +2149,17 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// two boundary. If a global value is specified, and if that global has
// an explicit alignment requested, it will override the alignment request
// if required for correctness.
-void AsmPrinter::EmitAlignment(llvm::Align Align,
- const GlobalObject *GV) const {
+void AsmPrinter::EmitAlignment(Align Alignment, const GlobalObject *GV) const {
if (GV)
- Align = getGVAlignment(GV, GV->getParent()->getDataLayout(), Align);
+ Alignment = getGVAlignment(GV, GV->getParent()->getDataLayout(), Alignment);
- if (Align == 1)
+ if (Alignment == Align::None())
return; // 1-byte aligned: no need to emit alignment.
if (getCurrentSection()->getKind().isText())
- OutStreamer->EmitCodeAlignment(Align.value());
+ OutStreamer->EmitCodeAlignment(Alignment.value());
else
- OutStreamer->EmitValueToAlignment(Align.value());
+ OutStreamer->EmitValueToAlignment(Alignment.value());
}
//===----------------------------------------------------------------------===//
@@ -2936,9 +2934,9 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) {
}
// Emit an alignment directive for this block, if needed.
- const llvm::Align Align = MBB.getAlignment();
- if (Align != llvm::Align::None())
- EmitAlignment(Align);
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment != Align::None())
+ EmitAlignment(Alignment);
MCCodePaddingContext Context;
setupCodePaddingContext(MBB, Context);
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 255203740ac..273d2050e4a 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -2509,8 +2509,8 @@ void DwarfDebug::emitDebugARanges() {
unsigned TupleSize = PtrSize * 2;
// 7.20 in the Dwarf specs requires the table to be aligned to a tuple.
- unsigned Padding = offsetToAlignment(sizeof(int32_t) + ContentSize,
- llvm::Align(TupleSize));
+ unsigned Padding =
+ offsetToAlignment(sizeof(int32_t) + ContentSize, Align(TupleSize));
ContentSize += Padding;
ContentSize += (List.size() + 1) * TupleSize;
diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 689d55b3caa..31dfaaac836 100644
--- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -426,7 +426,7 @@ MCSymbol *EHStreamer::emitExceptionTable() {
// EHABI). In this case LSDASection will be NULL.
if (LSDASection)
Asm->OutStreamer->SwitchSection(LSDASection);
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
// Emit the LSDA.
MCSymbol *GCCETSym =
@@ -602,11 +602,11 @@ MCSymbol *EHStreamer::emitExceptionTable() {
}
if (HaveTTData) {
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
emitTypeInfos(TTypeEncoding, TTBaseLabel);
}
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
return GCCETSym;
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
index aac08041d63..3849644d158 100644
--- a/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
@@ -72,7 +72,7 @@ void ErlangGCPrinter::finishAssembly(Module &M, GCModuleInfo &Info,
**/
// Align to address width.
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
// Emit PointCount.
OS.AddComment("safe point count");
diff --git a/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index 824d0cbcb0c..b4eda5fa8c5 100644
--- a/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -129,7 +129,7 @@ void OcamlGCMetadataPrinter::finishAssembly(Module &M, GCModuleInfo &Info,
report_fatal_error(" Too much descriptor for ocaml GC");
}
AP.emitInt16(NumDescriptors);
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
for (GCModuleInfo::FuncInfoVec::iterator I = Info.funcinfo_begin(),
IE = Info.funcinfo_end();
@@ -180,7 +180,7 @@ void OcamlGCMetadataPrinter::finishAssembly(Module &M, GCModuleInfo &Info,
AP.emitInt16(K->StackOffset);
}
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
}
}
}
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
index bf7365b7c1d..6efdc9efa96 100644
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -65,17 +65,17 @@ class BranchRelaxation : public MachineFunctionPass {
/// block.
unsigned postOffset(const MachineBasicBlock &MBB) const {
const unsigned PO = Offset + Size;
- const llvm::Align Align = MBB.getAlignment();
- if (Align == 1)
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment == 1)
return PO;
- const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
- if (Align <= ParentAlign)
- return PO + offsetToAlignment(PO, Align);
+ const Align ParentAlign = MBB.getParent()->getAlignment();
+ if (Alignment <= ParentAlign)
+ return PO + offsetToAlignment(PO, Alignment);
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
- return PO + Align.value() + offsetToAlignment(PO, Align);
+ return PO + Alignment.value() + offsetToAlignment(PO, Alignment);
}
};
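postOffset distinguishes two cases: when the block's alignment is no stricter than the function's, the block offset is known modulo that alignment and the exact padding is offsetToAlignment; when it is stricter, the final placement is unknown, so the pass pessimistically budgets a full extra Alignment bytes of nops on top. A standalone sketch (illustrative names):

#include <cstdint>

static uint64_t offsetToAlign(uint64_t Value, uint64_t Alignment) {
  return ((Value + Alignment - 1) & ~(Alignment - 1)) - Value;
}

// Worst-case offset just past a block once alignment padding is counted in.
uint64_t postOffsetSketch(uint64_t OffsetPlusSize, uint64_t BlockAlign,
                          uint64_t FuncAlign) {
  if (BlockAlign == 1)
    return OffsetPlusSize;          // no alignment requested
  if (BlockAlign <= FuncAlign)      // offset is known mod BlockAlign
    return OffsetPlusSize + offsetToAlign(OffsetPlusSize, BlockAlign);
  // Stricter than the function itself: placement unknown, so assume the
  // assembler inserts a full BlockAlign bytes of padding as well.
  return OffsetPlusSize + BlockAlign + offsetToAlign(OffsetPlusSize, BlockAlign);
}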
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index 39eabd92690..a397039180a 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -43,17 +43,18 @@ CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, int MinSize,
int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
- llvm::Align MinAlign(MinAlignment);
- llvm::Align Align(ArgFlags.getByValAlign());
+ Align MinAlign(MinAlignment);
+ Align Alignment(ArgFlags.getByValAlign());
unsigned Size = ArgFlags.getByValSize();
if (MinSize > (int)Size)
Size = MinSize;
- if (MinAlign > Align)
- Align = MinAlign;
- ensureMaxAlignment(Align);
- MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align.value());
+ if (MinAlign > Alignment)
+ Alignment = MinAlign;
+ ensureMaxAlignment(Alignment);
+ MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size,
+ Alignment.value());
Size = unsigned(alignTo(Size, MinAlign));
- unsigned Offset = AllocateStack(Size, Align.value());
+ unsigned Offset = AllocateStack(Size, Alignment.value());
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
@@ -198,7 +199,7 @@ static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
MVT VT, CCAssignFn Fn) {
unsigned SavedStackOffset = StackOffset;
- llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
+ Align SavedMaxStackArgAlign = MaxStackArgAlign;
unsigned NumLocs = Locs.size();
// Set the 'inreg' flag if it is used for this calling convention.
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 6b2bd1691e6..974b500d4ab 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -866,7 +866,7 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Align = MinAlign(DstAlign, SrcAlign);
+ unsigned Alignment = MinAlign(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -885,7 +885,8 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+ MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+ SrcAlign,
/*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
/*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
@@ -901,16 +902,16 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI) < NewAlign)
MFI.setObjectAlignment(FI, NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
@@ -973,7 +974,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Align = MinAlign(DstAlign, SrcAlign);
+ unsigned Alignment = MinAlign(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -991,7 +992,8 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
// to a bug in its findOptimalMemOpLowering implementation. For now do the
// same thing here.
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+ MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+ SrcAlign,
/*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
/*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
@@ -1007,16 +1009,16 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI) < NewAlign)
MFI.setObjectAlignment(FI, NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
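The NewAlign loop, which appears in optimizeMemcpy and optimizeMemmove here and again in SelectionDAG's memcpy lowering below, implements one idea: prefer the widest type's natural alignment for the destination frame object, but if the target will not realign the stack, halve that preference until it no longer exceeds the natural stack alignment. A standalone sketch (illustrative names; the real code also records the raised alignment on the frame object via MFI.setObjectAlignment):

#include <cstdint>

// NaturalStackAlign == 0 models an undefined StackNaturalAlign, for which
// exceedsNaturalStackAlignment() is always false.
uint64_t pickDstAlignSketch(uint64_t NewAlign, uint64_t CurAlign,
                            uint64_t NaturalStackAlign,
                            bool NeedsStackRealignment) {
  if (!NeedsStackRealignment)
    while (NewAlign > CurAlign && NaturalStackAlign &&
           NewAlign > NaturalStackAlign)
      NewAlign /= 2;                 // back off to a legal alignment
  return NewAlign > CurAlign ? NewAlign : CurAlign;
}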
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index dd9f3436679..0a628d53923 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition(
return error(Loc, Twine("redefinition of machine basic block with id #") +
Twine(ID));
if (Alignment)
- MBB->setAlignment(llvm::Align(Alignment));
+ MBB->setAlignment(Align(Alignment));
if (HasAddressTaken)
MBB->setHasAddressTaken();
MBB->setIsEHPad(IsLandingPad);
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 5f5b6eb82bc..55fac93d899 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
}
if (YamlMF.Alignment)
- MF.setAlignment(llvm::Align(YamlMF.Alignment));
+ MF.setAlignment(Align(YamlMF.Alignment));
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasWinCFI(YamlMF.HasWinCFI);
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 8f7866313bc..1a4e21ac06a 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -629,7 +629,7 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
OS << "landing-pad";
HasAttributes = true;
}
- if (MBB.getAlignment() != llvm::Align::None()) {
+ if (MBB.getAlignment() != Align::None()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << MBB.getAlignment().value();
HasAttributes = true;
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 6e06107f2e3..854bef3aab0 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,7 +326,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "landing-pad";
HasAttributes = true;
}
- if (getAlignment() != llvm::Align::None()) {
+ if (getAlignment() != Align::None()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << Log2(getAlignment());
HasAttributes = true;
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 5737fe6a407..7f931b3b6f6 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2807,7 +2807,7 @@ void MachineBlockPlacement::alignBlocks() {
if (!L)
continue;
- const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+ const Align Align = TLI->getPrefLoopAlignment(L);
if (Align == 1)
continue; // Don't care about loop alignment.
@@ -3109,14 +3109,14 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
for (MachineBasicBlock &MBB : MF)
- MBB.setAlignment(llvm::Align(1ULL << AlignAllBlock));
+ MBB.setAlignment(Align(1ULL << AlignAllBlock));
else if (AlignAllNonFallThruBlocks) {
// Align all of the blocks that have no fall-through predecessors to a
// specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
auto LayoutPred = std::prev(MBI);
if (!LayoutPred->isSuccessor(&*MBI))
- MBI->setAlignment(llvm::Align(1ULL << AlignAllNonFallThruBlocks));
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
}
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
index de146184105..604f5145b1a 100644
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -28,26 +28,26 @@
using namespace llvm;
-void MachineFrameInfo::ensureMaxAlignment(llvm::Align Align) {
+void MachineFrameInfo::ensureMaxAlignment(Align Alignment) {
if (!StackRealignable)
- assert(Align <= StackAlignment &&
- "For targets without stack realignment, Align is out of limit!");
- if (MaxAlignment < Align) MaxAlignment = Align;
+ assert(Alignment <= StackAlignment &&
+ "For targets without stack realignment, Alignment is out of limit!");
+ if (MaxAlignment < Alignment)
+ MaxAlignment = Alignment;
}
/// Clamp the alignment if requested and emit a warning.
-static inline llvm::Align clampStackAlignment(bool ShouldClamp,
- llvm::Align Align,
- llvm::Align StackAlign) {
- if (!ShouldClamp || Align <= StackAlign)
- return Align;
- LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Align.value()
- << " exceeds the stack alignment " << StackAlign.value()
+static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
+ Align StackAlignment) {
+ if (!ShouldClamp || Alignment <= StackAlignment)
+ return Alignment;
+ LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
+ << " exceeds the stack alignment " << StackAlignment.value()
<< " when stack realignment is off" << '\n');
- return StackAlign;
+ return StackAlignment;
}
-int MachineFrameInfo::CreateStackObject(uint64_t Size, llvm::Align Alignment,
+int MachineFrameInfo::CreateStackObject(uint64_t Size, Align Alignment,
bool IsSpillSlot,
const AllocaInst *Alloca,
uint8_t StackID) {
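clampStackAlignment encodes the fallback for targets that cannot realign their stack: any requested object alignment beyond the target stack alignment is reduced to it, with a debug-build warning. A standalone sketch (fprintf standing in for LLVM_DEBUG(dbgs() ...)):

#include <cstdint>
#include <cstdio>

uint64_t clampStackAlignSketch(bool StackRealignable, uint64_t Requested,
                               uint64_t StackAlign) {
  if (StackRealignable || Requested <= StackAlign)
    return Requested;               // nothing to clamp
  std::fprintf(stderr,
               "Warning: requested alignment %llu exceeds the stack "
               "alignment %llu when stack realignment is off\n",
               (unsigned long long)Requested,
               (unsigned long long)StackAlign);
  return StackAlign;
}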
@@ -62,8 +62,7 @@ int MachineFrameInfo::CreateStackObject(uint64_t Size, llvm::Align Alignment,
return Index;
}
-int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
- llvm::Align Alignment) {
+int MachineFrameInfo::CreateSpillStackObject(uint64_t Size, Align Alignment) {
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
CreateStackObject(Size, Alignment, true);
int Index = (int)Objects.size() - NumFixedObjects - 1;
@@ -71,7 +70,7 @@ int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
return Index;
}
-int MachineFrameInfo::CreateVariableSizedObject(llvm::Align Alignment,
+int MachineFrameInfo::CreateVariableSizedObject(Align Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
@@ -89,8 +88,8 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
// object is 16-byte aligned. Note that unlike the non-fixed case, if the
// stack needs realignment, we can't assume that the stack will in fact be
// aligned.
- llvm::Align Alignment = commonAlignment(
- ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+ Align Alignment =
+ commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
Objects.insert(Objects.begin(),
StackObject(Size, Alignment, SPOffset, IsImmutable,
@@ -102,8 +101,8 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
int64_t SPOffset,
bool IsImmutable) {
- llvm::Align Alignment = commonAlignment(
- ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+ Align Alignment =
+ commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
Objects.insert(Objects.begin(),
StackObject(Size, Alignment, SPOffset, IsImmutable,
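For fixed objects the achievable alignment is capped by the object's distance from the stack pointer: commonAlignment(StackAlignment, SPOffset) is the largest power of two dividing both. A standalone sketch with a worked value (minAlign reimplements llvm::MinAlign; the wrapper name is illustrative):

#include <cstdint>

// Largest power of two dividing both A and B: the lowest set bit of A | B.
static uint64_t minAlign(uint64_t A, uint64_t B) {
  uint64_t V = A | B;
  return V & (~V + 1);
}

// Alignment of a fixed object at SPOffset on a StackAlign-aligned stack.
// minAlign(16, 40) == 8: an object 40 bytes from a 16-byte-aligned SP can
// only be assumed 8-byte aligned; at SPOffset 0 the full 16 survives.
uint64_t fixedObjectAlignSketch(uint64_t StackAlign, uint64_t SPOffset) {
  return minAlign(StackAlign, SPOffset);
}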
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index f48cb6dbbd7..79380339cc2 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -182,7 +182,7 @@ void MachineFunction::init() {
STI->getTargetLowering()->getPrefFunctionAlignment());
if (AlignAllFunctions)
- Alignment = llvm::Align(1ULL << AlignAllFunctions);
+ Alignment = Align(1ULL << AlignAllFunctions);
JumpTableInfo = nullptr;
diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp
index 9d7605f078f..529fde84e39 100644
--- a/llvm/lib/CodeGen/PatchableFunction.cpp
+++ b/llvm/lib/CodeGen/PatchableFunction.cpp
@@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
MIB.add(MO);
FirstActualI->eraseFromParent();
- MF.ensureAlignment(llvm::Align(16));
+ MF.ensureAlignment(Align(16));
return true;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index e7fd2761edf..1b313de5ccd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1898,7 +1898,7 @@ SDValue SelectionDAG::expandVAArg(SDNode *Node) {
EVT VT = Node->getValueType(0);
SDValue Tmp1 = Node->getOperand(0);
SDValue Tmp2 = Node->getOperand(1);
- const llvm::MaybeAlign MA(Node->getConstantOperandVal(3));
+ const MaybeAlign MA(Node->getConstantOperandVal(3));
SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
Tmp2, MachinePointerInfo(V));
@@ -5757,7 +5757,7 @@ static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
- uint64_t Size, unsigned Align,
+ uint64_t Size, unsigned Alignment,
bool isVol, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
@@ -5782,15 +5782,15 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
- if (Align > SrcAlign)
- SrcAlign = Align;
+ if (Alignment > SrcAlign)
+ SrcAlign = Alignment;
ConstantDataArraySlice Slice;
bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
if (!TLI.findOptimalMemOpLowering(
- MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align),
+ MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
(isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
/*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
@@ -5805,15 +5805,15 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
MFI.setObjectAlignment(FI->getIndex(), NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
@@ -5856,10 +5856,9 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
}
Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
if (Value.getNode()) {
- Store = DAG.getStore(Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, DstOff, dl),
- DstPtrInfo.getWithOffset(DstOff), Align,
- MMOFlags);
+ Store = DAG.getStore(
+ Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
+ DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
OutChains.push_back(Store);
}
}
@@ -5887,7 +5886,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Store = DAG.getTruncStore(
Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
- DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
+ DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
OutStoreChains.push_back(Store);
}
SrcOff += VTSize;
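The renames in this hunk (Align -> Alignment for locals and parameters) are what make dropping the llvm:: qualifier safe: once the type is spelled unqualified, a local named Align shadows it, and expressions like Align(NewAlign) stop parsing as constructor calls. A tiny illustration with hypothetical names, not code from the patch:

struct Align { explicit Align(unsigned) {} };

void before(unsigned Align) { // parameter shadows the type, so...
  // Align A(16);             // ...this would no longer compile here.
  (void)Align;
}

void after(unsigned Alignment) { // renamed parameter:
  Align A(16);                   // the type name is usable again
  (void)A; (void)Alignment;
}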
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 18c74172a07..434fecfb49e 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -729,17 +729,17 @@ Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
// Assign the address of each symbol
for (auto &Sym : SymbolsToAllocate) {
- uint32_t Align = Sym.getAlignment();
+ uint32_t Alignment = Sym.getAlignment();
uint64_t Size = Sym.getCommonSize();
StringRef Name;
if (auto NameOrErr = Sym.getName())
Name = *NameOrErr;
else
return NameOrErr.takeError();
- if (Align) {
+ if (Alignment) {
// This symbol has an alignment requirement.
uint64_t AlignOffset =
- offsetToAlignment((uint64_t)Addr, llvm::Align(Align));
+ offsetToAlignment((uint64_t)Addr, Align(Alignment));
Addr += AlignOffset;
Offset += AlignOffset;
}
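offsetToAlignment(Addr, Align(Alignment)) above returns how many bytes to skip so the common symbol lands on its required boundary. A sketch of the assumed semantics, valid for power-of-two alignments:

#include <cstdint>

// Bytes needed to round Value up to the next multiple of Alignment.
uint64_t offsetToAlignmentSketch(uint64_t Value, uint64_t Alignment) {
  uint64_t Rounded = (Value + Alignment - 1) & ~(Alignment - 1);
  return Rounded - Value; // 0 when Value is already aligned
}
// offsetToAlignmentSketch(0x1003, 8) == 5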
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 206649b4d86..b125d1550c6 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -51,7 +51,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
// Loop over each of the elements, placing them in memory.
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Type *Ty = ST->getElementType(i);
- const llvm::Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
+ const Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
// Add padding if necessary to align the data element properly.
if (!isAligned(TyAlign, StructSize)) {
@@ -98,10 +98,8 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
// LayoutAlignElem, LayoutAlign support
//===----------------------------------------------------------------------===//
-LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type,
- llvm::Align abi_align,
- llvm::Align pref_align,
- uint32_t bit_width) {
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
LayoutAlignElem retval;
retval.AlignType = align_type;
@@ -123,10 +121,8 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
// PointerAlignElem, PointerAlign support
//===----------------------------------------------------------------------===//
-PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace,
- llvm::Align ABIAlign,
- llvm::Align PrefAlign,
- uint32_t TypeByteWidth,
+PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth) {
assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
PointerAlignElem retval;
@@ -160,19 +156,18 @@ const char *DataLayout::getManglingComponent(const Triple &T) {
}
static const LayoutAlignElem DefaultAlignments[] = {
- {INTEGER_ALIGN, 1, llvm::Align(1), llvm::Align(1)}, // i1
- {INTEGER_ALIGN, 8, llvm::Align(1), llvm::Align(1)}, // i8
- {INTEGER_ALIGN, 16, llvm::Align(2), llvm::Align(2)}, // i16
- {INTEGER_ALIGN, 32, llvm::Align(4), llvm::Align(4)}, // i32
- {INTEGER_ALIGN, 64, llvm::Align(4), llvm::Align(8)}, // i64
- {FLOAT_ALIGN, 16, llvm::Align(2), llvm::Align(2)}, // half
- {FLOAT_ALIGN, 32, llvm::Align(4), llvm::Align(4)}, // float
- {FLOAT_ALIGN, 64, llvm::Align(8), llvm::Align(8)}, // double
- {FLOAT_ALIGN, 128, llvm::Align(16), llvm::Align(16)}, // ppcf128, quad, ...
- {VECTOR_ALIGN, 64, llvm::Align(8), llvm::Align(8)}, // v2i32, v1i64, ...
- {VECTOR_ALIGN, 128, llvm::Align(16),
- llvm::Align(16)}, // v16i8, v8i16, v4i32, ...
- {AGGREGATE_ALIGN, 0, llvm::Align(1), llvm::Align(8)} // struct
+ {INTEGER_ALIGN, 1, Align(1), Align(1)}, // i1
+ {INTEGER_ALIGN, 8, Align(1), Align(1)}, // i8
+ {INTEGER_ALIGN, 16, Align(2), Align(2)}, // i16
+ {INTEGER_ALIGN, 32, Align(4), Align(4)}, // i32
+ {INTEGER_ALIGN, 64, Align(4), Align(8)}, // i64
+ {FLOAT_ALIGN, 16, Align(2), Align(2)}, // half
+ {FLOAT_ALIGN, 32, Align(4), Align(4)}, // float
+ {FLOAT_ALIGN, 64, Align(8), Align(8)}, // double
+ {FLOAT_ALIGN, 128, Align(16), Align(16)}, // ppcf128, quad, ...
+ {VECTOR_ALIGN, 64, Align(8), Align(8)}, // v2i32, v1i64, ...
+ {VECTOR_ALIGN, 128, Align(16), Align(16)}, // v16i8, v8i16, v4i32, ...
+ {AGGREGATE_ALIGN, 0, Align(1), Align(8)} // struct
};
void DataLayout::reset(StringRef Desc) {
@@ -193,7 +188,7 @@ void DataLayout::reset(StringRef Desc) {
setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
E.TypeBitWidth);
}
- setPointerAlignment(0, llvm::Align(8), llvm::Align(8), 8, 8);
+ setPointerAlignment(0, Align(8), Align(8), 8, 8);
parseSpecifier(Desc);
}
@@ -486,8 +481,8 @@ DataLayout::findAlignmentLowerBound(AlignTypeEnum AlignType,
});
}
-void DataLayout::setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width) {
+void DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
// AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
// uint16_t, it is unclear if there are requirements for alignment to be less
// than 2^16 other than storage. In the meantime we leave the restriction as
@@ -520,9 +515,8 @@ DataLayout::findPointerLowerBound(uint32_t AddressSpace) {
});
}
-void DataLayout::setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign,
- uint32_t TypeByteWidth,
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth) {
if (PrefAlign < ABIAlign)
report_fatal_error(
@@ -542,9 +536,8 @@ void DataLayout::setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
/// preferred if ABIInfo = false) the layout wants for the specified datatype.
-llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
- uint32_t BitWidth, bool ABIInfo,
- Type *Ty) const {
+Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, uint32_t BitWidth,
+ bool ABIInfo, Type *Ty) const {
AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
// See if we found an exact match. Or, if we are looking for an integer type
// but don't have an exact match, take the next largest integer. This is where
@@ -563,10 +556,11 @@ llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
} else if (AlignType == VECTOR_ALIGN) {
// By default, use natural alignment for vector types. This is consistent
// with what clang and llvm-gcc do.
- unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
- Align *= cast<VectorType>(Ty)->getNumElements();
- Align = PowerOf2Ceil(Align);
- return llvm::Align(Align);
+ unsigned Alignment =
+ getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Alignment *= cast<VectorType>(Ty)->getNumElements();
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
// If we still couldn't find a reasonable default alignment, fall back
@@ -575,9 +569,9 @@ llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
// approximation of reality, and if the user wanted something less
// conservative, they should have specified it explicitly in the data
// layout.
- unsigned Align = getTypeStoreSize(Ty);
- Align = PowerOf2Ceil(Align);
- return llvm::Align(Align);
+ unsigned Alignment = getTypeStoreSize(Ty);
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
namespace {
@@ -638,7 +632,7 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-llvm::Align DataLayout::getPointerABIAlignment(unsigned AS) const {
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -647,7 +641,7 @@ llvm::Align DataLayout::getPointerABIAlignment(unsigned AS) const {
return I->ABIAlign;
}
-llvm::Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -704,7 +698,7 @@ unsigned DataLayout::getIndexTypeSizeInBits(Type *Ty) const {
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-llvm::Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
AlignTypeEnum AlignType;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -723,12 +717,11 @@ llvm::Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
case Type::StructTyID: {
// Packed structure types always have an ABI alignment of one.
if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
- return llvm::Align::None();
+ return Align::None();
// Get the layout annotation... which is lazily created on demand.
const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
- const llvm::Align Align =
- getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+ const Align Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
return std::max(Align, Layout->getAlignment());
}
case Type::IntegerTyID:
@@ -761,7 +754,7 @@ unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
-llvm::Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
}
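The VECTOR_ALIGN fallback above computes a vector's natural alignment: the element's allocation size times the element count, rounded up to a power of two. A worked version of that arithmetic, using hypothetical helpers that mirror the code:

#include <cstdint>

uint64_t powerOf2Ceil(uint64_t V) { // round up to a power of two
  uint64_t P = 1;
  while (P < V)
    P <<= 1;
  return P;
}

uint64_t naturalVectorAlign(uint64_t EltAllocSize, uint64_t NumElts) {
  return powerOf2Ceil(EltAllocSize * NumElts);
}
// naturalVectorAlign(4, 4) == 16 -> <4 x i32> is 16-byte aligned
// naturalVectorAlign(4, 3) == 16 -> <3 x i32> rounds 12 up to 16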
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 118711ab65d..f8eec2e805e 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1248,7 +1248,7 @@ void AllocaInst::setAlignment(unsigned Align) {
setAlignment(llvm::MaybeAlign(Align));
}
-void AllocaInst::setAlignment(llvm::MaybeAlign Align) {
+void AllocaInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
@@ -1343,7 +1343,7 @@ void LoadInst::setAlignment(unsigned Align) {
setAlignment(llvm::MaybeAlign(Align));
}
-void LoadInst::setAlignment(llvm::MaybeAlign Align) {
+void LoadInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
@@ -1430,7 +1430,7 @@ void StoreInst::setAlignment(unsigned Align) {
setAlignment(llvm::MaybeAlign(Align));
}
-void StoreInst::setAlignment(llvm::MaybeAlign Align) {
+void StoreInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
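These three setters pack the alignment into a small log2-encoded field of the instruction's subclass data; that is what the & ~31 and & ~(31 << 1) masks carve out, and why the asserts bound the value by MaximumAlignment. A plausible model of the encoding (an assumption; the real one lives in llvm/Support/Alignment.h):

#include <cassert>
#include <cstdint>

unsigned encodeMaybeAlign(uint64_t AlignOrZero) { // 0 means "not set"
  if (AlignOrZero == 0)
    return 0;
  assert((AlignOrZero & (AlignOrZero - 1)) == 0 && "must be a power of 2");
  unsigned K = 0;
  while ((uint64_t(1) << K) != AlignOrZero)
    ++K;
  return K + 1; // log2 + 1, small enough for a 5-bit field
}

uint64_t decodeMaybeAlign(unsigned Encoded) {
  return Encoded ? uint64_t(1) << (Encoded - 1) : 0;
}
// encodeMaybeAlign(16) == 5, decodeMaybeAlign(5) == 16
// A 5-bit field holds encodings 0..31, i.e. alignments up to 2^30.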
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 0d6872d15cb..65b98d382cc 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -667,7 +667,7 @@ unsigned Value::getPointerAlignment(const DataLayout &DL) const {
assert(getType()->isPointerTy() && "must be pointer");
if (auto *GO = dyn_cast<GlobalObject>(this)) {
if (isa<Function>(GO)) {
- const llvm::MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+ const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
const unsigned Align = FunctionPtrAlign ? FunctionPtrAlign->value() : 0;
switch (DL.getFunctionPtrAlignType()) {
case DataLayout::FunctionPtrAlignType::Independent:
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index b237d5620bf..6f160e491ce 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -337,7 +337,7 @@ public:
} // end anonymous namespace
void ELFWriter::align(unsigned Alignment) {
- uint64_t Padding = offsetToAlignment(W.OS.tell(), llvm::Align(Alignment));
+ uint64_t Padding = offsetToAlignment(W.OS.tell(), Align(Alignment));
W.OS.write_zeros(Padding);
}
@@ -638,7 +638,7 @@ void ELFWriter::computeSymbolTable(
unsigned EntrySize = is64Bit() ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
MCSectionELF *SymtabSection =
Ctx.getELFSection(".symtab", ELF::SHT_SYMTAB, 0, EntrySize, "");
- SymtabSection->setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ SymtabSection->setAlignment(is64Bit() ? Align(8) : Align(4));
SymbolTableIndex = addToSectionTable(SymtabSection);
align(SymtabSection->getAlignment());
@@ -736,7 +736,7 @@ void ELFWriter::computeSymbolTable(
MCSectionELF *SymtabShndxSection =
Ctx.getELFSection(".symtab_shndx", ELF::SHT_SYMTAB_SHNDX, 0, 4, "");
SymtabShndxSectionIndex = addToSectionTable(SymtabShndxSection);
- SymtabShndxSection->setAlignment(llvm::Align(4));
+ SymtabShndxSection->setAlignment(Align(4));
}
ArrayRef<std::string> FileNames = Asm.getFileNames();
@@ -824,7 +824,7 @@ MCSectionELF *ELFWriter::createRelocationSection(MCContext &Ctx,
MCSectionELF *RelaSection = Ctx.createELFRelSection(
RelaSectionName, hasRelocationAddend() ? ELF::SHT_RELA : ELF::SHT_REL,
Flags, EntrySize, Sec.getGroup(), &Sec);
- RelaSection->setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ RelaSection->setAlignment(is64Bit() ? Align(8) : Align(4));
return RelaSection;
}
@@ -911,7 +911,7 @@ void ELFWriter::writeSectionData(const MCAssembler &Asm, MCSection &Sec,
Section.setFlags(Section.getFlags() | ELF::SHF_COMPRESSED);
// Alignment field should reflect the requirements of
// the compressed section header.
- Section.setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ Section.setAlignment(is64Bit() ? Align(8) : Align(4));
} else {
// Add "z" prefix to section name. This is zlib-gnu style.
MC.renameELFSection(&Section, (".z" + SectionName.drop_front(1)).str());
@@ -1135,7 +1135,7 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) {
if (!GroupIdx) {
MCSectionELF *Group = Ctx.createELFGroupSection(SignatureSymbol);
GroupIdx = addToSectionTable(Group);
- Group->setAlignment(llvm::Align(4));
+ Group->setAlignment(Align(4));
Groups.push_back(Group);
}
std::vector<const MCSectionELF *> &Members =
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 0e0ef80ed50..cf42fe85b8e 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -322,7 +322,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
case MCFragment::FT_Align: {
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
unsigned Offset = Layout.getFragmentOffset(&AF);
- unsigned Size = offsetToAlignment(Offset, llvm::Align(AF.getAlignment()));
+ unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));
// Insert extra Nops for code alignment if the target defines the
// shouldInsertExtraNopBytesForCodeAlign target hook.
diff --git a/llvm/lib/MC/MCELFStreamer.cpp b/llvm/lib/MC/MCELFStreamer.cpp
index e411e24921c..fa2133078bf 100644
--- a/llvm/lib/MC/MCELFStreamer.cpp
+++ b/llvm/lib/MC/MCELFStreamer.cpp
@@ -139,7 +139,7 @@ static void setSectionAlignmentForBundling(const MCAssembler &Assembler,
MCSection *Section) {
if (Section && Assembler.isBundlingEnabled() && Section->hasInstructions() &&
Section->getAlignment() < Assembler.getBundleAlignSize())
- Section->setAlignment(llvm::Align(Assembler.getBundleAlignSize()));
+ Section->setAlignment(Align(Assembler.getBundleAlignSize()));
}
void MCELFStreamer::ChangeSection(MCSection *Section,
@@ -309,7 +309,7 @@ void MCELFStreamer::EmitCommonSymbol(MCSymbol *S, uint64_t Size,
// Update the maximum alignment of the section if necessary.
if (ByteAlignment > Section.getAlignment())
- Section.setAlignment(llvm::Align(ByteAlignment));
+ Section.setAlignment(Align(ByteAlignment));
SwitchSection(P.first, P.second);
} else {
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index 1e7b5d711f2..83f6ab8fe33 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -539,7 +539,7 @@ void MCObjectStreamer::EmitValueToAlignment(unsigned ByteAlignment,
// Update the maximum alignment on the current section if necessary.
MCSection *CurSec = getCurrentSectionOnly();
if (ByteAlignment > CurSec->getAlignment())
- CurSec->setAlignment(llvm::Align(ByteAlignment));
+ CurSec->setAlignment(Align(ByteAlignment));
}
void MCObjectStreamer::EmitCodeAlignment(unsigned ByteAlignment,
diff --git a/llvm/lib/MC/MCWinCOFFStreamer.cpp b/llvm/lib/MC/MCWinCOFFStreamer.cpp
index fc79cab522c..c5a21312140 100644
--- a/llvm/lib/MC/MCWinCOFFStreamer.cpp
+++ b/llvm/lib/MC/MCWinCOFFStreamer.cpp
@@ -192,7 +192,7 @@ void MCWinCOFFStreamer::EmitCOFFSafeSEH(MCSymbol const *Symbol) {
MCSection *SXData = getContext().getObjectFileInfo()->getSXDataSection();
getAssembler().registerSection(*SXData);
if (SXData->getAlignment() < 4)
- SXData->setAlignment(llvm::Align(4));
+ SXData->setAlignment(Align(4));
new MCSymbolIdFragment(Symbol, SXData);
@@ -209,7 +209,7 @@ void MCWinCOFFStreamer::EmitCOFFSymbolIndex(MCSymbol const *Symbol) {
MCSection *Sec = getCurrentSectionOnly();
getAssembler().registerSection(*Sec);
if (Sec->getAlignment() < 4)
- Sec->setAlignment(llvm::Align(4));
+ Sec->setAlignment(Align(4));
new MCSymbolIdFragment(Symbol, getCurrentSectionOnly());
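Both hunks in this file follow the same raise-only idiom: a section's alignment may be increased to meet a new requirement but is never lowered, since earlier emitters may already rely on it. Expressed as a trivial helper (illustrative only, not an LLVM API):

#include <algorithm>
#include <cstdint>

// Raise Current to at least Required; never lower an alignment other
// emitters may already depend on.
void raiseAlignment(uint64_t &Current, uint64_t Required) {
  Current = std::max(Current, Required);
}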
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp
index 03fb03fe1b6..9f6af981aca 100644
--- a/llvm/lib/MC/MachObjectWriter.cpp
+++ b/llvm/lib/MC/MachObjectWriter.cpp
@@ -127,7 +127,7 @@ uint64_t MachObjectWriter::getPaddingSize(const MCSection *Sec,
const MCSection &NextSec = *Layout.getSectionOrder()[Next];
if (NextSec.isVirtualSection())
return 0;
- return offsetToAlignment(EndAddr, llvm::Align(NextSec.getAlignment()));
+ return offsetToAlignment(EndAddr, Align(NextSec.getAlignment()));
}
void MachObjectWriter::writeHeader(MachO::HeaderFileType Type,
@@ -445,8 +445,8 @@ void MachObjectWriter::writeLinkerOptionsLoadCommand(
}
// Pad to a multiple of the pointer size.
- W.OS.write_zeros(offsetToAlignment(BytesWritten, is64Bit() ? llvm::Align(8)
- : llvm::Align(4)));
+ W.OS.write_zeros(
+ offsetToAlignment(BytesWritten, is64Bit() ? Align(8) : Align(4)));
assert(W.OS.tell() - Start == Size);
}
@@ -835,7 +835,7 @@ uint64_t MachObjectWriter::writeObject(MCAssembler &Asm,
//
// FIXME: Is this machine dependent?
unsigned SectionDataPadding =
- offsetToAlignment(SectionDataFileSize, llvm::Align(4));
+ offsetToAlignment(SectionDataFileSize, Align(4));
SectionDataFileSize += SectionDataPadding;
// Write the prolog, starting with the header and load command...
@@ -1000,8 +1000,8 @@ uint64_t MachObjectWriter::writeObject(MCAssembler &Asm,
#endif
Asm.getLOHContainer().emit(*this, Layout);
// Pad to a multiple of the pointer size.
- W.OS.write_zeros(offsetToAlignment(LOHRawSize, is64Bit() ? llvm::Align(8)
- : llvm::Align(4)));
+ W.OS.write_zeros(
+ offsetToAlignment(LOHRawSize, is64Bit() ? Align(8) : Align(4)));
assert(W.OS.tell() - Start == LOHSize);
}
diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp
index ecd7c850616..5234b0e1823 100644
--- a/llvm/lib/Object/ArchiveWriter.cpp
+++ b/llvm/lib/Object/ArchiveWriter.cpp
@@ -177,7 +177,7 @@ printBSDMemberHeader(raw_ostream &Out, uint64_t Pos, StringRef Name,
unsigned UID, unsigned GID, unsigned Perms, uint64_t Size) {
uint64_t PosAfterHeader = Pos + 60 + Name.size();
// Pad so that even 64-bit object files are aligned.
- unsigned Pad = offsetToAlignment(PosAfterHeader, llvm::Align(8));
+ unsigned Pad = offsetToAlignment(PosAfterHeader, Align(8));
unsigned NameWithPadding = Name.size() + Pad;
printWithSpacePadding(Out, Twine("#1/") + Twine(NameWithPadding), 16);
printRestOfMemberHeader(Out, ModTime, UID, GID, Perms,
@@ -244,7 +244,7 @@ struct MemberData {
static MemberData computeStringTable(StringRef Names) {
unsigned Size = Names.size();
- unsigned Pad = offsetToAlignment(Size, llvm::Align(2));
+ unsigned Pad = offsetToAlignment(Size, Align(2));
std::string Header;
raw_string_ostream Out(Header);
printWithSpacePadding(Out, "//", 48);
@@ -308,7 +308,7 @@ static void writeSymbolTable(raw_ostream &Out, object::Archive::Kind Kind,
// least 4-byte aligned for 32-bit content. Opt for the larger encoding
// uniformly.
// We do this for all BSD formats because it simplifies aligning members.
- const llvm::Align Alignment(isBSDLike(Kind) ? 8 : 2);
+ const Align Alignment(isBSDLike(Kind) ? 8 : 2);
unsigned Pad = offsetToAlignment(Size, Alignment);
Size += Pad;
@@ -465,9 +465,9 @@ computeMemberData(raw_ostream &StringTable, raw_ostream &SymNames,
// uniformly. This matches the behaviour with cctools and ensures that ld64
// is happy with archives that we generate.
unsigned MemberPadding =
- isDarwin(Kind) ? offsetToAlignment(Data.size(), llvm::Align(8)) : 0;
+ isDarwin(Kind) ? offsetToAlignment(Data.size(), Align(8)) : 0;
unsigned TailPadding =
- offsetToAlignment(Data.size() + MemberPadding, llvm::Align(2));
+ offsetToAlignment(Data.size() + MemberPadding, Align(2));
StringRef Padding = StringRef(PaddingData, MemberPadding + TailPadding);
sys::TimePoint<std::chrono::seconds> ModTime;
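To make the two-step padding above concrete, take a hypothetical 13-byte Darwin member: aligning to 8 gives MemberPadding = 3, and 13 + 3 = 16 is already even, so TailPadding = 0. The same computation as a small sketch:

#include <cstdint>

uint64_t padTo(uint64_t Size, uint64_t Alignment) { // offsetToAlignment
  return (Alignment - Size % Alignment) % Alignment;
}
// Hypothetical 13-byte Darwin member:
//   MemberPadding = padTo(13, 8)     == 3
//   TailPadding   = padTo(13 + 3, 2) == 0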
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index ed3ef9511b0..7ea7915c2ca 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -711,7 +711,7 @@ void AArch64AsmPrinter::EmitJumpTableInfo() {
if (JTBBs.empty()) continue;
unsigned Size = AFI->getJumpTableEntrySize(JTI);
- EmitAlignment(llvm::Align(Size));
+ EmitAlignment(Align(Size));
OutStreamer->EmitLabel(GetJTISymbol(JTI));
for (auto *JTBB : JTBBs)
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 455300260d1..a0695cef615 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -40,10 +40,10 @@ static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
CCState &State, unsigned SlotAlign) {
unsigned Size = LocVT.getSizeInBits() / 8;
- const llvm::Align StackAlign =
+ const Align StackAlign =
State.getMachineFunction().getDataLayout().getStackAlignment();
- const llvm::Align OrigAlign(ArgFlags.getOrigAlign());
- const llvm::Align Align = std::min(OrigAlign, StackAlign);
+ const Align OrigAlign(ArgFlags.getOrigAlign());
+ const Align Align = std::min(OrigAlign, StackAlign);
for (auto &It : PendingMembers) {
It.convertToMem(State.AllocateStack(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ae09714395a..cc1eba2dd32 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -641,11 +641,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
EnableExtLdPromotion = true;
// Set required alignment.
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
// Set preferred alignments.
- setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment()));
- setPrefFunctionAlignment(
- llvm::Align(1ULL << STI.getPrefFunctionLogAlignment()));
+ setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
+ setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
// Only change the limit for entries in a jump table if specified by
// the sub target, but not at the command line.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 4b1d9cb5059..694ff52da10 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -229,7 +229,7 @@ void AMDGPUAsmPrinter::EmitFunctionBodyEnd() {
// alignment.
Streamer.EmitValueToAlignment(64, 0, 1, 0);
if (ReadOnlySection.getAlignment() < 64)
- ReadOnlySection.setAlignment(llvm::Align(64));
+ ReadOnlySection.setAlignment(Align(64));
const MCSubtargetInfo &STI = MF->getSubtarget();
@@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// The starting address of all shader programs must be 256 bytes aligned.
// Regular functions just need the basic required instruction alignment.
- MF.setAlignment(MFI->isEntryFunction() ? llvm::Align(256) : llvm::Align(4));
+ MF.setAlignment(MFI->isEntryFunction() ? Align(256) : Align(4));
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
index 42158151b64..b29cd75f75c 100644
--- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
@@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Functions need to be cacheline (256B) aligned.
- MF.ensureAlignment(llvm::Align(256));
+ MF.ensureAlignment(Align(256));
SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 385984a51f2..14d25712b65 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10684,9 +10684,9 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
-llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
- const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
- const llvm::Align CacheLineAlign = llvm::Align(64);
+Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+ const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+ const Align CacheLineAlign = Align(64);
// Pre-GFX10 targets did not benefit from loop alignment
if (!ML || DisableLoopAlignment ||
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 79cca882af9..11a9cffac61 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -384,7 +384,7 @@ public:
unsigned Depth = 0) const override;
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
- llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+ Align getPrefLoopAlignment(MachineLoop *ML) const override;
void allocateHSAUserSGPRs(CCState &CCInfo,
MachineFunction &MF,
diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
index 997327fd1b8..d4dcf9bf285 100644
--- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
+++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h
@@ -35,7 +35,7 @@ public:
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
ReturnStackOffset(-1U), MaxCallStackReq(0) {
// Functions are 4-byte aligned.
- MF.setAlignment(llvm::Align(4));
+ MF.setAlignment(Align(4));
}
~ARCFunctionInfo() {}
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 9fcdb2fb75a..c8c91e53c44 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -168,7 +168,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// relatively easy to exceed the thumb branch range within a TU.
if (! ThumbIndirectPads.empty()) {
OutStreamer->EmitAssemblerFlag(MCAF_Code16);
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
for (std::pair<unsigned, MCSymbol *> &TIP : ThumbIndirectPads) {
OutStreamer->EmitLabel(TIP.second);
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBX)
@@ -526,7 +526,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -539,7 +539,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getThreadLocalPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -940,7 +940,7 @@ void ARMAsmPrinter::EmitJumpTableAddrs(const MachineInstr *MI) {
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -986,7 +986,7 @@ void ARMAsmPrinter::EmitJumpTableInsts(const MachineInstr *MI) {
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -1015,7 +1015,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
unsigned JTI = MO1.getIndex();
if (Subtarget->isThumb1Only())
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
OutStreamer->EmitLabel(JTISymbol);
@@ -1058,7 +1058,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
// Make sure the next instruction is 2-byte aligned.
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
}
void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
index 4bf32be686d..2b34b1d8548 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -47,7 +47,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
BBI.Size = 0;
BBI.Unalign = 0;
- BBI.PostAlign = llvm::Align::None();
+ BBI.PostAlign = Align::None();
for (MachineInstr &I : *MBB) {
BBI.Size += TII->getInstSizeInBytes(I);
@@ -62,8 +62,8 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
- BBI.PostAlign = llvm::Align(4);
- MBB->getParent()->ensureAlignment(llvm::Align(4));
+ BBI.PostAlign = Align(4);
+ MBB->getParent()->ensureAlignment(Align(4));
}
}
@@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
- const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment();
+ const Align Align = MF.getBlockNumbered(i)->getAlignment();
const unsigned Offset = BBInfo[i - 1].postOffset(Align);
const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align);
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
index 18e7195e1a9..d0f4a02463b 100644
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
@@ -27,11 +27,11 @@ using BBInfoVector = SmallVectorImpl<BasicBlockInfo>;
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
-/// @param Align alignment
+/// @param Alignment alignment
/// @param KnownBits Number of known low offset bits.
-inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
- if (KnownBits < Log2(Align))
- return Align.value() - (1ull << KnownBits);
+inline unsigned UnknownPadding(Align Alignment, unsigned KnownBits) {
+ if (KnownBits < Log2(Alignment))
+ return Alignment.value() - (1ull << KnownBits);
return 0;
}
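UnknownPadding deserves a worked example: with an alignment of 8 (Log2 = 3) and one known-zero low offset bit, the offset is some multiple of 2, so up to 8 - 2 = 6 bytes of padding may be needed. The formula above, exercised on two cases:

#include <cassert>
#include <cstdint>

uint64_t unknownPaddingSketch(uint64_t AlignBytes, unsigned LogAlign,
                              unsigned KnownBits) {
  return KnownBits < LogAlign ? AlignBytes - (uint64_t(1) << KnownBits) : 0;
}

int main() {
  // Align 8 (Log2 = 3), one known-zero low bit: the offset is some
  // multiple of 2, so the worst case is 8 - 2 = 6 bytes of padding.
  assert(unknownPaddingSketch(8, 3, 1) == 6);
  // Align 4 (Log2 = 2), two known-zero low bits: already aligned.
  assert(unknownPaddingSketch(4, 2, 2) == 0);
}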
@@ -67,7 +67,7 @@ struct BasicBlockInfo {
/// PostAlign - When > 1, the block terminator contains a .align
/// directive, so the end of the block is aligned to PostAlign bytes.
- llvm::Align PostAlign;
+ Align PostAlign;
BasicBlockInfo() = default;
@@ -86,10 +86,10 @@ struct BasicBlockInfo {
/// Compute the offset immediately following this block. If Alignment is
/// specified, return the offset the successor block will get if it has
/// this alignment.
- unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postOffset(Align Alignment = Align::None()) const {
unsigned PO = Offset + Size;
- const llvm::Align PA = std::max(PostAlign, Align);
- if (PA == llvm::Align::None())
+ const Align PA = std::max(PostAlign, Alignment);
+ if (PA == Align::None())
return PO;
// Add alignment padding from the terminator.
return PO + UnknownPadding(PA, internalKnownBits());
@@ -100,7 +100,7 @@ struct BasicBlockInfo {
/// instruction alignment. An aligned terminator may increase the number
/// of known bits.
/// If Align is given, also consider the alignment of the next block.
- unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postKnownBits(Align Align = Align::None()) const {
return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits());
}
};
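postOffset is the heart of the layout math above: it takes the larger of the terminator's own .align requirement and the successor's alignment, then adds worst-case padding for the offset bits that are not provably zero. A condensed, self-contained model (field names follow the struct above; an assumption-level sketch):

#include <algorithm>

struct BlockInfoSketch {
  unsigned Offset = 0;    // start of the block
  unsigned Size = 0;      // byte size of its instructions
  unsigned PostAlign = 1; // terminator's .align requirement, 1 == none
  unsigned KnownBits = 0; // low offset bits provably zero

  unsigned postOffset(unsigned SuccAlign = 1) const {
    unsigned PO = Offset + Size;
    unsigned PA = std::max(PostAlign, SuccAlign);
    if (PA == 1)
      return PO;          // no alignment constraint from either side
    unsigned LogPA = 0;
    while ((1u << LogPA) < PA)
      ++LogPA;
    // Worst-case padding unless enough low bits are already known zero.
    return PO + (KnownBits < LogPA ? PA - (1u << KnownBits) : 0);
  }
};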
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 874ae7862b6..24ca25f73e9 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -247,7 +247,7 @@ namespace {
void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
bool BBHasFallthrough(MachineBasicBlock *MBB);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- llvm::Align getCPEAlign(const MachineInstr *CPEMI);
+ Align getCPEAlign(const MachineInstr *CPEMI);
void scanFunctionJumpTables();
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
@@ -404,7 +404,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureAlignment(llvm::Align(4));
+ MF->ensureAlignment(Align(4));
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -494,7 +494,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes.
- const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
const unsigned MaxLogAlign = Log2(MaxAlign);
// Mark the basic block as required by the const-pool.
@@ -650,25 +650,25 @@ ARMConstantIslands::findConstPoolEntry(unsigned CPI,
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
-llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
+Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
switch (CPEMI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
break;
case ARM::JUMPTABLE_TBB:
- return isThumb1 ? llvm::Align(4) : llvm::Align(1);
+ return isThumb1 ? Align(4) : Align(1);
case ARM::JUMPTABLE_TBH:
- return isThumb1 ? llvm::Align(4) : llvm::Align(2);
+ return isThumb1 ? Align(4) : Align(2);
case ARM::JUMPTABLE_INSTS:
- return llvm::Align(2);
+ return Align(2);
case ARM::JUMPTABLE_ADDRS:
- return llvm::Align(4);
+ return Align(4);
default:
llvm_unreachable("unknown constpool entry kind");
}
unsigned CPI = getCombinedIndex(CPEMI);
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- return llvm::Align(MCP->getConstants()[CPI].getAlignment());
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// scanFunctionJumpTables - Do a scan of the function, building up
@@ -1021,10 +1021,10 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
unsigned NextBlockOffset;
- llvm::Align NextBlockAlignment;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
if (++NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
@@ -1214,7 +1214,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// inserting islands between BB0 and BB1 makes other accesses out of range.
MachineBasicBlock *UserBB = U.MI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
return false;
@@ -1268,7 +1268,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
- const llvm::Align CPEAlign = getCPEAlign(CPEMI);
+ const Align CPEAlign = getCPEAlign(CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1323,7 +1323,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// Align, which is the largest possible alignment in the function.
- const llvm::Align Align = MF->getAlignment();
+ const Align Align = MF->getAlignment();
assert(Align >= CPEAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(Align, KnownBits);
@@ -1501,9 +1501,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
- const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
- if (NewMBB->getAlignment() < Align)
- NewMBB->setAlignment(Align);
+ const Align Alignment = isThumb ? Align(2) : Align(4);
+ if (NewMBB->getAlignment() < Alignment)
+ NewMBB->setAlignment(Alignment);
// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1566,7 +1566,7 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(llvm::Align::None());
+ CPEBB->setAlignment(Align::None());
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1866f794d8c..989c9477b7e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1428,16 +1428,14 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// On ARM, arguments smaller than 4 bytes are extended, so all arguments
// are at least 4 bytes aligned.
- setMinStackArgumentAlignment(llvm::Align(4));
+ setMinStackArgumentAlignment(Align(4));
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopAlignment(
- llvm::Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
+ setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
- setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2)
- : llvm::Align(4));
+ setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
if (Subtarget->isThumb() || Subtarget->isThumb2())
setTargetDAGCombine(ISD::ABS);
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 6566f618b95..12b1f53c329 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -236,7 +236,7 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
setLibcallName(RTLIB::SIN_F32, "sin");
setLibcallName(RTLIB::COS_F32, "cos");
- setMinFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
setMinimumJumpTableEntries(UINT_MAX);
}
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 72fe18b9ed0..56e0288f26c 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -133,8 +133,8 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
setBooleanContents(ZeroOrOneBooleanContent);
// Function alignments
- setMinFunctionAlignment(llvm::Align(8));
- setPrefFunctionAlignment(llvm::Align(8));
+ setMinFunctionAlignment(Align(8));
+ setPrefFunctionAlignment(Align(8));
if (BPFExpandMemcpyInOrder) {
// LLVM generic code will try to expand memcpy into load/store pairs at this
diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
index 5cfbacf94cd..08f74080687 100644
--- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -105,7 +105,7 @@ void HexagonBranchRelaxation::computeOffset(MachineFunction &MF,
// offset of the current instruction from the start.
unsigned InstOffset = 0;
for (auto &B : MF) {
- if (B.getAlignment() != llvm::Align::None()) {
+ if (B.getAlignment() != Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index 85d7ac00890..d21de8ccb5a 100644
--- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -114,7 +114,7 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
// First pass - compute the offset of each basic block.
for (const MachineBasicBlock &MBB : MF) {
- if (MBB.getAlignment() != llvm::Align::None()) {
+ if (MBB.getAlignment() != Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 70afec14b51..bfa3372d7fa 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1380,7 +1380,7 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized(
Align A = MFI.getLocalFrameMaxAlign();
assert(A <= 8 && "Unexpected local frame alignment");
if (A == 1)
- MFI.setLocalFrameMaxAlign(llvm::Align(8));
+ MFI.setLocalFrameMaxAlign(Align(8));
MFI.setUseLocalStackAllocationBlock(true);
// Set the physical aligned-stack base address register.
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 7cbec61c3ba..be4153e312f 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1235,9 +1235,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
Subtarget(ST) {
auto &HRI = *Subtarget.getRegisterInfo();
- setPrefLoopAlignment(llvm::Align(16));
- setMinFunctionAlignment(llvm::Align(4));
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefLoopAlignment(Align(16));
+ setMinFunctionAlignment(Align(4));
+ setPrefFunctionAlignment(Align(16));
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
index 215af5b97a9..a799f7f7c0b 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
@@ -116,8 +116,8 @@ void HexagonMCELFStreamer::HexagonMCEmitCommonSymbol(MCSymbol *Symbol,
}
// Update the maximum alignment of the section if necessary.
- if (llvm::Align(ByteAlignment) > Section.getAlignment())
- Section.setAlignment(llvm::Align(ByteAlignment));
+ if (Align(ByteAlignment) > Section.getAlignment())
+ Section.setAlignment(Align(ByteAlignment));
SwitchSection(P.first, P.second);
} else {
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 5cd72da0daa..70deff06995 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -145,8 +145,8 @@ LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::XOR);
// Function alignments
- setMinFunctionAlignment(llvm::Align(4));
- setPrefFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
+ setPrefFunctionAlignment(Align(4));
setJumpIsExpensive(true);
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 8faa3da6ec3..a83fd131ac3 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -327,8 +327,8 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
- setMinFunctionAlignment(llvm::Align(2));
- setPrefFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
+ setPrefFunctionAlignment(Align(2));
}
SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 8ab8bfe0b6c..5106ffde73e 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1805,9 +1805,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(
- Offset.getImm(),
- (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4))))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEZ:
@@ -1836,9 +1835,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(
- Offset.getImm(),
- (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4))))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEC: case Mips::BGEC_MMR6:
@@ -1853,7 +1851,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BLEZC: case Mips::BLEZC_MMR6:
@@ -1866,7 +1864,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZC: case Mips::BEQZC_MMR6:
@@ -1877,7 +1875,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(23, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZ16_MM:
@@ -1890,7 +1888,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isInt<8>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(2)))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
break;
}
@@ -3495,7 +3493,7 @@ bool MipsAsmParser::expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
} else {
if (!isInt<17>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(2)))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
Inst.clear();
Inst.setOpcode(Mips::BEQ_MM);
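All of the misalignment checks in this file lean on one property: offsetToAlignment returns 0 exactly when the value is already a multiple of the alignment. A self-contained sketch with hypothetical immediates:

#include <cassert>
#include <cstdint>

// 0 exactly when Value is a multiple of Alignment -- the property the
// parser relies on above.
uint64_t padTo(uint64_t Value, uint64_t Alignment) {
  return (Alignment - Value % Alignment) % Alignment;
}

int main() {
  assert(padTo(0x100, 4) == 0); // accepted branch target
  assert(padTo(0x102, 4) == 2); // rejected: misaligned for MIPS
  assert(padTo(0x102, 2) == 0); // accepted under microMIPS's 2-byte rule
}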
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
index a7a07cadcd9..a84ca8ccfb2 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -15,7 +15,7 @@
namespace llvm {
// NaCl MIPS sandbox's instruction bundle size.
-static const llvm::Align MIPS_NACL_BUNDLE_ALIGN = llvm::Align(16);
+static const Align MIPS_NACL_BUNDLE_ALIGN = Align(16);
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore = nullptr);
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
index 874341e4124..3ff9c722484 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
@@ -37,7 +37,7 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS,
ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP, 1, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(8));
+ Sec->setAlignment(Align(8));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ELF::ODK_REGINFO, 1); // kind
@@ -55,7 +55,7 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
MCSectionELF *Sec = Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO,
ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(MTS->getABI().IsN32() ? llvm::Align(8) : llvm::Align(4));
+ Sec->setAlignment(MTS->getABI().IsN32() ? Align(8) : Align(4));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ri_gprmask, 4);
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index d3cc29b8d6a..b6dae9f6dea 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -901,12 +901,9 @@ void MipsTargetELFStreamer::finish() {
MCSection &BSSSection = *OFI.getBSSSection();
MCA.registerSection(BSSSection);
- TextSection.setAlignment(
- llvm::Align(std::max(16u, TextSection.getAlignment())));
- DataSection.setAlignment(
- llvm::Align(std::max(16u, DataSection.getAlignment())));
- BSSSection.setAlignment(
- llvm::Align(std::max(16u, BSSSection.getAlignment())));
+ TextSection.setAlignment(Align(std::max(16u, TextSection.getAlignment())));
+ DataSection.setAlignment(Align(std::max(16u, DataSection.getAlignment())));
+ BSSSection.setAlignment(Align(std::max(16u, BSSSection.getAlignment())));
if (RoundSectionSizes) {
// Make section sizes a multiple of the alignment. This is useful for
@@ -1029,7 +1026,7 @@ void MipsTargetELFStreamer::emitDirectiveEnd(StringRef Name) {
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Context);
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(4));
+ Sec->setAlignment(Align(4));
OS.PushSection();
@@ -1319,7 +1316,7 @@ void MipsTargetELFStreamer::emitMipsAbiFlags() {
MCSectionELF *Sec = Context.getELFSection(
".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS, ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(8));
+ Sec->setAlignment(Align(8));
OS.SwitchSection(Sec);
OS << ABIFlagsSection;
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index 49f601994bc..f5064052173 100644
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -371,7 +371,7 @@ namespace {
void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- llvm::Align getCPEAlign(const MachineInstr &CPEMI);
+ Align getCPEAlign(const MachineInstr &CPEMI);
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
@@ -529,11 +529,11 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes.
- const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
- BB->setAlignment(AlignConstantIslands ? MaxAlign : llvm::Align(4));
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : Align(4));
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
@@ -619,16 +619,16 @@ MipsConstantIslands::CPEntry
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
-llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
+Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
// Everything is 4-byte aligned unless AlignConstantIslands is set.
if (!AlignConstantIslands)
- return llvm::Align(4);
+ return Align(4);
unsigned CPI = CPEMI.getOperand(1).getIndex();
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- return llvm::Align(MCP->getConstants()[CPI].getAlignment());
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// initializeFunctionInfo - Do the initial scan of the function, building up
@@ -936,11 +936,11 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
unsigned &Growth) {
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
unsigned NextBlockOffset;
- llvm::Align NextBlockAlignment;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
if (NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = llvm::Align::None();
+ NextBlockAlignment = Align::None();
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
NextBlockAlignment = NextBlock->getAlignment();
@@ -1251,7 +1251,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// Align, which is the largest possible alignment in the function.
- const llvm::Align Align = MF->getAlignment();
+ const Align Align = MF->getAlignment();
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
@@ -1423,7 +1423,7 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(llvm::Align(1));
+ CPEBB->setAlignment(Align(1));
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
@@ -1522,7 +1522,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
// We should have a way to back out this alignment restriction later if we can,
// but it is not harmful.
//
- DestBB->setAlignment(llvm::Align(4));
+ DestBB->setAlignment(Align(4));
Br.MaxDisp = ((1<<24)-1) * 2;
MI->setDesc(TII->get(Mips::JalB16));
}
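
Several hunks in this pass lean on Align(1) being the neutral "no alignment" value; Align::None() is just a named constant for it. A small illustration, assuming the Align API at this revision:

#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::Align;

int main() {
  assert(Align::None() == Align(1)); // "unaligned" means 1-byte aligned
  assert(Align(4).value() == 4);     // Align stores bytes, not log2(bytes)
  assert(Align(1) <= Align(8));      // totally ordered by byte count
}
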
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 57e2c88b2ab..34084bff07a 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -514,13 +514,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setLibcallName(RTLIB::SRA_I128, nullptr);
}
- setMinFunctionAlignment(Subtarget.isGP64bit() ? llvm::Align(8)
- : llvm::Align(4));
+ setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
- setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? llvm::Align(8)
- : llvm::Align(4));
+ setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
+ : Align(4));
setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
@@ -2148,7 +2147,7 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Node->getValueType(0);
SDValue Chain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
- const llvm::Align Align =
+ const Align Align =
llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
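
The lowerVAARG hunk routes a possibly-zero alignment operand through MaybeAlign, whose valueOrOne() collapses the unspecified case to Align(1) rather than constructing an invalid Align(0). A sketch under the same API assumption (normalizeAlignOperand is a hypothetical helper):

#include "llvm/Support/Alignment.h"
#include <cstdint>

using llvm::Align;
using llvm::MaybeAlign;

// A raw alignment operand of 0 means "unspecified"; valueOrOne() turns the
// empty MaybeAlign into the neutral 1-byte alignment.
Align normalizeAlignOperand(uint64_t Raw) {
  return MaybeAlign(Raw).valueOrOne();
}
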
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 98dfd0a407c..d9354cadc73 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -247,8 +247,8 @@ bool MipsSEDAGToDAGISel::selectAddrFrameIndexOffset(
Base = Addr.getOperand(0);
// If base is an FI, additional offset calculation is done in
// eliminateFrameIndex; otherwise we need to check the alignment
- const llvm::Align Align(1ULL << ShiftAmount);
- if (!isAligned(Align, CN->getZExtValue()))
+ const Align Alignment(1ULL << ShiftAmount);
+ if (!isAligned(Alignment, CN->getZExtValue()))
return false;
}
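
The rename from Align to Alignment here is not cosmetic: with the llvm:: qualifier gone, a local named Align would shadow the Align type for the rest of the scope. The check itself is a plain divisibility test; a sketch with offsetFits as a hypothetical helper:

#include "llvm/Support/Alignment.h"
#include <cstdint>

using llvm::Align;
using llvm::isAligned;

// Scaled addressing: offsets must be multiples of 1 << ShiftAmount.
// isAligned(A, N) is equivalent to N % A.value() == 0.
bool offsetFits(unsigned ShiftAmount, uint64_t Offset) {
  const Align Alignment(1ULL << ShiftAmount); // power of two by construction
  return isAligned(Alignment, Offset);
}
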
diff --git a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
index 1a6382c96fb..a48088c2891 100644
--- a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -212,7 +212,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
// element size), otherwise it is a 16-bit signed immediate.
unsigned OffsetBitSize =
getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
- const llvm::Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
+ const Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
if (OffsetBitSize < 16 && isInt<16>(Offset) &&
(!isIntN(OffsetBitSize, Offset) || !isAligned(OffsetAlign, Offset))) {
// If we have an offset that needs to fit into a signed n-bit immediate
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 124c1827154..c8f26dd2f14 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -1634,7 +1634,7 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4));
+ EmitAlignment(isPPC64 ? Align(8) : Align(4));
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
// L_foo$stub:
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 9b4748b4dd8..cdff4d383d2 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -81,20 +81,20 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
/// original Offset.
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
unsigned Offset) {
- const llvm::Align Align = MBB.getAlignment();
- if (Align == 1)
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment == Align::None())
return 0;
- const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
+ const Align ParentAlign = MBB.getParent()->getAlignment();
- if (Align <= ParentAlign)
- return offsetToAlignment(Offset, Align);
+ if (Alignment <= ParentAlign)
+ return offsetToAlignment(Offset, Alignment);
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
if (FirstImpreciseBlock < 0)
FirstImpreciseBlock = MBB.getNumber();
- return Align.value() + offsetToAlignment(Offset, Align);
+ return Alignment.value() + offsetToAlignment(Offset, Alignment);
}
/// We need to be careful about the offset of the first block in the function
@@ -178,7 +178,7 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
const MachineBasicBlock *Dest,
unsigned BrOffset) {
int BranchSize;
- llvm::Align MaxAlign = llvm::Align(4);
+ Align MaxAlign = Align(4);
bool NeedExtraAdjustment = false;
if (Dest->getNumber() <= Src->getNumber()) {
// If this is a backwards branch, the delta is the offset from the
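
GetAlignmentAdjustment counts nop padding with offsetToAlignment, which returns how many bytes must be added to an offset to reach the next boundary; when the block's own offset is unreliable, the pass pessimistically adds a full Alignment.value() on top. Assuming offsetToAlignment(uint64_t, Align) from Alignment.h:

#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::Align;
using llvm::offsetToAlignment;

int main() {
  assert(offsetToAlignment(0, Align(16)) == 0);   // already on a boundary
  assert(offsetToAlignment(4, Align(16)) == 12);  // pad up to 16
  assert(offsetToAlignment(17, Align(16)) == 15); // next boundary is 32
}
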
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 432d772185a..40719c6b2bb 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -139,7 +139,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned.
bool isPPC64 = Subtarget.isPPC64();
- setMinStackArgumentAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4));
+ setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
// Set up the register classes.
addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
@@ -1179,9 +1179,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setJumpIsExpensive();
}
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
if (Subtarget.isDarwin())
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
switch (Subtarget.getDarwinDirective()) {
default: break;
@@ -1198,8 +1198,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
- setPrefLoopAlignment(llvm::Align(16));
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefLoopAlignment(Align(16));
+ setPrefFunctionAlignment(Align(16));
break;
}
@@ -14110,7 +14110,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}
-llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
switch (Subtarget.getDarwinDirective()) {
default: break;
case PPC::DIR_970:
@@ -14131,7 +14131,7 @@ llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
// Actual alignment of the loop will depend on the hotness check and other
// logic in alignBlocks.
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
- return llvm::Align(32);
+ return Align(32);
}
const PPCInstrInfo *TII = Subtarget.getInstrInfo();
@@ -14147,7 +14147,7 @@ llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
}
if (LoopSize > 16 && LoopSize <= 32)
- return llvm::Align(32);
+ return Align(32);
break;
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 29cf75c62a1..2cc9af3c05f 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -742,7 +742,7 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
- llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+ Align getPrefLoopAlignment(MachineLoop *ML) const override;
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e8dedffa9c2..f459497164f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -198,7 +198,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setBooleanContents(ZeroOrOneBooleanContent);
// Function alignments.
- const llvm::Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
+ const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
setMinFunctionAlignment(FunctionAlignment);
setPrefFunctionAlignment(FunctionAlignment);
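
The RISC-V hunk derives function alignment from instruction width: 2 bytes when the compressed (C) extension is available, 4 otherwise. A sketch, with HasCompressed standing in for Subtarget.hasStdExtC():

#include "llvm/Support/Alignment.h"

using llvm::Align;

// Compressed instructions are 2 bytes, so 2-byte function alignment is
// enough; without the C extension every instruction is 4 bytes.
Align riscvFunctionAlignment(bool HasCompressed) {
  return Align(HasCompressed ? 2 : 4);
}
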
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index e8b33f8a70e..07db19af7af 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1805,7 +1805,7 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
computeRegisterProperties(Subtarget->getRegisterInfo());
}
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index ba79ec2986a..d69f578735a 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Instructions are strings of 2-byte aligned 2-byte values.
- setMinFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
// For performance reasons we prefer 16-byte alignment.
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
// Handle operations that are handled in a similar way for all types.
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index 64577788d70..72411122956 100644
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -87,7 +87,7 @@ struct MBBInfo {
// The minimum alignment of the block.
// This value never changes.
- llvm::Align Alignment;
+ Align Alignment;
// The number of terminators in this block. This value never changes.
unsigned NumTerminators = 0;
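
The MBBInfo field relies on Align's default constructor producing the neutral 1-byte alignment, so a value-initialized member is well defined without an explicit initializer. An illustration, under the same API assumptions:

#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::Align;

struct BlockInfo {
  Align Alignment; // default-constructed: 1-byte, i.e. no requirement
};

int main() {
  BlockInfo BI;
  assert(BI.Alignment == Align::None());
}
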
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 7f6927df9db..8d27be30a27 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -575,7 +575,7 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
// Emitting note header.
int WordSize = TT.isArch64Bit() ? 8 : 4;
- EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ EmitAlignment(WordSize == 4 ? Align(4) : Align(8));
OutStreamer->EmitIntValue(4, 4 /*size*/); // data size for "GNU\0"
OutStreamer->EmitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size
OutStreamer->EmitIntValue(ELF::NT_GNU_PROPERTY_TYPE_0, 4 /*size*/);
@@ -585,7 +585,7 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
OutStreamer->EmitIntValue(ELF::GNU_PROPERTY_X86_FEATURE_1_AND, 4);
OutStreamer->EmitIntValue(4, 4); // data size
OutStreamer->EmitIntValue(FeatureFlagsAnd, 4); // data
- EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8)); // padding
+ EmitAlignment(WordSize == 4 ? Align(4) : Align(8)); // padding
OutStreamer->endSection(Nt);
OutStreamer->SwitchSection(Cur);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 630204a826f..ff927a4df92 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1951,13 +1951,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
- setPrefLoopAlignment(llvm::Align(1ULL << ExperimentalPrefLoopAlignment));
+ setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
verifyIntrinsicTables();
}
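
The X86 change builds an Align directly from a log2-encoded command-line value; shifting 1ULL by the exponent yields the power of two the constructor requires. A sketch, with Exp standing in for the ExperimentalPrefLoopAlignment option:

#include "llvm/Support/Alignment.h"

using llvm::Align;

// Convert a log2-encoded option (default 4, i.e. 16 bytes) to an Align.
Align prefLoopAlignmentFromLog2(unsigned Exp) {
  return Align(1ULL << Exp); // 1ULL << Exp is a power of two by construction
}
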
diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
index 205843a8dde..f1fa192546d 100644
--- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
@@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,
CallTarget->addLiveIn(Reg);
CallTarget->setHasAddressTaken();
- CallTarget->setAlignment(llvm::Align(16));
+ CallTarget->setAlignment(Align(16));
insertRegReturnAddrClobber(*CallTarget, Reg);
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 838a43ac339..ebbf6d0702e 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3294,7 +3294,7 @@ bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
return isLegalMaskedLoad(DataType);
}
-bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// The only supported nontemporal loads are for aligned vectors of 16 or 32
// bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
@@ -3305,7 +3305,7 @@ bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
return false;
}
-bool X86TTIImpl::isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// SSE4A supports nontemporal stores of float and double at arbitrary
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index 27d3b65c25b..9b948dbbb4c 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -187,8 +187,8 @@ public:
bool canMacroFuseCmp();
bool isLegalMaskedLoad(Type *DataType);
bool isLegalMaskedStore(Type *DataType);
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment);
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment);
+ bool isLegalNTLoad(Type *DataType, Align Alignment);
+ bool isLegalNTStore(Type *DataType, Align Alignment);
bool isLegalMaskedGather(Type *DataType);
bool isLegalMaskedScatter(Type *DataType);
bool isLegalMaskedExpandLoad(Type *DataType);
diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index 46d1faa1866..6b3dc27cb88 100644
--- a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -115,7 +115,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
MCSymbol *GVSym = getSymbol(GV);
const Constant *C = GV->getInitializer();
- const llvm::Align Align(DL.getPrefTypeAlignment(C->getType()));
+ const Align Alignment(DL.getPrefTypeAlignment(C->getType()));
// Mark the start of the global
getTargetStreamer().emitCCTopData(GVSym->getName());
@@ -143,7 +143,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
llvm_unreachable("Unknown linkage type!");
}
- EmitAlignment(std::max(Align, llvm::Align(4)), GV);
+ EmitAlignment(std::max(Alignment, Align(4)), GV);
if (GV->isThreadLocal()) {
report_fatal_error("TLS is not supported by this target!");
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index ea3dcfc9d7d..bf006fd673f 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -171,8 +171,8 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
- setMinFunctionAlignment(llvm::Align(2));
- setPrefFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(2));
+ setPrefFunctionAlignment(Align(4));
}
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 7242eb1d3de..3dac0f6e6dd 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1132,10 +1132,10 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
-static unsigned enforceKnownAlignment(Value *V, unsigned Align,
+static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
unsigned PrefAlign,
const DataLayout &DL) {
- assert(PrefAlign > Align);
+ assert(PrefAlign > Alignment);
V = V->stripPointerCasts();
@@ -1146,36 +1146,36 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Align,
// stripPointerCasts recurses through infinite layers of bitcasts,
// while computeKnownBits is not allowed to traverse more than 6
// levels.
- Align = std::max(AI->getAlignment(), Align);
- if (PrefAlign <= Align)
- return Align;
+ Alignment = std::max(AI->getAlignment(), Alignment);
+ if (PrefAlign <= Alignment)
+ return Alignment;
// If the preferred alignment is greater than the natural stack alignment
// then don't round up. This avoids dynamic stack realignment.
- if (DL.exceedsNaturalStackAlignment(llvm::Align(PrefAlign)))
- return Align;
+ if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
+ return Alignment;
AI->setAlignment(PrefAlign);
return PrefAlign;
}
if (auto *GO = dyn_cast<GlobalObject>(V)) {
// TODO: as above, this shouldn't be necessary.
- Align = std::max(GO->getAlignment(), Align);
- if (PrefAlign <= Align)
- return Align;
+ Alignment = std::max(GO->getAlignment(), Alignment);
+ if (PrefAlign <= Alignment)
+ return Alignment;
// If there is a large requested alignment and we can, bump up the alignment
// of the global. If the memory we set aside for the global may not be the
// memory used by the final program, then it is impossible for us to reliably
// enforce the preferred alignment.
if (!GO->canIncreaseAlignment())
- return Align;
+ return Alignment;
GO->setAlignment(PrefAlign);
return PrefAlign;
}
- return Align;
+ return Alignment;
}
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
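
The parameter rename in enforceKnownAlignment matters for the same shadowing reason as elsewhere in this patch: an unsigned parameter named Align would hide the Align type inside the function body. A contrived illustration (both functions hypothetical):

#include "llvm/Support/Alignment.h"

// Once llvm:: is dropped, 'Align' inside this body names the parameter.
unsigned shadowed(unsigned Align, unsigned PrefAlign) {
  (void)PrefAlign;
  // return Align(PrefAlign).value(); // would not compile: 'Align' is the
  //                                  // unsigned parameter, not the type
  return Align;
}

unsigned renamed(unsigned Alignment, unsigned PrefAlign) {
  (void)Alignment;
  return llvm::Align(PrefAlign).value(); // qualify, or rename as the patch does
}
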
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 61406c97fd0..9731abbb50a 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -742,7 +742,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
assert(VecTy && "did not find vectorized version of stored type");
unsigned Alignment = getLoadStoreAlignment(ST);
assert(Alignment && "Alignment should be set");
- if (!TTI->isLegalNTStore(VecTy, llvm::Align(Alignment))) {
+ if (!TTI->isLegalNTStore(VecTy, Align(Alignment))) {
reportVectorizationFailure(
"nontemporal store instruction cannot be vectorized",
"nontemporal store instruction cannot be vectorized",
@@ -759,7 +759,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
assert(VecTy && "did not find vectorized version of load type");
unsigned Alignment = getLoadStoreAlignment(LD);
assert(Alignment && "Alignment should be set");
- if (!TTI->isLegalNTLoad(VecTy, llvm::Align(Alignment))) {
+ if (!TTI->isLegalNTLoad(VecTy, Align(Alignment))) {
reportVectorizationFailure(
"nontemporal load instruction cannot be vectorized",
"nontemporal load instruction cannot be vectorized",
diff --git a/llvm/tools/dsymutil/DwarfStreamer.cpp b/llvm/tools/dsymutil/DwarfStreamer.cpp
index 5d0f7c1067c..732260f0346 100644
--- a/llvm/tools/dsymutil/DwarfStreamer.cpp
+++ b/llvm/tools/dsymutil/DwarfStreamer.cpp
@@ -260,7 +260,7 @@ void DwarfStreamer::emitAppleTypes(
/// Emit the swift_ast section stored in \p Buffer.
void DwarfStreamer::emitSwiftAST(StringRef Buffer) {
MCSection *SwiftASTSection = MOFI->getDwarfSwiftASTSection();
- SwiftASTSection->setAlignment(llvm::Align(32));
+ SwiftASTSection->setAlignment(Align(32));
MS->SwitchSection(SwiftASTSection);
MS->EmitBytes(Buffer);
}
@@ -339,7 +339,7 @@ void DwarfStreamer::emitUnitRangesEntries(CompileUnit &Unit,
sizeof(int8_t); // Segment Size (in bytes)
unsigned TupleSize = AddressSize * 2;
- unsigned Padding = offsetToAlignment(HeaderSize, llvm::Align(TupleSize));
+ unsigned Padding = offsetToAlignment(HeaderSize, Align(TupleSize));
Asm->EmitLabelDifference(EndLabel, BeginLabel, 4); // Arange length
Asm->OutStreamer->EmitLabel(BeginLabel);
diff --git a/llvm/tools/llvm-cov/TestingSupport.cpp b/llvm/tools/llvm-cov/TestingSupport.cpp
index 39f809a8f0d..b99bd83157d 100644
--- a/llvm/tools/llvm-cov/TestingSupport.cpp
+++ b/llvm/tools/llvm-cov/TestingSupport.cpp
@@ -100,7 +100,7 @@ int convertForTestingMain(int argc, const char *argv[]) {
encodeULEB128(ProfileNamesAddress, OS);
OS << ProfileNamesData;
// Coverage mapping data is expected to have an alignment of 8.
- for (unsigned Pad = offsetToAlignment(OS.tell(), llvm::Align(8)); Pad; --Pad)
+ for (unsigned Pad = offsetToAlignment(OS.tell(), Align(8)); Pad; --Pad)
OS.write(uint8_t(0));
OS << CoverageMappingData;
diff --git a/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp b/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp
index 0b8b3f1c2dd..f621f3aa09c 100644
--- a/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp
+++ b/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp
@@ -146,7 +146,7 @@ uint64_t MachOLayoutBuilder::layoutSegments() {
Sec.Offset = 0;
} else {
uint64_t PaddingSize =
- offsetToAlignment(SegFileSize, llvm::Align(1ull << Sec.Align));
+ offsetToAlignment(SegFileSize, Align(1ull << Sec.Align));
Sec.Offset = SegOffset + SegFileSize + PaddingSize;
Sec.Size = Sec.Content.size();
SegFileSize += PaddingSize + Sec.Size;
OpenPOWER on IntegriCloud