Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp          3
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp      2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp          9
5 files changed, 10 insertions(+), 8 deletions(-)
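Every hunk in this commit makes the same transformation: a setter that took a log2 alignment exponent is replaced by one taking llvm::Align, whose constructor expects the alignment in bytes. As a reader's aid, here is a minimal standalone sketch of that conversion (plain C++ mirroring the idea, not the llvm::Align API itself):

    #include <cassert>
    #include <cstdint>

    // A log2 exponent N corresponds to an alignment of 1 << N bytes, which is
    // what the hunks below compute with llvm::Align(1ULL << N); e.g. N == 4
    // yields a 16-byte alignment.
    uint64_t alignBytesFromLog2(unsigned N) {
      uint64_t Bytes = uint64_t(1) << N;
      assert((Bytes & (Bytes - 1)) == 0 && "single set bit: always a power of two");
      return Bytes;
    }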
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d16fffd4bc4..a78f37f4278 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -643,7 +643,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setMinFunctionAlignment(llvm::Align(4));
// Set preferred alignments.
setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment());
- setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment());
+ setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment()));
// Only change the limit for entries in a jump table if specified by
// the sub target, but not at the command line.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 5c298e52388..eacaa75bc10 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1419,7 +1419,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment());
+ setPrefLoopAlignment(
+ llvm::Align(1UL << Subtarget->getPrefLoopLogAlignment()));
setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2)
: llvm::Align(4));
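An aside on the two hunks above: the AArch64 change shifts 1ULL while the ARM change shifts 1UL. For the small exponents these subtargets return, both are safe; 1ULL simply guarantees a 64-bit shift regardless of the host's unsigned long width.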
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 63f2899d23c..cea1986f396 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1235,7 +1235,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
Subtarget(ST) {
auto &HRI = *Subtarget.getRegisterInfo();
- setPrefLoopLogAlignment(4);
+ setPrefLoopAlignment(llvm::Align(16));
setPrefFunctionLogAlignment(4);
setMinFunctionAlignment(llvm::Align(4));
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 7d51b6c89d8..1ed32000432 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1200,7 +1200,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
setPrefFunctionLogAlignment(4);
- setPrefLoopLogAlignment(4);
+ setPrefLoopAlignment(llvm::Align(16));
break;
}
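Here and in the Hexagon hunk above, the constant exponent is folded by hand rather than shifted at runtime: 1 << 4 == 16, so llvm::Align(16) preserves the old behavior exactly. The arithmetic as a compile-time check:

    static_assert((1u << 4) == 16, "log2 exponent 4 is a 16-byte alignment");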
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 136d124ab4b..9e249caea4b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -73,9 +73,10 @@ cl::opt<bool> ExperimentalVectorWideningLegalization(
static cl::opt<int> ExperimentalPrefLoopAlignment(
"x86-experimental-pref-loop-alignment", cl::init(4),
- cl::desc("Sets the preferable loop alignment for experiments "
- "(the last x86-experimental-pref-loop-alignment bits"
- " of the loop header PC will be 0)."),
+ cl::desc(
+ "Sets the preferable loop alignment for experiments (as log2 bytes)"
+ "(the last x86-experimental-pref-loop-alignment bits"
+ " of the loop header PC will be 0)."),
cl::Hidden);
@@ -1892,7 +1893,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
- setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment);
+ setPrefLoopAlignment(llvm::Align(1UL << ExperimentalPrefLoopAlignment));
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
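The option description in the first X86 hunk states the property being bought: aligning a loop header to 1 << N bytes forces its low N bits to zero. A small self-contained check of that claim (the function name below is illustrative, not part of LLVM):

    #include <cstdint>

    // True when the last N bits of PC are zero, i.e. PC is 1 << N byte aligned.
    bool headerBitsClear(uint64_t PC, unsigned N) {
      return (PC & ((uint64_t(1) << N) - 1)) == 0;
    }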