Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index aafff4ef580..178390e7a31 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1969,7 +1969,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
 }
 
 bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
-  bool ProfitableArch = Subtarget->isCortexA57();
+  bool ProfitableArch = Subtarget->isCortexA57() || Subtarget->isKryo();
   // FIXME: The benefit from converting narrow loads into a wider load could be
   // microarchitectural as it assumes that a single load with two bitfield
   // extracts is cheaper than two narrow loads. Currently, this conversion is
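For context, an illustrative sketch of the transformation this flag gates. The function name and C-level framing are hypothetical; the pass actually rewrites MachineInstrs, but conceptually it merges two adjacent narrow loads into one wider load plus bitfield extracts, which pays off only on cores (Cortex-A57, and with this change Kryo) where one load and two extracts beat two narrow loads.

#include <stdint.h>

// Hypothetical example: the source-level shape that narrow-load merging
// targets. The optimizer works on MachineInstrs, not C; the comments show
// the conceptual AArch64 code before and after the merge, assuming a
// little-endian target.
uint32_t sum_halves(const uint16_t *p) {
  uint16_t lo = p[0]; // before: ldrh w8, [x0]
  uint16_t hi = p[1]; // before: ldrh w9, [x0, #2]
  // after merging (conceptually):
  //   ldr  w8, [x0]          ; one wider load covering both halfwords
  //   ubfx w9, w8, #16, #16  ; extract the high halfword (p[1])
  //   and  w8, w8, #0xffff   ; keep the low halfword (p[0])
  return (uint32_t)lo + (uint32_t)hi;
}

Whether the merged form wins is microarchitecture-dependent, which is exactly what the FIXME above notes and why the pass keys the decision off a per-subtarget whitelist rather than enabling it unconditionally.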