Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64InstrInfo.cpp')
-rw-r--r-- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 1e4373b6294..d0d08083e28 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -544,8 +544,7 @@ static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
- if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53() &&
- !Subtarget.isExynosM1() && !Subtarget.isKryo())
+ if (!Subtarget.hasCustomCheapAsMoveHandling())
return MI->isAsCheapAsAMove();
unsigned Imm;
@@ -559,7 +558,7 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::ADDXri:
case AArch64::SUBWri:
case AArch64::SUBXri:
- return (Subtarget.isExynosM1() ||
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
MI->getOperand(3).getImm() == 0);
// add/sub on register with shift
@@ -568,7 +567,7 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::SUBWrs:
case AArch64::SUBXrs:
Imm = MI->getOperand(3).getImm();
- return (Subtarget.isExynosM1() &&
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
AArch64_AM::getArithShiftValue(Imm) < 4);
// logical ops on immediate
@@ -609,7 +608,7 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::ORRWrs:
case AArch64::ORRXrs:
Imm = MI->getOperand(3).getImm();
- return (Subtarget.isExynosM1() &&
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
AArch64_AM::getShiftValue(Imm) < 4 &&
AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);
@@ -1522,8 +1521,8 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
if (isLdStPairSuppressed(MI))
return false;
- // Do not pair quad ld/st for Exynos.
- if (Subtarget.isExynosM1()) {
+ // On some CPUs quad load/store pairs are slower than two single load/stores.
+ if (Subtarget.avoidQuadLdStPairs()) {
switch (MI->getOpcode()) {
default:
break;
@@ -1801,8 +1800,8 @@ bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr *FirstLdSt,
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
MachineInstr *Second) const {
- if (Subtarget.isCyclone()) {
- // Cyclone can fuse CMN, CMP, TST followed by Bcc.
+ if (Subtarget.hasMacroOpFusion()) {
+ // Fuse CMN, CMP, TST followed by Bcc.
unsigned SecondOpcode = Second->getOpcode();
if (SecondOpcode == AArch64::Bcc) {
switch (First->getOpcode()) {
@@ -1817,7 +1816,7 @@ bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
return true;
}
}
- // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
+ // Fuse ALU operations followed by CBZ/CBNZ.
if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
switch (First->getOpcode()) {
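
The diff above replaces per-CPU checks (isCortexA57, isCortexA53, isExynosM1, isKryo, isCyclone) with behavior-named subtarget predicates (hasCustomCheapAsMoveHandling, avoidQuadLdStPairs, hasMacroOpFusion), so each call site asks about a capability rather than a CPU model. The standalone C++ sketch below only illustrates that pattern; the class, enum, and family-to-feature mapping are illustrative assumptions, not the actual AArch64Subtarget implementation.

// Standalone sketch (hypothetical names, not LLVM's AArch64Subtarget): the
// subtarget exposes behavior-named predicates instead of per-CPU queries,
// mirroring the hooks used in the diff above.
#include <iostream>

enum class ProcFamily { Generic, CortexA53, CortexA57, ExynosM1, Kryo, Cyclone };

struct Subtarget {
  ProcFamily Family;

  // CPUs with their own cheap-as-move rules opt in here once, instead of
  // every call site listing CPU models. The mapping below is assumed for
  // illustration only.
  bool hasCustomCheapAsMoveHandling() const {
    return Family == ProcFamily::CortexA53 || Family == ProcFamily::CortexA57 ||
           Family == ProcFamily::ExynosM1 || Family == ProcFamily::Kryo;
  }
  // Assumed for illustration: quad ld/st pairing is avoided on Exynos M1.
  bool avoidQuadLdStPairs() const { return Family == ProcFamily::ExynosM1; }
  // Assumed for illustration: macro-op fusion (CMP/CMN/TST + Bcc) on Cyclone.
  bool hasMacroOpFusion() const { return Family == ProcFamily::Cyclone; }
};

int main() {
  Subtarget ST{ProcFamily::ExynosM1};
  // Call sites ask about the behavior, not the CPU model.
  std::cout << std::boolalpha
            << "custom cheap-as-move handling: " << ST.hasCustomCheapAsMoveHandling() << '\n'
            << "avoid quad ld/st pairs:        " << ST.avoidQuadLdStPairs() << '\n'
            << "macro-op fusion:               " << ST.hasMacroOpFusion() << '\n';
}

The practical effect mirrors the diff: adding a new core that wants any of these behaviors only touches the subtarget definition, not every check in AArch64InstrInfo.cpp.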