Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp')
 llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 2bd655d0aa2..d4154ca88b2 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -604,10 +604,10 @@ static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
   assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
   int LoadSize = getMemScale(LoadInst);
   int StoreSize = getMemScale(StoreInst);
-  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
+  int UnscaledStOffset = TII->isUnscaledLdSt(*StoreInst)
                              ? getLdStOffsetOp(StoreInst).getImm()
                              : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
-  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
+  int UnscaledLdOffset = TII->isUnscaledLdSt(*LoadInst)
                              ? getLdStOffsetOp(LoadInst).getImm()
                              : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
   return (UnscaledStOffset <= UnscaledLdOffset) &&
@@ -963,8 +963,8 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
   // performance and correctness are verified only in little-endian.
   if (!Subtarget->isLittleEndian())
     return NextI;
-  bool IsUnscaled = TII->isUnscaledLdSt(LoadI);
-  assert(IsUnscaled == TII->isUnscaledLdSt(StoreI) &&
+  bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
+  assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
          "Unsupported ld/st match");
   assert(LoadSize <= StoreSize && "Invalid load size");
   int UnscaledLdOffset = IsUnscaled
@@ -1072,24 +1072,24 @@ static int alignTo(int Num, int PowOf2) {
   return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
 }
 
-static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
+static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
                      const AArch64InstrInfo *TII) {
   // One of the instructions must modify memory.
-  if (!MIa->mayStore() && !MIb->mayStore())
+  if (!MIa.mayStore() && !MIb.mayStore())
     return false;
 
   // Both instructions must be memory operations.
-  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
+  if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
     return false;
 
   return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
 }
 
-static bool mayAlias(MachineInstr *MIa,
+static bool mayAlias(MachineInstr &MIa,
                      SmallVectorImpl<MachineInstr *> &MemInsns,
                      const AArch64InstrInfo *TII) {
-  for (auto &MIb : MemInsns)
-    if (mayAlias(MIa, MIb, TII))
+  for (MachineInstr *MIb : MemInsns)
+    if (mayAlias(MIa, *MIb, TII))
       return true;
 
   return false;
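
The alignTo helper at the top of this hunk rounds Num up to the next multiple of a power of two by adding PowOf2 - 1 and masking off the low bits. A small standalone check, mirroring the helper rather than quoting the patch, makes the bit trick concrete:

#include <cassert>

// Same body as the helper in the hunk above: round Num up to the next
// multiple of PowOf2, which must be a power of two for the mask to work.
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

int main() {
  assert(alignTo(13, 8) == 16); // (13 + 7) & ~7 == 20 & ~7 == 16
  assert(alignTo(16, 8) == 16); // already-aligned values are unchanged
  assert(alignTo(1, 4) == 4);
  return 0;
}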
@@ -1146,7 +1146,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
       return false;
 
     // If we encounter a store aliased with the load, return early.
-    if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
+    if (MI->mayStore() && mayAlias(*LoadMI, *MI, TII))
       return false;
   } while (MBBI != B && Count < Limit);
   return false;
@@ -1158,12 +1158,12 @@ static bool areCandidatesToMergeOrPair(MachineInstr *FirstMI, MachineInstr *MI,
                                        LdStPairFlags &Flags,
                                        const AArch64InstrInfo *TII) {
   // If this is volatile or if pairing is suppressed, not a candidate.
-  if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
+  if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(*MI))
     return false;
 
   // We should have already checked FirstMI for pair suppression and volatility.
   assert(!FirstMI->hasOrderedMemoryRef() &&
-         !TII->isLdStPairSuppressed(FirstMI) &&
+         !TII->isLdStPairSuppressed(*FirstMI) &&
          "FirstMI shouldn't get here if either of these checks are true.");
 
   unsigned OpcA = FirstMI->getOpcode();
@@ -1212,7 +1212,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
   ++MBBI;
 
   bool MayLoad = FirstMI->mayLoad();
-  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
+  bool IsUnscaled = TII->isUnscaledLdSt(*FirstMI);
   unsigned Reg = getLdStRegOp(FirstMI).getReg();
   unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
@@ -1249,7 +1249,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // a relocation.
       unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
       int MIOffset = getLdStOffsetOp(MI).getImm();
-      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
+      bool MIIsUnscaled = TII->isUnscaledLdSt(*MI);
       if (IsUnscaled != MIIsUnscaled) {
         // We're trying to pair instructions that differ in how they are scaled.
         // If FirstMI is scaled then scale the offset of MI accordingly.
@@ -1314,7 +1314,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // first.
       if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
           !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
-          !mayAlias(MI, MemInsns, TII)) {
+          !mayAlias(*MI, MemInsns, TII)) {
         Flags.setMergeForward(false);
         return MBBI;
       }
@@ -1325,7 +1325,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // into the second.
       if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
           !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
-          !mayAlias(FirstMI, MemInsns, TII)) {
+          !mayAlias(*FirstMI, MemInsns, TII)) {
         Flags.setMergeForward(true);
         return MBBI;
       }
@@ -1610,7 +1610,7 @@ bool AArch64LoadStoreOpt::tryToMergeLdStInst(
   MachineInstr *MI = MBBI;
   MachineBasicBlock::iterator E = MI->getParent()->end();
 
-  if (!TII->isCandidateToMergeOrPair(MI))
+  if (!TII->isCandidateToMergeOrPair(*MI))
     return false;
 
   // For promotable zero stores, the stored value should be WZR.
@@ -1642,13 +1642,13 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineInstr *MI = MBBI;
   MachineBasicBlock::iterator E = MI->getParent()->end();
 
-  if (!TII->isCandidateToMergeOrPair(MI))
+  if (!TII->isCandidateToMergeOrPair(*MI))
     return false;
 
   // Early exit if the offset is not possible to match. (6 bits of positive
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
-  bool IsUnscaled = TII->isUnscaledLdSt(MI);
+  bool IsUnscaled = TII->isUnscaledLdSt(*MI);
   int Offset = getLdStOffsetOp(MI).getImm();
   int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
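
The "6 bits of positive range" comment refers to the LDP/STP addressing mode: the pair immediate is a 7-bit signed field scaled by the element size, so the element offset must land in [-64, 63]. A sketch of what a bounds check like inBoundsForPair has to enforce (an assumed mirror; the helper's body is not shown in this diff):

#include <cassert>

// Hypothetical mirror of inBoundsForPair: unscaled (LDUR-style) byte offsets
// are first converted to element offsets, then checked against the 7-bit
// signed immediate range of the paired instructions.
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  if (IsUnscaled) {
    if (Offset % OffsetStride) // Byte offset must divide evenly by the stride.
      return false;
    Offset /= OffsetStride;    // Convert bytes to elements.
  }
  return Offset <= 63 && Offset >= -64;
}

int main() {
  assert(inBoundsForPair(false, 63, 1));  // top of the scaled range
  assert(!inBoundsForPair(false, 64, 1)); // one past it
  assert(inBoundsForPair(true, 504, 8));  // 8-byte stride: 504 / 8 == 63
  assert(!inBoundsForPair(true, 505, 8)); // not a multiple of the stride
  return 0;
}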
@@ -1660,7 +1660,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
   if (Paired != E) {
     ++NumPairCreated;
-    if (TII->isUnscaledLdSt(MI))
+    if (TII->isUnscaledLdSt(*MI))
       ++NumUnscaledPairCreated;
     // Keeping the iterator straight is a pain, so we let the merge routine tell
     // us what the next instruction is after it's done mucking about.
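
Taken together, the hunks above apply one mechanical change: AArch64InstrInfo helpers such as isUnscaledLdSt, isLdStPairSuppressed, and isCandidateToMergeOrPair, plus the local mayAlias overloads, now take MachineInstr& instead of MachineInstr*, and each call site dereferences once at the boundary. A minimal standalone sketch of the pattern (hypothetical stand-in types, not LLVM's real classes):

#include <cassert>
#include <vector>

// Stand-in for llvm::MachineInstr, just enough surface for the example.
struct MachineInstr {
  bool Store;
  bool MemOp;
  bool mayStore() const { return Store; }
  bool mayLoadOrStore() const { return MemOp; }
};

// Reference parameters document that both instructions must exist; the
// helper no longer needs to consider a null argument.
static bool mayAlias(const MachineInstr &MIa, const MachineInstr &MIb) {
  // One of the instructions must modify memory for a hazard to exist.
  if (!MIa.mayStore() && !MIb.mayStore())
    return false;
  // Without real memory-operand analysis, conservatively report aliasing
  // whenever both instructions touch memory.
  return MIa.mayLoadOrStore() && MIb.mayLoadOrStore();
}

// Containers cannot hold references, so the list keeps MachineInstr* and
// the loop dereferences exactly once, at the call boundary.
static bool mayAlias(const MachineInstr &MIa,
                     const std::vector<MachineInstr *> &MemInsns) {
  for (const MachineInstr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb))
      return true;
  return false;
}

int main() {
  MachineInstr Load{false, true};
  MachineInstr Store{true, true};
  std::vector<MachineInstr *> Mem{&Store};
  assert(mayAlias(Load, Mem)); // a load can alias an earlier store
  assert(!mayAlias(Load, {})); // nothing to alias against
  return 0;
}

This also explains why the patch leaves the SmallVectorImpl<MachineInstr *> element type untouched while the parameters become references: the container still stores pointers, and the dereference moves to the loop in the list overload.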