-rw-r--r--  llvm/include/llvm/CodeGen/MachineInstr.h | 2
-rw-r--r--  llvm/include/llvm/CodeGen/TargetInstrInfo.h | 12
-rw-r--r--  llvm/lib/CodeGen/ImplicitNullChecks.cpp | 15
-rw-r--r--  llvm/lib/CodeGen/MachineInstr.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/MachinePipeliner.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/MachineSink.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 26
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.h | 14
-rw-r--r--  llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 28
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.h | 9
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 3
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h | 3
21 files changed, 98 insertions(+), 75 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index a85fee7bf4b..5f671de0d9b 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1378,7 +1378,7 @@ public:
/// @param AA Optional alias analysis, used to compare memory operands.
/// @param Other MachineInstr to check aliasing against.
/// @param UseTBAA Whether to pass TBAA information to alias analysis.
- bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
+ bool mayAlias(AliasAnalysis *AA, const MachineInstr &Other, bool UseTBAA) const;
/// Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
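With the const overload above, alias queries can be issued from code that only holds const references to the instructions involved. A minimal caller sketch — not part of this patch, and the helper name is invented for illustration:

// Hypothetical helper, assuming only const access to both instructions.
static bool mayInterfere(AliasAnalysis *AA, const MachineInstr &A,
                         const MachineInstr &B) {
  // mayAlias is only meaningful when both sides touch memory.
  if (!(A.mayLoad() || A.mayStore()) || !(B.mayLoad() || B.mayStore()))
    return false;
  return A.mayAlias(AA, B, /*UseTBAA=*/true);
}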
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index b732be625de..3ba4d406461 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1144,8 +1144,9 @@ public:
/// Get the base operand and byte offset of an instruction that reads/writes
/// memory.
- virtual bool getMemOperandWithOffset(MachineInstr &MI,
- MachineOperand *&BaseOp, int64_t &Offset,
+ virtual bool getMemOperandWithOffset(const MachineInstr &MI,
+ const MachineOperand *&BaseOp,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const {
return false;
}
@@ -1170,8 +1171,8 @@ public:
/// or
/// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
/// to TargetPassConfig::createMachineScheduler() to have an effect.
- virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
- MachineOperand &BaseOp2,
+ virtual bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+ const MachineOperand &BaseOp2,
unsigned NumLoads) const {
llvm_unreachable("target did not implement shouldClusterMemOps()");
}
@@ -1548,7 +1549,8 @@ public:
/// See also MachineInstr::mayAlias, which is implemented on top of this
/// function.
virtual bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const {
assert((MIa.mayLoad() || MIa.mayStore()) &&
"MIa must load from or modify a memory location");
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 68110314151..dff6d02b06e 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -180,7 +180,8 @@ class ImplicitNullChecks : public MachineFunctionPass {
/// Returns AR_NoAlias if the \p MI memory operation does not alias with
/// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
/// they may alias and any further memory operation may alias with \p PrevMI.
- AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);
+ AliasResult areMemoryOpsAliased(const MachineInstr &MI,
+ const MachineInstr *PrevMI) const;
enum SuitabilityResult {
SR_Suitable,
@@ -194,7 +195,8 @@ class ImplicitNullChecks : public MachineFunctionPass {
/// no sense to continue the search, since no further instruction will be
/// usable. \p PrevInsts is the set of instructions seen since
/// the explicit null check on \p PointerReg.
- SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+ SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
+ unsigned PointerReg,
ArrayRef<MachineInstr *> PrevInsts);
/// Return true if \p FaultingMI can be hoisted from after the
@@ -318,8 +320,8 @@ static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
}
ImplicitNullChecks::AliasResult
-ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
- MachineInstr *PrevMI) {
+ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
+ const MachineInstr *PrevMI) const {
// If it is not a memory access, skip the check.
if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
return AR_NoAlias;
@@ -356,10 +358,11 @@ ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
}
ImplicitNullChecks::SuitabilityResult
-ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
+ unsigned PointerReg,
ArrayRef<MachineInstr *> PrevInsts) {
int64_t Offset;
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) ||
!BaseOp->isReg() || BaseOp->getReg() != PointerReg)
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 26d58340b61..7fdcb31aea7 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1181,8 +1181,8 @@ bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
return true;
}
-bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
- bool UseTBAA) {
+bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
+ bool UseTBAA) const {
const MachineFunction *MF = getMF();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 1f7c48fe215..fde6531e160 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -597,7 +597,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
// First, perform the cheaper check that compares the base register.
// If they are the same and the load offset is less than the store
// offset, then mark the dependence as loop carried potentially.
- MachineOperand *BaseOp1, *BaseOp2;
+ const MachineOperand *BaseOp1, *BaseOp2;
int64_t Offset1, Offset2;
if (TII->getMemOperandWithOffset(LdMI, BaseOp1, Offset1, TRI) &&
TII->getMemOperandWithOffset(MI, BaseOp2, Offset2, TRI)) {
@@ -2725,7 +2725,7 @@ void SwingSchedulerDAG::addBranches(MBBVectorTy &PrologBBs,
/// during each iteration. Set Delta to the amount of the change.
bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
int64_t Offset;
if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
return false;
@@ -3139,7 +3139,7 @@ bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep,
if (!computeDelta(*SI, DeltaS) || !computeDelta(*DI, DeltaD))
return true;
- MachineOperand *BaseOpS, *BaseOpD;
+ const MachineOperand *BaseOpS, *BaseOpD;
int64_t OffsetS, OffsetD;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TII->getMemOperandWithOffset(*SI, BaseOpS, OffsetS, TRI) ||
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index d56ea43437b..4b61dad4ac5 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1466,10 +1466,10 @@ namespace {
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
struct MemOpInfo {
SUnit *SU;
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
int64_t Offset;
- MemOpInfo(SUnit *su, MachineOperand *Op, int64_t ofs)
+ MemOpInfo(SUnit *su, const MachineOperand *Op, int64_t ofs)
: SU(su), BaseOp(Op), Offset(ofs) {}
bool operator<(const MemOpInfo &RHS) const {
@@ -1555,7 +1555,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) {
SmallVector<MemOpInfo, 32> MemOpRecords;
for (SUnit *SU : MemOps) {
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
int64_t Offset;
if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
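How the collected records are consumed (simplified; the real mutation also bounds the cluster length and threads it into NumLoads): sorting groups ops with the same base register by offset, and adjacent pairs are offered to the target hook:

// Simplified consumption of MemOpRecords; the real code checks the
// addEdge result and grows clusters beyond two ops.
llvm::sort(MemOpRecords);
for (unsigned I = 0; I + 1 < MemOpRecords.size(); ++I) {
  const MemOpInfo &A = MemOpRecords[I];
  const MemOpInfo &B = MemOpRecords[I + 1];
  if (TII->shouldClusterMemOps(*A.BaseOp, *B.BaseOp, /*NumLoads=*/2))
    DAG->addEdge(B.SU, SDep(A.SU, SDep::Cluster));
}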
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index f4fcbeb387f..fc61a34354c 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -715,7 +715,7 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
!PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
return false;
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
int64_t Offset;
if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
return false;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 12f2576f51e..2819c50c8aa 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -927,9 +927,9 @@ bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
}
bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
- MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
+ const MachineInstr &MIa, const MachineInstr &MIb, AliasAnalysis *AA) const {
const TargetRegisterInfo *TRI = &getRegisterInfo();
- MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+ const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned WidthA = 0, WidthB = 0;
@@ -1894,7 +1894,7 @@ unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
// Is this a candidate for ld/st merging or pairing? For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
-bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
+bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
// If this is a volatile load/store, don't mess with it.
if (MI.hasOrderedMemoryRef())
return false;
@@ -1936,8 +1936,8 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
return true;
}
-bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
- MachineOperand *&BaseOp,
+bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
unsigned Width;
@@ -1945,7 +1945,7 @@ bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
}
bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
- MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+ const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
// Handle only loads/stores with base register followed by immediate offset.
@@ -2244,11 +2244,11 @@ static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOperandWithOffset returns true.
-bool AArch64InstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
- MachineOperand &BaseOp2,
+bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
+ const MachineOperand &BaseOp2,
unsigned NumLoads) const {
- MachineInstr &FirstLdSt = *BaseOp1.getParent();
- MachineInstr &SecondLdSt = *BaseOp2.getParent();
+ const MachineInstr &FirstLdSt = *BaseOp1.getParent();
+ const MachineInstr &SecondLdSt = *BaseOp2.getParent();
if (BaseOp1.getType() != BaseOp2.getType())
return false;
@@ -4918,8 +4918,8 @@ AArch64InstrInfo::getOutliningCandidateInfo(
// At this point, we have a stack instruction that we might need to
// fix up. We'll handle it if it's a load or store.
if (MI.mayLoadOrStore()) {
- MachineOperand *Base; // Filled with the base operand of MI.
- int64_t Offset; // Filled with the offset of MI.
+ const MachineOperand *Base; // Filled with the base operand of MI.
+ int64_t Offset; // Filled with the offset of MI.
// Does it allow us to offset the base operand and is the base the
// register SP?
@@ -5288,7 +5288,7 @@ AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
for (MachineInstr &MI : MBB) {
- MachineOperand *Base;
+ const MachineOperand *Base;
unsigned Width;
int64_t Offset;
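Of the AArch64 pieces above, shouldClusterMemOps carries the pairing heuristic: once both offsets are scaled to the common access width, only immediately adjacent slots are clustered, which is what later enables ldp/stp formation. A reduced sketch (assumptions: offsets pre-scaled, opcode compatibility already checked):

// Reduced pairing test; the real function also compares base operand
// types and frame indices before reaching this point.
static bool adjacentScaledOffsets(int64_t Offset1, int64_t Offset2,
                                  unsigned NumLoads) {
  return NumLoads <= 2 && Offset1 + 1 == Offset2;
}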
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 148b7afabe6..b33edc5168b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -54,7 +54,8 @@ public:
unsigned &DstReg, unsigned &SubIdx) const override;
bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
unsigned isLoadFromStackSlot(const MachineInstr &MI,
@@ -100,16 +101,18 @@ public:
static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
/// Return true if this is a load/store that can be potentially paired/merged.
- bool isCandidateToMergeOrPair(MachineInstr &MI) const;
+ bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
/// Hint that pairing the given load or store is unprofitable.
static void suppressLdStPair(MachineInstr &MI);
- bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffset(const MachineInstr &MI,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
- bool getMemOperandWithOffsetWidth(MachineInstr &MI, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
+ const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
@@ -123,7 +126,8 @@ public:
static bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
int64_t &MinOffset, int64_t &MaxOffset);
- bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
+ bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+ const MachineOperand &BaseOp2,
unsigned NumLoads) const override;
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 27ddd7af271..0e84a00df00 100644
--- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -147,7 +147,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
for (auto &MI : MBB) {
if (!isNarrowFPStore(MI))
continue;
- MachineOperand *BaseOp;
+ const MachineOperand *BaseOp;
int64_t Offset;
if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) &&
BaseOp->isReg()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 45b0a5b2a8d..8ce3fd2fd58 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -256,8 +256,8 @@ static bool isStride64(unsigned Opc) {
}
}
-bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
- MachineOperand *&BaseOp,
+bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
unsigned Opc = LdSt.getOpcode();
@@ -321,7 +321,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
if (SOffset && SOffset->isReg())
return false;
- MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+ const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
if (!AddrReg)
return false;
@@ -344,7 +344,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
if (!OffsetImm)
return false;
- MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
+ const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
BaseOp = SBaseReg;
Offset = OffsetImm->getImm();
assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
@@ -353,7 +353,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
}
if (isFLAT(LdSt)) {
- MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+ const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
if (VAddr) {
// Can't analyze 2 offsets.
if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
@@ -409,11 +409,11 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
return Base1 == Base2;
}
-bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
- MachineOperand &BaseOp2,
+bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
+ const MachineOperand &BaseOp2,
unsigned NumLoads) const {
- MachineInstr &FirstLdSt = *BaseOp1.getParent();
- MachineInstr &SecondLdSt = *BaseOp2.getParent();
+ const MachineInstr &FirstLdSt = *BaseOp1.getParent();
+ const MachineInstr &SecondLdSt = *BaseOp2.getParent();
if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
return false;
@@ -2223,9 +2223,9 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
return LowOffset + LowWidth <= HighOffset;
}
-bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
- MachineInstr &MIb) const {
- MachineOperand *BaseOp0, *BaseOp1;
+bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+ const MachineInstr &MIb) const {
+ const MachineOperand *BaseOp0, *BaseOp1;
int64_t Offset0, Offset1;
if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
@@ -2247,8 +2247,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
return false;
}
-bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
- MachineInstr &MIb,
+bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA) const {
assert((MIa.mayLoad() || MIa.mayStore()) &&
"MIa must load from or modify a memory location");
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 13e3dbd3cfe..a22f5a68ee6 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -127,7 +127,8 @@ private:
const TargetRegisterClass *
getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
- bool checkInstOffsetsDoNotOverlap(MachineInstr &MIa, MachineInstr &MIb) const;
+ bool checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+ const MachineInstr &MIb) const;
unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
@@ -172,11 +173,13 @@ public:
int64_t &Offset1,
int64_t &Offset2) const override;
- bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const final;
- bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
+ bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+ const MachineOperand &BaseOp2,
unsigned NumLoads) const override;
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
@@ -293,7 +296,8 @@ public:
unsigned Kind) const override;
bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
bool isFoldableCopy(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index 343c8b6e7a6..ebbdf80f956 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1956,7 +1956,7 @@ void SIScheduleDAGMI::schedule()
for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
SUnit *SU = &SUnits[i];
- MachineOperand *BaseLatOp;
+ const MachineOperand *BaseLatOp;
int64_t OffLatReg;
if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
IsLowLatencySU[i] = 1;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 3ecb1428213..545dd15dde2 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1854,7 +1854,8 @@ DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
// S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
// Currently AA considers the addresses in these instructions to be aliasing.
bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
- MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
+ const MachineInstr &MIa, const MachineInstr &MIb,
+ AliasAnalysis *AA) const {
if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
@@ -2925,7 +2926,7 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
/// Get the base register and byte offset of a load/store instr.
bool HexagonInstrInfo::getMemOperandWithOffset(
- MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+ const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
const TargetRegisterInfo *TRI) const {
unsigned AccessSize = 0;
BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 688af2a9089..8d339641ba4 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -215,7 +215,8 @@ public:
bool expandPostRAPseudo(MachineInstr &MI) const override;
/// Get the base register and byte offset of a load/store instr.
- bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
@@ -295,7 +296,8 @@ public:
// memory addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
/// For instructions with a base and offset, return the position of the
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 93db67c4e3e..dd45797edae 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -86,7 +86,8 @@ void LanaiInstrInfo::loadRegFromStackSlot(
}
bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
- MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis * /*AA*/) const {
+ const MachineInstr &MIa, const MachineInstr &MIb,
+ AliasAnalysis * /*AA*/) const {
assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
@@ -100,7 +101,7 @@ bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
// the width doesn't overlap the offset of a higher memory access,
// then the memory accesses are different.
const TargetRegisterInfo *TRI = &getRegisterInfo();
- MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+ const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned int WidthA = 0, WidthB = 0;
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
@@ -755,7 +756,7 @@ unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
}
bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
- MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+ const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo * /*TRI*/) const {
// Handle only loads/stores with base register followed by immediate offset
// and with add as ALU op.
@@ -793,8 +794,8 @@ bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
return true;
}
-bool LanaiInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
- MachineOperand *&BaseOp,
+bool LanaiInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
switch (LdSt.getOpcode()) {
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.h b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
index feda529610c..d71424aeb0b 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.h
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
@@ -35,7 +35,8 @@ public:
return RegisterInfo;
}
- bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA) const override;
unsigned isLoadFromStackSlot(const MachineInstr &MI,
@@ -67,11 +68,13 @@ public:
bool expandPostRAPseudo(MachineInstr &MI) const override;
- bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
- bool getMemOperandWithOffsetWidth(MachineInstr &LdSt, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index cb963a10010..54977d93919 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1782,7 +1782,8 @@ void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
}
bool SystemZInstrInfo::
-areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA) const {
if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index c5d1917b329..150028b1e55 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -313,7 +313,8 @@ public:
// addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
};
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7b68cc2dc3a..480292b06a3 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2958,7 +2958,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
}
bool X86InstrInfo::getMemOperandWithOffset(
- MachineInstr &MemOp, MachineOperand *&BaseOp, int64_t &Offset,
+ const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset,
const TargetRegisterInfo *TRI) const {
const MCInstrDesc &Desc = MemOp.getDesc();
int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
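Once getMemoryOperandNo locates the memory reference, the x86 hook reads the base and displacement out of the standard five-operand memory form. A simplified sketch of the remainder (the real function also applies the operand bias and rejects scaled-index and non-immediate-displacement forms):

// Simplified continuation; X86::AddrBaseReg and X86::AddrDisp index into
// the x86 memory operand quintuple (base, scale, index, disp, segment).
if (MemRefBegin < 0)
  return false;
const MachineOperand &BaseMO =
    MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
const MachineOperand &DispMO =
    MemOp.getOperand(MemRefBegin + X86::AddrDisp);
if (!BaseMO.isReg() || !DispMO.isImm())
  return false;
BaseOp = &BaseMO;
Offset = DispMO.getImm();
return true;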
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 49f76606e8d..de4ed6998f5 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -288,7 +288,8 @@ public:
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
- bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+ bool getMemOperandWithOffset(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
bool analyzeBranchPredicate(MachineBasicBlock &MBB,