 llvm/include/llvm/CodeGen/TargetInstrInfo.h       | 4 ++--
 llvm/lib/CodeGen/BranchFolding.cpp                | 4 ++--
 llvm/lib/CodeGen/ImplicitNullChecks.cpp           | 2 +-
 llvm/lib/CodeGen/StackColoring.cpp                | 2 +-
 llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp    | 4 ++--
 llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp       | 2 +-
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp            | 4 ++--
 llvm/lib/Target/ARC/ARCOptAddrMode.cpp            | 2 +-
 llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp | 6 +++---
 llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp    | 2 +-
 llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp | 2 +-
 llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp      | 2 +-
 llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp    | 2 +-
 llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp  | 2 +-
 14 files changed, 20 insertions(+), 20 deletions(-)
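The patch is a mechanical cleanup: every open-coded "MI.mayLoad() || MI.mayStore()" check becomes a single call to the MachineInstr::mayLoadOrStore() convenience query. For context, a minimal sketch of how that helper reads in llvm/include/llvm/CodeGen/MachineInstr.h; treat the exact QueryType default as an assumption rather than a quote of the header:

    /// Return true if this instruction could possibly read from or
    /// modify memory. Assumed to forward the same bundle-aware QueryType
    /// that mayLoad() and mayStore() take, so every replacement in this
    /// patch is behavior-preserving.
    bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
      return mayLoad(Type) || mayStore(Type);
    }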
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index e48d6bf625e..916c8a075ee 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1647,9 +1647,9 @@ public:
   virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                                const MachineInstr &MIb) const {
-    assert((MIa.mayLoad() || MIa.mayStore()) &&
+    assert(MIa.mayLoadOrStore() &&
            "MIa must load from or modify a memory location");
-    assert((MIb.mayLoad() || MIb.mayStore()) &&
+    assert(MIb.mayLoadOrStore() &&
            "MIb must load from or modify a memory location");
     return false;
   }
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index 2bf1b392528..4b9c50aeb1d 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -449,7 +449,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
       continue;
     if (I->isCall())
       Time += 10;
-    else if (I->mayLoad() || I->mayStore())
+    else if (I->mayLoadOrStore())
       Time += 2;
     else
       ++Time;
@@ -835,7 +835,7 @@ mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
     assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");

     // Merge MMOs from memory operations in the common block.
-    if (MBBICommon->mayLoad() || MBBICommon->mayStore())
+    if (MBBICommon->mayLoadOrStore())
       MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
     // Drop undef flags if they aren't present in all merged instructions.
     for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index b3ca4c1d802..0bbedb0a5ea 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -372,7 +372,7 @@ ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,

   // We want the mem access to be issued at a sane offset from PointerReg,
   // so that if PointerReg is null then the access reliably page faults.
-  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
+  if (!(MI.mayLoadOrStore() && !MI.isPredicable() &&
         -PageSize < Offset && Offset < PageSize))
     return SR_Unsuitable;
diff --git a/llvm/lib/CodeGen/StackColoring.cpp b/llvm/lib/CodeGen/StackColoring.cpp
index fc10c7d3a90..b6e81116286 100644
--- a/llvm/lib/CodeGen/StackColoring.cpp
+++ b/llvm/lib/CodeGen/StackColoring.cpp
@@ -1004,7 +1004,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
       // zone are okay, despite the fact that we don't have a good way
       // for validating all of the usages of the calculation.
 #ifndef NDEBUG
-      bool TouchesMemory = I.mayLoad() || I.mayStore();
+      bool TouchesMemory = I.mayLoadOrStore();
       // If we *don't* protect the user from escaped allocas, don't bother
       // validating the instructions.
       if (!I.isDebugInstr() && TouchesMemory && ProtectFromEscapedAllocas) {
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 9528aee4c50..3ef5a77af45 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -185,7 +185,7 @@ GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
   if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
     return NoopHazard;

-  if ((MI->mayLoad() || MI->mayStore()) && checkMAILdStHazards(MI) > 0)
+  if (MI->mayLoadOrStore() && checkMAILdStHazards(MI) > 0)
     return NoopHazard;

   if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
@@ -296,7 +296,7 @@ unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
   if (SIInstrInfo::isMAI(*MI))
     return std::max(WaitStates, checkMAIHazards(MI));

-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     return std::max(WaitStates, checkMAILdStHazards(MI));

   return WaitStates;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index fae556a35df..bfeaebde386 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1211,7 +1211,7 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
     }
   } else if (TII->isFLAT(Inst)) {
-    assert(Inst.mayLoad() || Inst.mayStore());
+    assert(Inst.mayLoadOrStore());

     if (TII->usesVM_CNT(Inst)) {
       if (!ST->hasVscnt())
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 2224dbc91aa..041a7622f72 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2549,9 +2549,9 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,

 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                                   const MachineInstr &MIb) const {
-  assert((MIa.mayLoad() || MIa.mayStore()) &&
+  assert(MIa.mayLoadOrStore() &&
          "MIa must load from or modify a memory location");
-  assert((MIb.mayLoad() || MIb.mayStore()) &&
+  assert(MIb.mayLoadOrStore() &&
          "MIb must load from or modify a memory location");

   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
diff --git a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
index 901cd5a1efa..232a7be2a9f 100644
--- a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
+++ b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
@@ -186,7 +186,7 @@ bool ARCOptAddrMode::noUseOfAddBeforeLoadOrStore(const MachineInstr *Add,
 }

 MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) {
-  assert((Ldst.mayLoad() || Ldst.mayStore()) && "LD/ST instruction expected");
+  assert(Ldst.mayLoadOrStore() && "LD/ST instruction expected");

   unsigned BasePos, OffsetPos;
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index 9b70e385941..3ffcc71e159 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1639,7 +1639,7 @@ bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
     return true;
   }

-  if ((MI.mayLoad() || MI.mayStore()) && !isStoreImmediate(ExtOpc)) {
+  if (MI.mayLoadOrStore() && !isStoreImmediate(ExtOpc)) {
     // For memory instructions, there is an asymmetry in the addressing
     // modes. Addressing modes allowing extenders can be replaced with
     // addressing modes that use registers, but the order of operands
@@ -1794,7 +1794,7 @@ bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
     return true;
   }

-  if (MI.mayLoad() || MI.mayStore()) {
+  if (MI.mayLoadOrStore()) {
     unsigned IdxOpc = getRegOffOpcode(ExtOpc);
     assert(IdxOpc && "Expecting indexed opcode");
     MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(IdxOpc));
@@ -1844,7 +1844,7 @@ bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) {
   // These two addressing modes must be converted into indexed forms
   // regardless of what the initializer looks like.
   bool IsAbs = false, IsAbsSet = false;
-  if (MI.mayLoad() || MI.mayStore()) {
+  if (MI.mayLoadOrStore()) {
     unsigned AM = HII->getAddrMode(MI);
     IsAbs = AM == HexagonII::Absolute;
     IsAbsSet = AM == HexagonII::AbsoluteSet;
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 6f3e96ea5a2..b18730d967a 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -682,7 +682,7 @@ bool HexagonEarlyIfConversion::isPredicableStore(const MachineInstr *MI)

 bool HexagonEarlyIfConversion::isSafeToSpeculate(const MachineInstr *MI)
       const {
-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     return false;

   if (MI->isCall() || MI->isBarrier() || MI->isBranch())
     return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 428f70edc46..c1d0599830c 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -1041,7 +1041,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
   bool CanDown = canMoveOver(*DefI, Defs, Uses);
   // The TfrI does not access memory, but DefI could. Check if it's safe
   // to move DefI down to TfrI.
-  if (DefI->mayLoad() || DefI->mayStore())
+  if (DefI->mayLoadOrStore())
     if (!canMoveMemTo(*DefI, TfrI, true))
       CanDown = false;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index de22bc02724..39ec8936214 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2147,7 +2147,7 @@ bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
 }

 bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
-  if (MI.mayLoad() || MI.mayStore() || MI.isCompare())
+  if (MI.mayLoadOrStore() || MI.isCompare())
     return true;

   // Multiply
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 55f31c62885..d80e0ed50c9 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -159,7 +159,7 @@ bool HexagonSplitDoubleRegs::isVolatileInstr(const MachineInstr *MI) const {
 }

 bool HexagonSplitDoubleRegs::isFixedInstr(const MachineInstr *MI) const {
-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     if (MemRefsFixed || isVolatileInstr(MI))
       return true;
   if (MI->isDebugInstr())
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
index 917358009cf..aab37393ed3 100644
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -271,7 +271,7 @@ void HexagonStoreWidening::createStoreGroup(MachineInstr *BaseStore,
     if (MI->isCall() || MI->hasUnmodeledSideEffects())
       return;

-    if (MI->mayLoad() || MI->mayStore()) {
+    if (MI->mayLoadOrStore()) {
      if (MI->hasOrderedMemoryRef() || instrAliased(Group, MI))
        return;
      Other.push_back(MI);
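The caller-side effect of the cleanup, as a hedged before/after sketch; handleMemoryAccess is a hypothetical stand-in for whatever the surrounding pass does with a memory instruction:

    // Before: both queries spelled out at every call site.
    if (MI.mayLoad() || MI.mayStore())
      handleMemoryAccess(MI); // hypothetical caller logic

    // After: one query with the same meaning.
    if (MI.mayLoadOrStore())
      handleMemoryAccess(MI); // hypothetical caller logic

Both forms are equivalent by construction; the new spelling reads as a single predicate and drops the extra parentheses that the compound condition forced inside asserts and if-statements.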