Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/ImplicitNullChecks.cpp              | 10
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp                |  4
-rw-r--r--  llvm/lib/CodeGen/MachineSink.cpp                     |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp         |  7
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.h           |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp               |  6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h                 |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp        |  3
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                 |  2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h                   |  2
11 files changed, 22 insertions, 21 deletions
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 39c1b9fb9a6..a413aebac23 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -46,10 +46,9 @@
 
 using namespace llvm;
 
-static cl::opt<unsigned> PageSize("imp-null-check-page-size",
-                                  cl::desc("The page size of the target in "
-                                           "bytes"),
-                                  cl::init(4096));
+static cl::opt<int> PageSize("imp-null-check-page-size",
+                             cl::desc("The page size of the target in bytes"),
+                             cl::init(4096));
 
 #define DEBUG_TYPE "implicit-null-checks"
 
@@ -324,7 +323,8 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
   for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
        ++MII) {
     MachineInstr *MI = &*MII;
-    unsigned BaseReg, Offset;
+    unsigned BaseReg;
+    int64_t Offset;
     if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
       if (MI->mayLoad() && !MI->isPredicable() && BaseReg == PointerReg &&
           Offset < PageSize && MI->getDesc().getNumDefs() <= 1 &&
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 604d8219794..7547ae3342b 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1361,7 +1361,7 @@ class LoadClusterMutation : public ScheduleDAGMutation {
   struct LoadInfo {
     SUnit *SU;
     unsigned BaseReg;
-    unsigned Offset;
+    int64_t Offset;
     LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
         : SU(su), BaseReg(reg), Offset(ofs) {}
 
@@ -1389,7 +1389,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
   for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
     SUnit *SU = Loads[Idx];
     unsigned BaseReg;
-    unsigned Offset;
+    int64_t Offset;
     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
   }
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index b1f3875880b..18aecdaba84 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -702,7 +702,8 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr *MI,
       !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
     return false;
 
-  unsigned BaseReg, Offset;
+  unsigned BaseReg;
+  int64_t Offset;
   if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
     return false;
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 162d8ba8f77..feb9db7cbf5 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1313,10 +1313,9 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
       ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
 }
 
-bool
-AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                                        unsigned &Offset,
-                                        const TargetRegisterInfo *TRI) const {
+bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
+    MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
+    const TargetRegisterInfo *TRI) const {
   switch (LdSt->getOpcode()) {
   default:
     return false;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index b5bb446f8c1..46055c7ed93 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -91,7 +91,7 @@ public:
   void suppressLdStPair(MachineInstr *MI) const;
 
   bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                             unsigned &Offset,
+                             int64_t &Offset,
                              const TargetRegisterInfo *TRI) const override;
 
   bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 1c6b15790ea..51efc4cf992 100644
--- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -141,7 +141,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
       if (!isNarrowFPStore(MI))
         continue;
       unsigned BaseReg;
-      unsigned Offset;
+      int64_t Offset;
       if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
         if (PrevBaseReg == BaseReg) {
           // If this block can take STPs, skip ahead to the next block.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index bbc19fdc715..f3dfde7e104 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -202,7 +202,7 @@ static bool isStride64(unsigned Opc) {
 }
 
 bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                                        unsigned &Offset,
+                                        int64_t &Offset,
                                         const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt->getOpcode();
 
@@ -1160,8 +1160,8 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
 
 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
                                                MachineInstr *MIb) const {
-  unsigned BaseReg0, Offset0;
-  unsigned BaseReg1, Offset1;
+  unsigned BaseReg0, BaseReg1;
+  int64_t Offset0, Offset1;
 
   if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
       getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 3a96d79c1d7..f0c3d10b8bc 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -91,7 +91,7 @@ public:
                               int64_t &Offset2) const override;
 
   bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                             unsigned &Offset,
+                             int64_t &Offset,
                              const TargetRegisterInfo *TRI) const final;
 
   bool shouldClusterLoads(MachineInstr *FirstLdSt,
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index cd399333485..5ef12e7f5ea 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1879,7 +1879,8 @@ void SIScheduleDAGMI::schedule()
 
   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
     SUnit *SU = &SUnits[i];
-    unsigned BaseLatReg, OffLatReg;
+    unsigned BaseLatReg;
+    int64_t OffLatReg;
     if (SITII->isLowLatencyInstruction(SU->getInstr())) {
       IsLowLatencySU[i] = 1;
       if (SITII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseLatReg,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index fc43d9b3862..dbfa8aca8ac 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4612,7 +4612,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
 }
 
 bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
-                                         unsigned &Offset,
+                                         int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
   const MCInstrDesc &Desc = MemOp->getDesc();
   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 9cbd2ecc8a0..3e3f2af7641 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -312,7 +312,7 @@ public:
                      bool AllowModify) const override;
 
   bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                             unsigned &Offset,
+                             int64_t &Offset,
                              const TargetRegisterInfo *TRI) const override;
   bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
                               TargetInstrInfo::MachineBranchPredicate &MBP,
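The change is mechanical: every override and caller of getMemOpBaseRegImmOfs now receives the immediate offset through an int64_t out-parameter instead of an unsigned one, and ImplicitNullChecks narrows its PageSize option to cl::opt<int> so the Offset < PageSize comparison is done entirely in signed arithmetic. As a side note (not part of the commit), the minimal standalone C++ sketch below shows what an unsigned out-parameter does to a negative displacement such as the -8 of a pre-indexed AArch64 load; the helper names and the Limit constant are hypothetical stand-ins, with Limit playing the role of a small signed bound like PageSize.

// Illustration only -- not part of the patch. Hypothetical helpers model the
// old (unsigned) and new (int64_t) shape of the Offset out-parameter.
#include <cstdint>
#include <iostream>

// Old-style interface: a negative displacement is forced into an unsigned.
static void reportOffsetUnsigned(unsigned &Offset) {
  Offset = static_cast<unsigned>(-8); // wraps to 4294967288
}

// New-style interface: the displacement keeps its sign and full width.
static void reportOffsetSigned(int64_t &Offset) { Offset = -8; }

int main() {
  const int Limit = 4096; // stand-in for a small signed bound such as PageSize

  unsigned UOffset;
  reportOffsetUnsigned(UOffset);
  int64_t SOffset;
  reportOffsetSigned(SOffset);

  std::cout << UOffset << "\n"; // 4294967288 -- the -8 displacement is gone
  std::cout << SOffset << "\n"; // -8

  // The wrapped unsigned value blows past any small bound, while the signed
  // value compares the way the source displacement reads.
  std::cout << (UOffset < static_cast<unsigned>(Limit)) << "\n"; // 0
  std::cout << (SOffset < Limit) << "\n";                        // 1
}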