-rw-r--r--  llvm/include/llvm/Target/TargetInstrInfo.h           |  9
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp                |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp         | 16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.h           | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp               | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h                 |  6
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                 | 30
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h                   |  4
9 files changed, 63 insertions(+), 28 deletions(-)
diff --git a/llvm/include/llvm/Target/TargetInstrInfo.h b/llvm/include/llvm/Target/TargetInstrInfo.h
index 4b1acdab5b4..48b2bdd157a 100644
--- a/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -827,10 +827,11 @@ public:
return false;
}
- /// Get the base register and byte offset of a load/store instr.
- virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+ /// Get the base register and byte offset of an instruction that reads/writes
+ /// memory.
+ virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
return false;
}
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index f545e8e28bb..50a9c669ea2 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1271,7 +1271,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
SUnit *SU = Loads[Idx];
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+ if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
}
if (LoadRecords.size() < 2)
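
The call site above feeds LoadClusterMutation, which records (SU, BaseReg, Offset) triples and then clusters loads that share a base register and have nearby offsets. A rough standalone model of that grouping step (names and data here are illustrative, not LLVM's):

// Standalone C++ sketch (not part of the commit).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

struct LoadInfo {
  int SUIndex;      // stand-in for the scheduling unit
  unsigned BaseReg; // base register reported by the target hook
  int64_t Offset;   // immediate offset reported by the target hook
};

// Group loads by base register, sort each group by offset, and report
// neighbouring pairs as clustering candidates.
std::vector<std::pair<int, int>>
clusterCandidates(std::vector<LoadInfo> Loads) {
  std::map<unsigned, std::vector<LoadInfo>> ByBase;
  for (const LoadInfo &LI : Loads)
    ByBase[LI.BaseReg].push_back(LI);

  std::vector<std::pair<int, int>> Pairs;
  for (auto &Entry : ByBase) {
    auto &Group = Entry.second;
    if (Group.size() < 2)
      continue;
    std::sort(Group.begin(), Group.end(),
              [](const LoadInfo &A, const LoadInfo &B) {
                return A.Offset < B.Offset;
              });
    for (size_t I = 0; I + 1 < Group.size(); ++I)
      Pairs.emplace_back(Group[I].SUIndex, Group[I + 1].SUIndex);
  }
  return Pairs;
}

int main() {
  std::vector<LoadInfo> Loads = {
      {0, /*BaseReg=*/1, 8}, {1, /*BaseReg=*/1, 0}, {2, /*BaseReg=*/2, 4}};
  for (auto &P : clusterCandidates(Loads))
    std::cout << "cluster SU" << P.first << " with SU" << P.second << '\n';
  return 0;
}
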
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 69def5839e0..cfd4721f6c3 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -629,8 +629,8 @@ AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
// base registers are identical, and the offset of a lower memory access +
// the width doesn't overlap the offset of a higher memory access,
// then the memory accesses are different.
- if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
- getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+ if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+ getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
if (BaseRegA == BaseRegB) {
int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
@@ -1310,9 +1310,9 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
}
bool
-AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
switch (LdSt->getOpcode()) {
default:
return false;
@@ -1336,7 +1336,7 @@ AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
};
}
-bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
+bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
const TargetRegisterInfo *TRI) const {
// Handle only loads/stores with base register followed by immediate offset.
@@ -1434,7 +1434,7 @@ bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
/// Detect opportunities for ldp/stp formation.
///
-/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
unsigned NumLoads) const {
@@ -1443,7 +1443,7 @@ bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
return false;
if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
return false;
- // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
+ // getMemOpBaseRegImmOfs guarantees that oper 2 isImm.
unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
// Allow 6 bits of positive range.
if (Ofs1 > 64)
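
As a rough model of the ldp/stp clustering test above: same opcode on both sides, and the first offset kept within a small positive range. This is a sketch under simplified assumptions; the adjacency condition at the end is assumed, since the rest of the predicate is outside this hunk.

// Standalone C++ sketch (not part of the commit).
#include <cstdint>
#include <iostream>

// Hypothetical summary of a load/store for the purpose of this sketch.
struct LdStDesc {
  unsigned Opcode;   // must match between the two candidates
  int64_t ImmOffset; // scaled immediate offset (operand 2 in the AArch64 forms)
};

// Sketch of the pairing test: same opcode, the first offset stays within a
// small positive range, and the second access immediately follows the first.
bool shouldClusterLoadsSketch(const LdStDesc &First, const LdStDesc &Second) {
  if (First.Opcode != Second.Opcode)
    return false;
  // Allow 6 bits of positive range, as in the hunk above.
  if (First.ImmOffset < 0 || First.ImmOffset > 64)
    return false;
  // Assumed adjacency condition (not shown in this hunk).
  return Second.ImmOffset == First.ImmOffset + 1;
}

int main() {
  LdStDesc A{/*Opcode=*/100, /*ImmOffset=*/8};
  LdStDesc B{/*Opcode=*/100, /*ImmOffset=*/9};
  std::cout << (shouldClusterLoadsSketch(A, B) ? "cluster" : "do not cluster")
            << '\n';
  return 0;
}
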
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 4c326536ca8..39ca44711c4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -90,13 +90,13 @@ public:
/// Hint that pairing the given load or store is unprofitable.
void suppressLdStPair(MachineInstr *MI) const;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const override;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
- bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
- int &Offset, int &Width,
- const TargetRegisterInfo *TRI) const;
+ bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
+ int &Offset, int &Width,
+ const TargetRegisterInfo *TRI) const;
bool enableClusterLoads() const override { return true; }
diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 85b44a20e11..1c6b15790ea 100644
--- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -142,7 +142,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
continue;
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+ if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
if (PrevBaseReg == BaseReg) {
// If this block can take STPs, skip ahead to the next block.
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
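
The pass above walks each block and, whenever two consecutive memory ops report the same base register, treats them as a potential store pair and decides whether pairing should be suppressed. A compressed standalone model of just the detection step (the base-register sequence is hypothetical):

// Standalone C++ sketch (not part of the commit).
#include <iostream>
#include <vector>

int main() {
  // Each element stands for one instruction's base register, as reported by a
  // getMemOpBaseRegImmOfs-style hook.
  std::vector<unsigned> BaseRegs = {3, 3, 5, 7, 7, 7};

  unsigned PrevBaseReg = 0; // 0 plays the role of "no previous base register"
  for (size_t I = 0; I < BaseRegs.size(); ++I) {
    unsigned BaseReg = BaseRegs[I];
    if (PrevBaseReg == BaseReg)
      std::cout << "instructions " << I - 1 << " and " << I
                << " share base register " << BaseReg
                << " -> potential store pair\n";
    PrevBaseReg = BaseReg;
  }
  return 0;
}
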
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d647c25286f..3394573b062 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -200,9 +200,9 @@ static bool isStride64(unsigned Opc) {
}
}
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
unsigned Opc = LdSt->getOpcode();
if (isDS(Opc)) {
const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
@@ -1053,8 +1053,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
unsigned BaseReg0, Offset0;
unsigned BaseReg1, Offset1;
- if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
- getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+ if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
+ getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
"read2 / write2 not expected here yet");
unsigned Width0 = (*MIa->memoperands_begin())->getSize();
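
The AMDGPU disjointness check above reduces to an interval test: with the same base register, accesses [Offset0, Offset0 + Width0) and [Offset1, Offset1 + Width1) cannot alias when the lower interval ends at or before the higher one begins. A minimal standalone sketch of that test:

// Standalone C++ sketch (not part of the commit).
#include <cstdint>
#include <iostream>

// True if two same-base accesses of the given widths provably do not overlap.
bool offsetsDoNotOverlap(int64_t Offset0, int64_t Width0, int64_t Offset1,
                         int64_t Width1) {
  int64_t LowOffset = Offset0 < Offset1 ? Offset0 : Offset1;
  int64_t HighOffset = Offset0 < Offset1 ? Offset1 : Offset0;
  int64_t LowWidth = Offset0 < Offset1 ? Width0 : Width1;
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  // A 4-byte access at offset 0 and a 4-byte access at offset 4 are disjoint.
  std::cout << offsetsDoNotOverlap(0, 4, 4, 4) << '\n'; // prints 1
  // An 8-byte access at offset 0 overlaps a 4-byte access at offset 4.
  std::cout << offsetsDoNotOverlap(0, 8, 4, 4) << '\n'; // prints 0
  return 0;
}
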
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 64b5120841c..6fafb945c99 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -79,9 +79,9 @@ public:
int64_t &Offset1,
int64_t &Offset2) const override;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const final;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const final;
bool shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 928b9c77352..b89c69b729b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3965,6 +3965,36 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
}
}
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
+ const MCInstrDesc &Desc = MemOp->getDesc();
+ int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+ if (MemRefBegin < 0)
+ return false;
+
+ MemRefBegin += X86II::getOperandBias(Desc);
+
+ BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
+ if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+ return false;
+
+ if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+ X86::NoRegister)
+ return false;
+
+ const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+
+ // Displacement can be symbolic
+ if (!DispMO.isImm())
+ return false;
+
+ Offset = DispMO.getImm();
+
+ return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+ X86::NoRegister);
+}
+
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
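
The new X86 override only reports a (base, offset) pair for the simplest x86 addressing mode: base register plus an immediate displacement, with scale 1 and no index register; symbolic displacements are rejected. A standalone sketch of that filter over a hypothetical flattened view of an x86 memory operand:

// Standalone C++17 sketch (not part of the commit).
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>

// Hypothetical flattened x86 memory operand:
// base + Scale * index + displacement (segment ignored for this sketch).
struct X86MemOperand {
  unsigned BaseReg;  // 0 means "no base register" in this sketch
  unsigned Scale;    // 1, 2, 4, or 8
  unsigned IndexReg; // 0 means "no index register" in this sketch
  bool DispIsImm;    // false for symbolic displacements (globals, etc.)
  int64_t Disp;      // valid only when DispIsImm is true
};

// Mirrors the shape of the new override: accept only base + imm displacement.
std::optional<std::pair<unsigned, int64_t>>
getMemOpBaseRegImmOfsSketch(const X86MemOperand &MemOp) {
  if (MemOp.Scale != 1)
    return std::nullopt; // scaled addressing is not a plain base+imm
  if (MemOp.IndexReg != 0)
    return std::nullopt; // an index register makes the offset non-constant
  if (!MemOp.DispIsImm)
    return std::nullopt; // displacement can be symbolic
  return std::make_pair(MemOp.BaseReg, MemOp.Disp);
}

int main() {
  X86MemOperand Simple{/*BaseReg=*/5, /*Scale=*/1, /*IndexReg=*/0,
                       /*DispIsImm=*/true, /*Disp=*/24};
  X86MemOperand Indexed{/*BaseReg=*/5, /*Scale=*/4, /*IndexReg=*/6,
                        /*DispIsImm=*/true, /*Disp=*/0};
  std::cout << "simple: " << (getMemOpBaseRegImmOfsSketch(Simple) ? "ok" : "no")
            << ", indexed: "
            << (getMemOpBaseRegImmOfsSketch(Indexed) ? "ok" : "no") << '\n';
  return 0;
}

Note that in the hunk above the index register is tested twice, once early and again in the final return; both checks test the same condition, so the final one is redundant but harmless.
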
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 3d47d1d96b1..2e9168e3322 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -267,6 +267,10 @@ public:
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
+
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,