Diffstat (limited to 'llvm/lib/Target/AMDGPU')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp         | 28
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h           | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp  |  2
3 files changed, 23 insertions, 19 deletions
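
The patch below is a const-correctness cleanup: the memory-operand analysis
hooks in SIInstrInfo (getMemOperandWithOffset, shouldClusterMemOps,
checkInstOffsetsDoNotOverlap, areMemAccessesTriviallyDisjoint) are changed to
take const MachineInstr / const MachineOperand, since they only inspect
instructions. As a minimal sketch of a call site under the new signatures —
the wrapper function and its names are illustrative, not part of the patch:

// Illustrative only -- not from the patch. Assumes SIInstrInfo.h is on the
// include path, as in any AMDGPU target source file.
#include "SIInstrInfo.h"

using namespace llvm;

static bool getBaseAndOffset(const SIInstrInfo &TII,
                             const TargetRegisterInfo &TRI,
                             const MachineInstr &MI, int64_t &Offset) {
  const MachineOperand *BaseOp = nullptr; // filled in by the hook on success
  // With this patch, both MI and BaseOp may stay const at the call site.
  return TII.getMemOperandWithOffset(MI, BaseOp, Offset, &TRI);
}
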
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 45b0a5b2a8d..8ce3fd2fd58 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -256,8 +256,8 @@ static bool isStride64(unsigned Opc) {
   }
 }
 
-bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
-                                          MachineOperand *&BaseOp,
+bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+                                          const MachineOperand *&BaseOp,
                                           int64_t &Offset,
                                           const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt.getOpcode();
@@ -321,7 +321,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
     if (SOffset && SOffset->isReg())
       return false;
 
-    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (!AddrReg)
       return false;
 
@@ -344,7 +344,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
     if (!OffsetImm)
       return false;
 
-    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
+    const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
     BaseOp = SBaseReg;
     Offset = OffsetImm->getImm();
     assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
@@ -353,7 +353,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
   }
 
   if (isFLAT(LdSt)) {
-    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (VAddr) {
       // Can't analyze 2 offsets.
       if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
@@ -409,11 +409,11 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
   return Base1 == Base2;
 }
 
-bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
-                                      MachineOperand &BaseOp2,
+bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
+                                      const MachineOperand &BaseOp2,
                                       unsigned NumLoads) const {
-  MachineInstr &FirstLdSt = *BaseOp1.getParent();
-  MachineInstr &SecondLdSt = *BaseOp2.getParent();
+  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
+  const MachineInstr &SecondLdSt = *BaseOp2.getParent();
 
   if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
     return false;
@@ -2223,9 +2223,9 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
   return LowOffset + LowWidth <= HighOffset;
 }
 
-bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
-                                               MachineInstr &MIb) const {
-  MachineOperand *BaseOp0, *BaseOp1;
+bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+                                               const MachineInstr &MIb) const {
+  const MachineOperand *BaseOp0, *BaseOp1;
   int64_t Offset0, Offset1;
 
   if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
@@ -2247,8 +2247,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
   return false;
 }
 
-bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
-                                                  MachineInstr &MIb,
+bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                                  const MachineInstr &MIb,
                                                   AliasAnalysis *AA) const {
   assert((MIa.mayLoad() || MIa.mayStore()) &&
          "MIa must load from or modify a memory location");
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 13e3dbd3cfe..a22f5a68ee6 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -127,7 +127,8 @@ private:
   const TargetRegisterClass *
   getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
 
-  bool checkInstOffsetsDoNotOverlap(MachineInstr &MIa, MachineInstr &MIb) const;
+  bool checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+                                    const MachineInstr &MIb) const;
 
   unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
 
@@ -172,11 +173,13 @@ public:
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &LdSt,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const final;
 
-  bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
+  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+                           const MachineOperand &BaseOp2,
                            unsigned NumLoads) const override;
 
   bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
@@ -293,7 +296,8 @@ public:
                              unsigned Kind) const override;
 
   bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const override;
 
   bool isFoldableCopy(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index 343c8b6e7a6..ebbdf80f956 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1956,7 +1956,7 @@ void SIScheduleDAGMI::schedule()
   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
     SUnit *SU = &SUnits[i];
 
-    MachineOperand *BaseLatOp;
+    const MachineOperand *BaseLatOp;
     int64_t OffLatReg;
     if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
       IsLowLatencySU[i] = 1;
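
The scheduler hunk is the knock-on effect of the new signature: BaseLatOp is
presumably the out-parameter for a getMemOperandWithOffset call just past the
visible context, so once the hook writes back a const MachineOperand *, the
local must be const-qualified to match.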