author     Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-06-30 00:01:54 +0000
committer  Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-06-30 00:01:54 +0000
commit     9cfc75c214d42eebd74f9f5f5d20d453404d5db4 (patch)
tree       0f9f9110f564b6287a4db4cdf9e6097d19085c7e /llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
parent     c3701e8b9252498e2ed27a99238f71cb07dd43a4 (diff)
CodeGen: Use MachineInstr& in TargetInstrInfo, NFC
This is mostly a mechanical change to make TargetInstrInfo API take MachineInstr& (instead of MachineInstr* or MachineBasicBlock::iterator) when the argument is expected to be a valid MachineInstr. This is a general API improvement.

Although it would be possible to do this one function at a time, that would demand a quadratic amount of churn since many of these functions call each other. Instead I've done everything as a block and just updated what was necessary.

This is mostly mechanical fixes: adding and removing `*` and `&` operators. The only non-mechanical change is to split ARMBaseInstrInfo::getOperandLatencyImpl out from ARMBaseInstrInfo::getOperandLatency. Previously, the latter took a `MachineInstr*` which it updated to the instruction bundle leader; now, the latter calls the former either with the same `MachineInstr&` or the bundle leader. As a side effect, this removes a bunch of MachineInstr* to MachineBasicBlock::iterator implicit conversions, a necessary step toward fixing PR26753.

Note: I updated WebAssembly, Lanai, and AVR (despite being off-by-default) since it turned out to be easy. I couldn't run tests for AVR since llc doesn't link with it turned on.

llvm-svn: 274189
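A minimal sketch of the before/after shape of the change, using isOperandLegal from the diff below (simplified signatures; the real SIInstrInfo declarations carry default arguments and further overloads, so treat this as illustrative, not verbatim):

    // Before: pointer parameter. Nothing in the type says the instruction
    // must be valid, and MachineInstr* also participates in the implicit
    // MachineInstr* -> MachineBasicBlock::iterator conversion (the PR26753
    // hazard noted above).
    bool isOperandLegal(MachineInstr *MI, unsigned OpIdx,
                        const MachineOperand *MO = nullptr) const;

    // After: reference parameter. The non-null precondition now lives in
    // the type, and call sites must dereference explicitly (*MI), which is
    // exactly the mechanical `*`/`&` churn the message describes.
    bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                        const MachineOperand *MO = nullptr) const;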
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFoldOperands.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 4ba9d73e321..4ecc0fcc623 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -132,7 +132,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                               MachineInstr *MI, unsigned OpNo,
                               MachineOperand *OpToFold,
                               const SIInstrInfo *TII) {
-  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
+  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
 
     // Special case for v_mac_f32_e64 if we are trying to fold into src2
     unsigned Opc = MI->getOpcode();
@@ -159,7 +159,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
     // see if this makes it possible to fold.
     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
-    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
+    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
 
     if (CanCommute) {
       if (CommuteIdx0 == OpNo)
@@ -177,10 +177,10 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
       return false;
 
     if (!CanCommute ||
-        !TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
+        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
       return false;
 
-    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
+    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
       return false;
   }
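The same dereference pattern applies to any out-of-tree caller tracking this API change. A hypothetical example of the call-site update (the variable names and surrounding code are illustrative, not part of this commit):

    // A pass holding an iterator previously leaned on the implicit
    // conversions; after this commit it names the instruction explicitly.
    MachineBasicBlock::iterator I = /* some instruction */;
    MachineInstr &MI = *I;  // dereference once up front
    unsigned Idx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (TII->findCommutedOpIndices(MI, Idx0, Idx1) &&      // was: pointer arg
        TII->commuteInstruction(MI, /*NewMI=*/false, Idx0, Idx1))
      ; // operands swapped in place; retry the fold as the diff above does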