summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJessica Paquette <jpaquette@apple.com>2019-07-10 18:44:57 +0000
committerJessica Paquette <jpaquette@apple.com>2019-07-10 18:44:57 +0000
commit3132968ae969659a05f84705aa9e8f474fbea29d (patch)
tree2842c7798caa4388f37a61fe7257cb9f8722957b
parentc32742139ed3b95344157890781fe74ef98ba425 (diff)
downloadbcm5719-llvm-3132968ae969659a05f84705aa9e8f474fbea29d.tar.gz
bcm5719-llvm-3132968ae969659a05f84705aa9e8f474fbea29d.zip
[GlobalISel][AArch64][NFC] Use getDefIgnoringCopies from Utils where we can
There are a few places where we walk over copies throughout AArch64InstructionSelector.cpp. In Utils, there's a function that does exactly this which we can use instead. Note that the utility function works with the case where we run into a COPY from a physical register. We've run into bugs with this a couple times, so using it should defend us from similar future bugs. Also update opt-fold-compare.mir to show that we still handle physical registers properly. Differential Revision: https://reviews.llvm.org/D64513 llvm-svn: 365683
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp27
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir27
2 files changed, 32 insertions, 22 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index bef690c8361..9ff5827fc42 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -2811,13 +2811,10 @@ void AArch64InstructionSelector::collectShuffleMaskIndices(
"G_SHUFFLE_VECTOR should have a constant mask operand as G_BUILD_VECTOR");
// Find the constant indices.
for (unsigned i = 1, e = MaskDef->getNumOperands(); i < e; ++i) {
- MachineInstr *ScalarDef = MRI.getVRegDef(MaskDef->getOperand(i).getReg());
- assert(ScalarDef && "Could not find vreg def of shufflevec index op");
// Look through copies.
- while (ScalarDef->getOpcode() == TargetOpcode::COPY) {
- ScalarDef = MRI.getVRegDef(ScalarDef->getOperand(1).getReg());
- assert(ScalarDef && "Could not find def of copy operand");
- }
+ MachineInstr *ScalarDef =
+ getDefIgnoringCopies(MaskDef->getOperand(i).getReg(), MRI);
+ assert(ScalarDef && "Could not find vreg def of shufflevec index op");
if (ScalarDef->getOpcode() != TargetOpcode::G_CONSTANT) {
// This must be an undef if not a constant.
assert(ScalarDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
@@ -3229,20 +3226,6 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
//
// cmn z, y
- // Helper lambda to find the def.
- auto FindDef = [&](Register VReg) {
- MachineInstr *Def = MRI.getVRegDef(VReg);
- while (Def) {
- if (Def->getOpcode() != TargetOpcode::COPY)
- break;
- // Copies can be from physical registers. If we hit this, we're done.
- if (TargetRegisterInfo::isPhysicalRegister(Def->getOperand(1).getReg()))
- break;
- Def = MRI.getVRegDef(Def->getOperand(1).getReg());
- }
- return Def;
- };
-
// Helper lambda to detect the subtract followed by the compare.
// Takes in the def of the LHS or RHS, and checks if it's a subtract from 0.
auto IsCMN = [&](MachineInstr *DefMI, const AArch64CC::CondCode &CC) {
@@ -3269,8 +3252,8 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
};
// Check if the RHS or LHS of the G_ICMP is defined by a SUB
- MachineInstr *LHSDef = FindDef(LHS.getReg());
- MachineInstr *RHSDef = FindDef(RHS.getReg());
+ MachineInstr *LHSDef = getDefIgnoringCopies(LHS.getReg(), MRI);
+ MachineInstr *RHSDef = getDefIgnoringCopies(RHS.getReg(), MRI);
CmpInst::Predicate P = (CmpInst::Predicate)Predicate.getPredicate();
const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(P);
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
index b78a2cb2719..c080db05972 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
@@ -478,3 +478,30 @@ body: |
%6:gpr(s32) = G_ICMP intpred(eq), %5(s32), %2
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
+
+...
+---
+name: test_physreg_copy
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: test_physreg_copy
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK: $xzr = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+ ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+ ; CHECK: $w0 = COPY [[CSINCWr]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = COPY $x1
+ ; When we find the defs of the LHS and RHS of the compare, we walk over
+ ; copies. Make sure that we don't crash when we hit a copy from a physical
+ ; register.
+ %7:gpr(s32) = G_ICMP intpred(eq), %0, %1
+ $w0 = COPY %7(s32)
+ RET_ReallyLR implicit $x0
OpenPOWER on IntegriCloud