author    Jonas Paulsson <paulsson@linux.vnet.ibm.com>  2018-02-23 08:30:15 +0000
committer Jonas Paulsson <paulsson@linux.vnet.ibm.com>  2018-02-23 08:30:15 +0000
commit    07d6aea61af9d69a1f230489207713f0d49efe08 (patch)
tree      ca466985779c3f39c7e25a4abb221fd3d485024a
parent    ff52eb5927e1045c042a8e442960de9c49b1935e (diff)
[Mips] Return true in enableMultipleCopyHints().
Enable multiple COPY hints to eliminate more COPYs during register allocation.

Note that this is something all targets should do, see
https://reviews.llvm.org/D38128.

Review: Simon Dardis

llvm-svn: 325870
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterInfo.h                2
-rw-r--r--  llvm/test/CodeGen/Mips/Fast-ISel/sel1.ll               8
-rw-r--r--  llvm/test/CodeGen/Mips/analyzebranch.ll              127
-rw-r--r--  llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll  48
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll         266
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll         266
-rw-r--r--  llvm/test/CodeGen/Mips/o32_cc_byval.ll                11
-rw-r--r--  llvm/test/CodeGen/Mips/select.ll                     244
8 files changed, 493 insertions, 479 deletions
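
The functional part of the patch is the one-line hook override added to MipsRegisterInfo.h; every other file is an updated test expectation. Below is a minimal, self-contained C++ sketch of the pattern, assuming a TargetRegisterInfo-style base class whose hook defaults to false (the default at the time of this patch); RegisterInfoBase and MipsLikeRegisterInfo are hypothetical stand-ins for illustration, not the actual LLVM classes.

// Sketch only: illustrates the override pattern used by this patch.
// The real base class is LLVM's TargetRegisterInfo (via MipsGenRegisterInfo);
// the class names below are hypothetical stand-ins.
#include <cstdio>

struct RegisterInfoBase {                      // stand-in for TargetRegisterInfo
  // Assumed base-class default: only a single copy hint is recorded.
  virtual bool enableMultipleCopyHints() const { return false; }
  virtual ~RegisterInfoBase() = default;
};

struct MipsLikeRegisterInfo : RegisterInfoBase {
  // The entire change to MipsRegisterInfo.h is this one-line override,
  // letting the register allocator collect several copy hints per virtual
  // register so more COPYs can be coalesced away.
  bool enableMultipleCopyHints() const override { return true; }
};

int main() {
  MipsLikeRegisterInfo RI;
  std::printf("multiple copy hints: %s\n",
              RI.enableMultipleCopyHints() ? "enabled" : "disabled");
}
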
diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.h b/llvm/lib/Target/Mips/MipsRegisterInfo.h
index fe8d7953ec8..53c42bccaf2 100644
--- a/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -57,6 +57,8 @@ public:
BitVector getReservedRegs(const MachineFunction &MF) const override;
+ bool enableMultipleCopyHints() const override { return true; }
+
bool requiresRegisterScavenging(const MachineFunction &MF) const override;
bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/sel1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/sel1.ll
index 8c258513072..04d94eed77a 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/sel1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/sel1.ll
@@ -89,12 +89,12 @@ entry:
define float @sel_float2(float %k, float %l, i32 %j) {
; CHECK-LABEL: sel_float2:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mov.s $f0, $f14
; CHECK-NEXT: xor $1, $6, $zero
; CHECK-NEXT: sltu $1, $zero, $1
; CHECK-NEXT: andi $1, $1, 1
-; CHECK-NEXT: movn.s $f14, $f12, $1
; CHECK-NEXT: jr $ra
-; CHECK-NEXT: mov.s $f0, $f14
+; CHECK-NEXT: movn.s $f0, $f12, $1
entry:
%cond = icmp ne i32 %j, 0
%res = select i1 %cond, float %k, float %l
@@ -121,13 +121,13 @@ entry:
define double @sel_double2(double %k, double %l, i32 %j) {
; CHECK-LABEL: sel_double2:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mov.d $f0, $f14
; CHECK-NEXT: lw $1, 16($sp)
; CHECK-NEXT: xor $1, $1, $zero
; CHECK-NEXT: sltu $1, $zero, $1
; CHECK-NEXT: andi $1, $1, 1
-; CHECK-NEXT: movn.d $f14, $f12, $1
; CHECK-NEXT: jr $ra
-; CHECK-NEXT: mov.d $f0, $f14
+; CHECK-NEXT: movn.d $f0, $f12, $1
entry:
%cond = icmp ne i32 %j, 0
%res = select i1 %cond, double %k, double %l
diff --git a/llvm/test/CodeGen/Mips/analyzebranch.ll b/llvm/test/CodeGen/Mips/analyzebranch.ll
index 23c312c613c..5c9c9999c86 100644
--- a/llvm/test/CodeGen/Mips/analyzebranch.ll
+++ b/llvm/test/CodeGen/Mips/analyzebranch.ll
@@ -10,139 +10,144 @@
define double @foo(double %a, double %b) nounwind readnone {
; MIPS32-LABEL: foo:
; MIPS32: # %bb.0: # %entry
-; MIPS32-NEXT: mtc1 $zero, $f0
-; MIPS32-NEXT: mtc1 $zero, $f1
-; MIPS32-NEXT: c.ule.d $f12, $f0
+; MIPS32-NEXT: mov.d $f0, $f12
+; MIPS32-NEXT: mtc1 $zero, $f2
+; MIPS32-NEXT: mtc1 $zero, $f3
+; MIPS32-NEXT: c.ule.d $f0, $f2
; MIPS32-NEXT: bc1f $BB0_2
; MIPS32-NEXT: nop
; MIPS32-NEXT: # %bb.1: # %if.else
-; MIPS32-NEXT: mtc1 $zero, $f12
-; MIPS32-NEXT: mtc1 $zero, $f13
-; MIPS32-NEXT: c.ule.d $f14, $f12
+; MIPS32-NEXT: mtc1 $zero, $f0
+; MIPS32-NEXT: mtc1 $zero, $f1
+; MIPS32-NEXT: c.ule.d $f14, $f0
; MIPS32-NEXT: bc1t $BB0_3
; MIPS32-NEXT: nop
; MIPS32-NEXT: $BB0_2: # %if.end6
-; MIPS32-NEXT: sub.d $f0, $f14, $f12
-; MIPS32-NEXT: add.d $f12, $f0, $f0
+; MIPS32-NEXT: sub.d $f0, $f14, $f0
+; MIPS32-NEXT: add.d $f0, $f0, $f0
; MIPS32-NEXT: $BB0_3: # %return
; MIPS32-NEXT: jr $ra
-; MIPS32-NEXT: mov.d $f0, $f12
+; MIPS32-NEXT: nop
;
; MIPS32R2-LABEL: foo:
; MIPS32R2: # %bb.0: # %entry
-; MIPS32R2-NEXT: mtc1 $zero, $f0
-; MIPS32R2-NEXT: mthc1 $zero, $f0
-; MIPS32R2-NEXT: c.ule.d $f12, $f0
+; MIPS32R2-NEXT: mov.d $f0, $f12
+; MIPS32R2-NEXT: mtc1 $zero, $f2
+; MIPS32R2-NEXT: mthc1 $zero, $f2
+; MIPS32R2-NEXT: c.ule.d $f0, $f2
; MIPS32R2-NEXT: bc1f $BB0_2
; MIPS32R2-NEXT: nop
; MIPS32R2-NEXT: # %bb.1: # %if.else
-; MIPS32R2-NEXT: mtc1 $zero, $f12
-; MIPS32R2-NEXT: mthc1 $zero, $f12
-; MIPS32R2-NEXT: c.ule.d $f14, $f12
+; MIPS32R2-NEXT: mtc1 $zero, $f0
+; MIPS32R2-NEXT: mthc1 $zero, $f0
+; MIPS32R2-NEXT: c.ule.d $f14, $f0
; MIPS32R2-NEXT: bc1t $BB0_3
; MIPS32R2-NEXT: nop
; MIPS32R2-NEXT: $BB0_2: # %if.end6
-; MIPS32R2-NEXT: sub.d $f0, $f14, $f12
-; MIPS32R2-NEXT: add.d $f12, $f0, $f0
+; MIPS32R2-NEXT: sub.d $f0, $f14, $f0
+; MIPS32R2-NEXT: add.d $f0, $f0, $f0
; MIPS32R2-NEXT: $BB0_3: # %return
; MIPS32R2-NEXT: jr $ra
-; MIPS32R2-NEXT: mov.d $f0, $f12
+; MIPS32R2-NEXT: nop
;
; MIPS32r6-LABEL: foo:
; MIPS32r6: # %bb.0: # %entry
-; MIPS32r6-NEXT: mtc1 $zero, $f0
-; MIPS32r6-NEXT: mthc1 $zero, $f0
-; MIPS32r6-NEXT: cmp.lt.d $f0, $f0, $f12
-; MIPS32r6-NEXT: mfc1 $1, $f0
+; MIPS32r6-NEXT: mov.d $f0, $f12
+; MIPS32r6-NEXT: mtc1 $zero, $f1
+; MIPS32r6-NEXT: mthc1 $zero, $f1
+; MIPS32r6-NEXT: cmp.lt.d $f1, $f1, $f0
+; MIPS32r6-NEXT: mfc1 $1, $f1
; MIPS32r6-NEXT: andi $1, $1, 1
; MIPS32r6-NEXT: bnezc $1, $BB0_2
; MIPS32r6-NEXT: # %bb.1: # %if.else
-; MIPS32r6-NEXT: mtc1 $zero, $f12
-; MIPS32r6-NEXT: mthc1 $zero, $f12
-; MIPS32r6-NEXT: cmp.ule.d $f0, $f14, $f12
-; MIPS32r6-NEXT: mfc1 $1, $f0
+; MIPS32r6-NEXT: mtc1 $zero, $f0
+; MIPS32r6-NEXT: mthc1 $zero, $f0
+; MIPS32r6-NEXT: cmp.ule.d $f1, $f14, $f0
+; MIPS32r6-NEXT: mfc1 $1, $f1
; MIPS32r6-NEXT: andi $1, $1, 1
; MIPS32r6-NEXT: bnezc $1, $BB0_3
; MIPS32r6-NEXT: $BB0_2: # %if.end6
-; MIPS32r6-NEXT: sub.d $f0, $f14, $f12
-; MIPS32r6-NEXT: add.d $f12, $f0, $f0
+; MIPS32r6-NEXT: sub.d $f0, $f14, $f0
+; MIPS32r6-NEXT: add.d $f0, $f0, $f0
; MIPS32r6-NEXT: $BB0_3: # %return
-; MIPS32r6-NEXT: jr $ra
-; MIPS32r6-NEXT: mov.d $f0, $f12
+; MIPS32r6-NEXT: jrc $ra
;
; MIPS4-LABEL: foo:
; MIPS4: # %bb.0: # %entry
-; MIPS4-NEXT: dmtc1 $zero, $f0
-; MIPS4-NEXT: c.ule.d $f12, $f0
+; MIPS4-NEXT: mov.d $f0, $f12
+; MIPS4-NEXT: dmtc1 $zero, $f1
+; MIPS4-NEXT: c.ule.d $f0, $f1
; MIPS4-NEXT: bc1f .LBB0_2
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.1: # %if.else
-; MIPS4-NEXT: dmtc1 $zero, $f12
-; MIPS4-NEXT: c.ule.d $f13, $f12
+; MIPS4-NEXT: dmtc1 $zero, $f0
+; MIPS4-NEXT: c.ule.d $f13, $f0
; MIPS4-NEXT: bc1t .LBB0_3
; MIPS4-NEXT: nop
; MIPS4-NEXT: .LBB0_2: # %if.end6
-; MIPS4-NEXT: sub.d $f0, $f13, $f12
-; MIPS4-NEXT: add.d $f12, $f0, $f0
+; MIPS4-NEXT: sub.d $f0, $f13, $f0
+; MIPS4-NEXT: add.d $f0, $f0, $f0
; MIPS4-NEXT: .LBB0_3: # %return
; MIPS4-NEXT: jr $ra
-; MIPS4-NEXT: mov.d $f0, $f12
+; MIPS4-NEXT: nop
;
; MIPS64-LABEL: foo:
; MIPS64: # %bb.0: # %entry
-; MIPS64-NEXT: dmtc1 $zero, $f0
-; MIPS64-NEXT: c.ule.d $f12, $f0
+; MIPS64-NEXT: mov.d $f0, $f12
+; MIPS64-NEXT: dmtc1 $zero, $f1
+; MIPS64-NEXT: c.ule.d $f0, $f1
; MIPS64-NEXT: bc1f .LBB0_2
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.1: # %if.else
-; MIPS64-NEXT: dmtc1 $zero, $f12
-; MIPS64-NEXT: c.ule.d $f13, $f12
+; MIPS64-NEXT: dmtc1 $zero, $f0
+; MIPS64-NEXT: c.ule.d $f13, $f0
; MIPS64-NEXT: bc1t .LBB0_3
; MIPS64-NEXT: nop
; MIPS64-NEXT: .LBB0_2: # %if.end6
-; MIPS64-NEXT: sub.d $f0, $f13, $f12
-; MIPS64-NEXT: add.d $f12, $f0, $f0
+; MIPS64-NEXT: sub.d $f0, $f13, $f0
+; MIPS64-NEXT: add.d $f0, $f0, $f0
; MIPS64-NEXT: .LBB0_3: # %return
; MIPS64-NEXT: jr $ra
-; MIPS64-NEXT: mov.d $f0, $f12
+; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: foo:
; MIPS64R2: # %bb.0: # %entry
-; MIPS64R2-NEXT: dmtc1 $zero, $f0
-; MIPS64R2-NEXT: c.ule.d $f12, $f0
+; MIPS64R2-NEXT: mov.d $f0, $f12
+; MIPS64R2-NEXT: dmtc1 $zero, $f1
+; MIPS64R2-NEXT: c.ule.d $f0, $f1
; MIPS64R2-NEXT: bc1f .LBB0_2
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.1: # %if.else
-; MIPS64R2-NEXT: dmtc1 $zero, $f12
-; MIPS64R2-NEXT: c.ule.d $f13, $f12
+; MIPS64R2-NEXT: dmtc1 $zero, $f0
+; MIPS64R2-NEXT: c.ule.d $f13, $f0
; MIPS64R2-NEXT: bc1t .LBB0_3
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: .LBB0_2: # %if.end6
-; MIPS64R2-NEXT: sub.d $f0, $f13, $f12
-; MIPS64R2-NEXT: add.d $f12, $f0, $f0
+; MIPS64R2-NEXT: sub.d $f0, $f13, $f0
+; MIPS64R2-NEXT: add.d $f0, $f0, $f0
; MIPS64R2-NEXT: .LBB0_3: # %return
; MIPS64R2-NEXT: jr $ra
-; MIPS64R2-NEXT: mov.d $f0, $f12
+; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: foo:
; MIPS64R6: # %bb.0: # %entry
-; MIPS64R6-NEXT: dmtc1 $zero, $f0
-; MIPS64R6-NEXT: cmp.lt.d $f0, $f0, $f12
-; MIPS64R6-NEXT: mfc1 $1, $f0
+; MIPS64R6-NEXT: mov.d $f0, $f12
+; MIPS64R6-NEXT: dmtc1 $zero, $f1
+; MIPS64R6-NEXT: cmp.lt.d $f1, $f1, $f0
+; MIPS64R6-NEXT: mfc1 $1, $f1
; MIPS64R6-NEXT: andi $1, $1, 1
; MIPS64R6-NEXT: bnezc $1, .LBB0_2
; MIPS64R6-NEXT: # %bb.1: # %if.else
-; MIPS64R6-NEXT: dmtc1 $zero, $f12
-; MIPS64R6-NEXT: cmp.ule.d $f0, $f13, $f12
-; MIPS64R6-NEXT: mfc1 $1, $f0
+; MIPS64R6-NEXT: dmtc1 $zero, $f0
+; MIPS64R6-NEXT: cmp.ule.d $f1, $f13, $f0
+; MIPS64R6-NEXT: mfc1 $1, $f1
; MIPS64R6-NEXT: andi $1, $1, 1
; MIPS64R6-NEXT: bnezc $1, .LBB0_3
; MIPS64R6-NEXT: .LBB0_2: # %if.end6
-; MIPS64R6-NEXT: sub.d $f0, $f13, $f12
-; MIPS64R6-NEXT: add.d $f12, $f0, $f0
+; MIPS64R6-NEXT: sub.d $f0, $f13, $f0
+; MIPS64R6-NEXT: add.d $f0, $f0, $f0
; MIPS64R6-NEXT: .LBB0_3: # %return
-; MIPS64R6-NEXT: jr $ra
-; MIPS64R6-NEXT: mov.d $f0, $f12
+; MIPS64R6-NEXT: jrc $ra
entry:
%cmp = fcmp ogt double %a, 0.000000e+00
br i1 %cmp, label %if.end6, label %if.else
diff --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll b/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
index 20e89136d87..0d598abc156 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
@@ -30,8 +30,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: addiu $sp, $sp, -24
; MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; MIPS32R2-NEXT: move $1, $4
-; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: move $25, $4
; MIPS32R2-NEXT: jalr.hb $25
; MIPS32R2-NEXT: addiu $4, $zero, 13
; MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -42,8 +41,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; MIPS32R6: # %bb.0: # %entry
; MIPS32R6-NEXT: addiu $sp, $sp, -24
; MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; MIPS32R6-NEXT: move $1, $4
-; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: move $25, $4
; MIPS32R6-NEXT: jalr.hb $25
; MIPS32R6-NEXT: addiu $4, $zero, 13
; MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -54,8 +52,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: daddiu $sp, $sp, -16
; MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; MIPS64R2-NEXT: move $1, $4
-; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: move $25, $4
; MIPS64R2-NEXT: jalr.hb $25
; MIPS64R2-NEXT: daddiu $4, $zero, 13
; MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -66,8 +63,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: daddiu $sp, $sp, -16
; MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; MIPS64R6-NEXT: move $1, $4
-; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: move $25, $4
; MIPS64R6-NEXT: jalr.hb $25
; MIPS64R6-NEXT: daddiu $4, $zero, 13
; MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -78,8 +74,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; PIC-MIPS32R2: # %bb.0: # %entry
; PIC-MIPS32R2-NEXT: addiu $sp, $sp, -24
; PIC-MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; PIC-MIPS32R2-NEXT: move $1, $4
-; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: move $25, $4
; PIC-MIPS32R2-NEXT: jalr.hb $25
; PIC-MIPS32R2-NEXT: addiu $4, $zero, 13
; PIC-MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -90,8 +85,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; PIC-MIPS32R6: # %bb.0: # %entry
; PIC-MIPS32R6-NEXT: addiu $sp, $sp, -24
; PIC-MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; PIC-MIPS32R6-NEXT: move $1, $4
-; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: move $25, $4
; PIC-MIPS32R6-NEXT: jalr.hb $25
; PIC-MIPS32R6-NEXT: addiu $4, $zero, 13
; PIC-MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -102,8 +96,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; PIC-MIPS64R2: # %bb.0: # %entry
; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, -16
; PIC-MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; PIC-MIPS64R2-NEXT: move $1, $4
-; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: move $25, $4
; PIC-MIPS64R2-NEXT: jalr.hb $25
; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 13
; PIC-MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -114,8 +107,7 @@ define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
; PIC-MIPS64R6: # %bb.0: # %entry
; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, -16
; PIC-MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; PIC-MIPS64R6-NEXT: move $1, $4
-; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: move $25, $4
; PIC-MIPS64R6-NEXT: jalr.hb $25
; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 13
; PIC-MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -129,57 +121,49 @@ entry:
define i32 @fooTail(i32 (i32)* nocapture %f1) nounwind {
; MIPS32R2-LABEL: fooTail:
; MIPS32R2: # %bb.0: # %entry
-; MIPS32R2-NEXT: move $1, $4
-; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: move $25, $4
; MIPS32R2-NEXT: jr.hb $25
; MIPS32R2-NEXT: addiu $4, $zero, 14
;
; MIPS32R6-LABEL: fooTail:
; MIPS32R6: # %bb.0: # %entry
-; MIPS32R6-NEXT: move $1, $4
-; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: move $25, $4
; MIPS32R6-NEXT: jr.hb $25
; MIPS32R6-NEXT: addiu $4, $zero, 14
;
; MIPS64R2-LABEL: fooTail:
; MIPS64R2: # %bb.0: # %entry
-; MIPS64R2-NEXT: move $1, $4
-; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: move $25, $4
; MIPS64R2-NEXT: jr.hb $25
; MIPS64R2-NEXT: daddiu $4, $zero, 14
;
; MIPS64R6-LABEL: fooTail:
; MIPS64R6: # %bb.0: # %entry
-; MIPS64R6-NEXT: move $1, $4
-; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: move $25, $4
; MIPS64R6-NEXT: jr.hb $25
; MIPS64R6-NEXT: daddiu $4, $zero, 14
;
; PIC-MIPS32R2-LABEL: fooTail:
; PIC-MIPS32R2: # %bb.0: # %entry
-; PIC-MIPS32R2-NEXT: move $1, $4
-; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: move $25, $4
; PIC-MIPS32R2-NEXT: jr.hb $25
; PIC-MIPS32R2-NEXT: addiu $4, $zero, 14
;
; PIC-MIPS32R6-LABEL: fooTail:
; PIC-MIPS32R6: # %bb.0: # %entry
-; PIC-MIPS32R6-NEXT: move $1, $4
-; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: move $25, $4
; PIC-MIPS32R6-NEXT: jr.hb $25
; PIC-MIPS32R6-NEXT: addiu $4, $zero, 14
;
; PIC-MIPS64R2-LABEL: fooTail:
; PIC-MIPS64R2: # %bb.0: # %entry
-; PIC-MIPS64R2-NEXT: move $1, $4
-; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: move $25, $4
; PIC-MIPS64R2-NEXT: jr.hb $25
; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 14
;
; PIC-MIPS64R6-LABEL: fooTail:
; PIC-MIPS64R6: # %bb.0: # %entry
-; PIC-MIPS64R6-NEXT: move $1, $4
-; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: move $25, $4
; PIC-MIPS64R6-NEXT: jr.hb $25
; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 14
entry:
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll b/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
index 40d74157ab8..715c35fdfe3 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
@@ -76,19 +76,19 @@ define double @tst_select_i1_double(i1 signext %s, double %x, double %y) {
; M3: # %bb.0: # %entry
; M3-NEXT: andi $1, $4, 1
; M3-NEXT: bnez $1, .LBB0_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f13, $f14
+; M3-NEXT: mov.d $f0, $f14
; M3-NEXT: .LBB0_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f13
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_i1_double:
; CMOV64: # %bb.0: # %entry
+; CMOV64-NEXT: mov.d $f0, $f14
; CMOV64-NEXT: andi $1, $4, 1
-; CMOV64-NEXT: movn.d $f14, $f13, $1
; CMOV64-NEXT: jr $ra
-; CMOV64-NEXT: mov.d $f0, $f14
+; CMOV64-NEXT: movn.d $f0, $f13, $1
;
; 64R6-LABEL: tst_select_i1_double:
; 64R6: # %bb.0: # %entry
@@ -124,28 +124,28 @@ define double @tst_select_i1_double_reordered(double %x, double %y,
; M2-NEXT: lw $1, 16($sp)
; M2-NEXT: andi $1, $1, 1
; M2-NEXT: bnez $1, $BB1_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB1_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_i1_double_reordered:
; CMOV32R1: # %bb.0: # %entry
+; CMOV32R1-NEXT: mov.d $f0, $f14
; CMOV32R1-NEXT: lw $1, 16($sp)
; CMOV32R1-NEXT: andi $1, $1, 1
-; CMOV32R1-NEXT: movn.d $f14, $f12, $1
; CMOV32R1-NEXT: jr $ra
-; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: movn.d $f0, $f12, $1
;
; CMOV32R2-LABEL: tst_select_i1_double_reordered:
; CMOV32R2: # %bb.0: # %entry
+; CMOV32R2-NEXT: mov.d $f0, $f14
; CMOV32R2-NEXT: lw $1, 16($sp)
; CMOV32R2-NEXT: andi $1, $1, 1
-; CMOV32R2-NEXT: movn.d $f14, $f12, $1
; CMOV32R2-NEXT: jr $ra
-; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: movn.d $f0, $f12, $1
;
; 32R6-LABEL: tst_select_i1_double_reordered:
; 32R6: # %bb.0: # %entry
@@ -158,19 +158,19 @@ define double @tst_select_i1_double_reordered(double %x, double %y,
; M3: # %bb.0: # %entry
; M3-NEXT: andi $1, $6, 1
; M3-NEXT: bnez $1, .LBB1_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB1_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_i1_double_reordered:
; CMOV64: # %bb.0: # %entry
+; CMOV64-NEXT: mov.d $f0, $f13
; CMOV64-NEXT: andi $1, $6, 1
-; CMOV64-NEXT: movn.d $f13, $f12, $1
; CMOV64-NEXT: jr $ra
-; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: movn.d $f0, $f12, $1
;
; 64R6-LABEL: tst_select_i1_double_reordered:
; 64R6: # %bb.0: # %entry
@@ -180,11 +180,11 @@ define double @tst_select_i1_double_reordered(double %x, double %y,
;
; MM32R3-LABEL: tst_select_i1_double_reordered:
; MM32R3: # %bb.0: # %entry
+; MM32R3-NEXT: mov.d $f0, $f14
; MM32R3-NEXT: lw $2, 16($sp)
; MM32R3-NEXT: andi16 $2, $2, 1
-; MM32R3-NEXT: movn.d $f14, $f12, $2
; MM32R3-NEXT: jr $ra
-; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: movn.d $f0, $f12, $2
;
; MM32R6-LABEL: tst_select_i1_double_reordered:
; MM32R6: # %bb.0: # %entry
@@ -201,28 +201,29 @@ entry:
define double @tst_select_fcmp_olt_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_olt_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.olt.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.olt.d $f0, $f14
; M2-NEXT: bc1t $BB2_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB2_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_olt_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.olt.d $f12, $f14
-; CMOV32R1-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.olt.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_olt_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.olt.d $f12, $f14
-; CMOV32R2-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.olt.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_olt_double:
; 32R6: # %bb.0: # %entry
@@ -234,21 +235,22 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_olt_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.olt.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.olt.d $f0, $f13
; M3-NEXT: bc1t .LBB2_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB2_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_olt_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.olt.d $f12, $f13
-; CMOV64-NEXT: movt.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.olt.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_olt_double:
; 64R6: # %bb.0: # %entry
@@ -260,10 +262,10 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_olt_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.olt.d $f12, $f14
-; MM32R3-NEXT: movt.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.olt.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_olt_double:
; MM32R6: # %bb.0: # %entry
@@ -281,28 +283,29 @@ entry:
define double @tst_select_fcmp_ole_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_ole_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ole.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.ole.d $f0, $f14
; M2-NEXT: bc1t $BB3_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB3_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_ole_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ole.d $f12, $f14
-; CMOV32R1-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.ole.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ole_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ole.d $f12, $f14
-; CMOV32R2-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.ole.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_ole_double:
; 32R6: # %bb.0: # %entry
@@ -314,21 +317,22 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_ole_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ole.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.ole.d $f0, $f13
; M3-NEXT: bc1t .LBB3_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB3_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_ole_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ole.d $f12, $f13
-; CMOV64-NEXT: movt.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.ole.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_ole_double:
; 64R6: # %bb.0: # %entry
@@ -340,10 +344,10 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_ole_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ole.d $f12, $f14
-; MM32R3-NEXT: movt.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.ole.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_ole_double:
; MM32R6: # %bb.0: # %entry
@@ -361,28 +365,29 @@ entry:
define double @tst_select_fcmp_ogt_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_ogt_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ule.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.ule.d $f0, $f14
; M2-NEXT: bc1f $BB4_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB4_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_ogt_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ule.d $f12, $f14
-; CMOV32R1-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.ule.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ogt_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ule.d $f12, $f14
-; CMOV32R2-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.ule.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_ogt_double:
; 32R6: # %bb.0: # %entry
@@ -394,21 +399,22 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_ogt_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ule.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.ule.d $f0, $f13
; M3-NEXT: bc1f .LBB4_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB4_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_ogt_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ule.d $f12, $f13
-; CMOV64-NEXT: movf.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.ule.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_ogt_double:
; 64R6: # %bb.0: # %entry
@@ -420,10 +426,10 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_ogt_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ule.d $f12, $f14
-; MM32R3-NEXT: movf.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.ule.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_ogt_double:
; MM32R6: # %bb.0: # %entry
@@ -441,28 +447,29 @@ entry:
define double @tst_select_fcmp_oge_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_oge_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ult.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.ult.d $f0, $f14
; M2-NEXT: bc1f $BB5_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB5_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_oge_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ult.d $f12, $f14
-; CMOV32R1-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.ult.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oge_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ult.d $f12, $f14
-; CMOV32R2-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.ult.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_oge_double:
; 32R6: # %bb.0: # %entry
@@ -474,21 +481,22 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_oge_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ult.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.ult.d $f0, $f13
; M3-NEXT: bc1f .LBB5_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB5_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_oge_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ult.d $f12, $f13
-; CMOV64-NEXT: movf.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.ult.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_oge_double:
; 64R6: # %bb.0: # %entry
@@ -500,10 +508,10 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_oge_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ult.d $f12, $f14
-; MM32R3-NEXT: movf.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.ult.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_oge_double:
; MM32R6: # %bb.0: # %entry
@@ -521,28 +529,29 @@ entry:
define double @tst_select_fcmp_oeq_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_oeq_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.eq.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.eq.d $f0, $f14
; M2-NEXT: bc1t $BB6_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB6_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_oeq_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.eq.d $f12, $f14
-; CMOV32R1-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.eq.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oeq_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.eq.d $f12, $f14
-; CMOV32R2-NEXT: movt.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.eq.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_oeq_double:
; 32R6: # %bb.0: # %entry
@@ -554,21 +563,22 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_oeq_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.eq.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.eq.d $f0, $f13
; M3-NEXT: bc1t .LBB6_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB6_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_oeq_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.eq.d $f12, $f13
-; CMOV64-NEXT: movt.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.eq.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_oeq_double:
; 64R6: # %bb.0: # %entry
@@ -580,10 +590,10 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_oeq_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.eq.d $f12, $f14
-; MM32R3-NEXT: movt.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.eq.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_oeq_double:
; MM32R6: # %bb.0: # %entry
@@ -601,28 +611,29 @@ entry:
define double @tst_select_fcmp_one_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_one_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ueq.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: c.ueq.d $f0, $f14
; M2-NEXT: bc1f $BB7_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.d $f12, $f14
+; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB7_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.d $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_one_double:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ueq.d $f12, $f14
-; CMOV32R1-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.d $f0, $f14
+; CMOV32R1-NEXT: c.ueq.d $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_one_double:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ueq.d $f12, $f14
-; CMOV32R2-NEXT: movf.d $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.d $f0, $f14
+; CMOV32R2-NEXT: c.ueq.d $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_one_double:
; 32R6: # %bb.0: # %entry
@@ -635,21 +646,22 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_one_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ueq.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: c.ueq.d $f0, $f13
; M3-NEXT: bc1f .LBB7_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.d $f12, $f13
+; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB7_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.d $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_one_double:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ueq.d $f12, $f13
-; CMOV64-NEXT: movf.d $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.d $f0, $f13
+; CMOV64-NEXT: c.ueq.d $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_one_double:
; 64R6: # %bb.0: # %entry
@@ -662,10 +674,10 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
;
; MM32R3-LABEL: tst_select_fcmp_one_double:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ueq.d $f12, $f14
-; MM32R3-NEXT: movf.d $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.d $f0, $f14
+; MM32R3-NEXT: c.ueq.d $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_one_double:
; MM32R6: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll b/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
index c0ac43f662f..c04601c64b0 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
@@ -71,19 +71,19 @@ define float @tst_select_i1_float(i1 signext %s, float %x, float %y) {
; M3: # %bb.0: # %entry
; M3-NEXT: andi $1, $4, 1
; M3-NEXT: bnez $1, .LBB0_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f13, $f14
+; M3-NEXT: mov.s $f0, $f14
; M3-NEXT: .LBB0_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f13
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_i1_float:
; CMOV64: # %bb.0: # %entry
+; CMOV64-NEXT: mov.s $f0, $f14
; CMOV64-NEXT: andi $1, $4, 1
-; CMOV64-NEXT: movn.s $f14, $f13, $1
; CMOV64-NEXT: jr $ra
-; CMOV64-NEXT: mov.s $f0, $f14
+; CMOV64-NEXT: movn.s $f0, $f13, $1
;
; 64R6-LABEL: tst_select_i1_float:
; 64R6: # %bb.0: # %entry
@@ -116,26 +116,26 @@ define float @tst_select_i1_float_reordered(float %x, float %y,
; M2: # %bb.0: # %entry
; M2-NEXT: andi $1, $6, 1
; M2-NEXT: bnez $1, $BB1_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB1_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_i1_float_reordered:
; CMOV32R1: # %bb.0: # %entry
+; CMOV32R1-NEXT: mov.s $f0, $f14
; CMOV32R1-NEXT: andi $1, $6, 1
-; CMOV32R1-NEXT: movn.s $f14, $f12, $1
; CMOV32R1-NEXT: jr $ra
-; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: movn.s $f0, $f12, $1
;
; CMOV32R2-LABEL: tst_select_i1_float_reordered:
; CMOV32R2: # %bb.0: # %entry
+; CMOV32R2-NEXT: mov.s $f0, $f14
; CMOV32R2-NEXT: andi $1, $6, 1
-; CMOV32R2-NEXT: movn.s $f14, $f12, $1
; CMOV32R2-NEXT: jr $ra
-; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: movn.s $f0, $f12, $1
;
; 32R6-LABEL: tst_select_i1_float_reordered:
; 32R6: # %bb.0: # %entry
@@ -147,19 +147,19 @@ define float @tst_select_i1_float_reordered(float %x, float %y,
; M3: # %bb.0: # %entry
; M3-NEXT: andi $1, $6, 1
; M3-NEXT: bnez $1, .LBB1_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB1_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_i1_float_reordered:
; CMOV64: # %bb.0: # %entry
+; CMOV64-NEXT: mov.s $f0, $f13
; CMOV64-NEXT: andi $1, $6, 1
-; CMOV64-NEXT: movn.s $f13, $f12, $1
; CMOV64-NEXT: jr $ra
-; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: movn.s $f0, $f12, $1
;
; 64R6-LABEL: tst_select_i1_float_reordered:
; 64R6: # %bb.0: # %entry
@@ -169,10 +169,10 @@ define float @tst_select_i1_float_reordered(float %x, float %y,
;
; MM32R3-LABEL: tst_select_i1_float_reordered:
; MM32R3: # %bb.0: # %entry
+; MM32R3-NEXT: mov.s $f0, $f14
; MM32R3-NEXT: andi16 $2, $6, 1
-; MM32R3-NEXT: movn.s $f14, $f12, $2
; MM32R3-NEXT: jr $ra
-; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: movn.s $f0, $f12, $2
;
; MM32R6-LABEL: tst_select_i1_float_reordered:
; MM32R6: # %bb.0: # %entry
@@ -188,28 +188,29 @@ entry:
define float @tst_select_fcmp_olt_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_olt_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.olt.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.olt.s $f0, $f14
; M2-NEXT: bc1t $BB2_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB2_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_olt_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.olt.s $f12, $f14
-; CMOV32R1-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.olt.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_olt_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.olt.s $f12, $f14
-; CMOV32R2-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.olt.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_olt_float:
; 32R6: # %bb.0: # %entry
@@ -219,21 +220,22 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_olt_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.olt.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.olt.s $f0, $f13
; M3-NEXT: bc1t .LBB2_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB2_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_olt_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.olt.s $f12, $f13
-; CMOV64-NEXT: movt.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.olt.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_olt_float:
; 64R6: # %bb.0: # %entry
@@ -243,10 +245,10 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_olt_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.olt.s $f12, $f14
-; MM32R3-NEXT: movt.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.olt.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_olt_float:
; MM32R6: # %bb.0: # %entry
@@ -262,28 +264,29 @@ entry:
define float @tst_select_fcmp_ole_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_ole_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ole.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.ole.s $f0, $f14
; M2-NEXT: bc1t $BB3_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB3_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_ole_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ole.s $f12, $f14
-; CMOV32R1-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.ole.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ole_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ole.s $f12, $f14
-; CMOV32R2-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.ole.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_ole_float:
; 32R6: # %bb.0: # %entry
@@ -293,21 +296,22 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_ole_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ole.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.ole.s $f0, $f13
; M3-NEXT: bc1t .LBB3_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB3_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_ole_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ole.s $f12, $f13
-; CMOV64-NEXT: movt.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.ole.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_ole_float:
; 64R6: # %bb.0: # %entry
@@ -317,10 +321,10 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_ole_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ole.s $f12, $f14
-; MM32R3-NEXT: movt.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.ole.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_ole_float:
; MM32R6: # %bb.0: # %entry
@@ -336,28 +340,29 @@ entry:
define float @tst_select_fcmp_ogt_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_ogt_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ule.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.ule.s $f0, $f14
; M2-NEXT: bc1f $BB4_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB4_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_ogt_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ule.s $f12, $f14
-; CMOV32R1-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.ule.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ogt_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ule.s $f12, $f14
-; CMOV32R2-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.ule.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_ogt_float:
; 32R6: # %bb.0: # %entry
@@ -367,21 +372,22 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_ogt_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ule.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.ule.s $f0, $f13
; M3-NEXT: bc1f .LBB4_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB4_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_ogt_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ule.s $f12, $f13
-; CMOV64-NEXT: movf.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.ule.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_ogt_float:
; 64R6: # %bb.0: # %entry
@@ -391,10 +397,10 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_ogt_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ule.s $f12, $f14
-; MM32R3-NEXT: movf.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.ule.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_ogt_float:
; MM32R6: # %bb.0: # %entry
@@ -410,28 +416,29 @@ entry:
define float @tst_select_fcmp_oge_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_oge_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ult.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.ult.s $f0, $f14
; M2-NEXT: bc1f $BB5_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB5_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_oge_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ult.s $f12, $f14
-; CMOV32R1-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.ult.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oge_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ult.s $f12, $f14
-; CMOV32R2-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.ult.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_oge_float:
; 32R6: # %bb.0: # %entry
@@ -441,21 +448,22 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_oge_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ult.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.ult.s $f0, $f13
; M3-NEXT: bc1f .LBB5_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB5_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_oge_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ult.s $f12, $f13
-; CMOV64-NEXT: movf.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.ult.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_oge_float:
; 64R6: # %bb.0: # %entry
@@ -465,10 +473,10 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_oge_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ult.s $f12, $f14
-; MM32R3-NEXT: movf.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.ult.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_oge_float:
; MM32R6: # %bb.0: # %entry
@@ -484,28 +492,29 @@ entry:
define float @tst_select_fcmp_oeq_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_oeq_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.eq.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.eq.s $f0, $f14
; M2-NEXT: bc1t $BB6_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB6_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_oeq_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.eq.s $f12, $f14
-; CMOV32R1-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.eq.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oeq_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.eq.s $f12, $f14
-; CMOV32R2-NEXT: movt.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.eq.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_oeq_float:
; 32R6: # %bb.0: # %entry
@@ -515,21 +524,22 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_oeq_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.eq.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.eq.s $f0, $f13
; M3-NEXT: bc1t .LBB6_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB6_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_oeq_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.eq.s $f12, $f13
-; CMOV64-NEXT: movt.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.eq.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_oeq_float:
; 64R6: # %bb.0: # %entry
@@ -539,10 +549,10 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_oeq_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.eq.s $f12, $f14
-; MM32R3-NEXT: movt.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.eq.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_oeq_float:
; MM32R6: # %bb.0: # %entry
@@ -558,28 +568,29 @@ entry:
define float @tst_select_fcmp_one_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_one_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: c.ueq.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: c.ueq.s $f0, $f14
; M2-NEXT: bc1f $BB7_2
; M2-NEXT: nop
; M2-NEXT: # %bb.1: # %entry
-; M2-NEXT: mov.s $f12, $f14
+; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB7_2: # %entry
; M2-NEXT: jr $ra
-; M2-NEXT: mov.s $f0, $f12
+; M2-NEXT: nop
;
; CMOV32R1-LABEL: tst_select_fcmp_one_float:
; CMOV32R1: # %bb.0: # %entry
-; CMOV32R1-NEXT: c.ueq.s $f12, $f14
-; CMOV32R1-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: mov.s $f0, $f14
+; CMOV32R1-NEXT: c.ueq.s $f12, $f0
+; CMOV32R1-NEXT: jr $ra
+; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_one_float:
; CMOV32R2: # %bb.0: # %entry
-; CMOV32R2-NEXT: c.ueq.s $f12, $f14
-; CMOV32R2-NEXT: movf.s $f14, $f12, $fcc0
-; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: mov.s $f0, $f14
+; CMOV32R2-NEXT: c.ueq.s $f12, $f0
+; CMOV32R2-NEXT: jr $ra
+; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R6-LABEL: tst_select_fcmp_one_float:
; 32R6: # %bb.0: # %entry
@@ -592,21 +603,22 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_one_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: c.ueq.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: c.ueq.s $f0, $f13
; M3-NEXT: bc1f .LBB7_2
; M3-NEXT: nop
; M3-NEXT: # %bb.1: # %entry
-; M3-NEXT: mov.s $f12, $f13
+; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB7_2: # %entry
; M3-NEXT: jr $ra
-; M3-NEXT: mov.s $f0, $f12
+; M3-NEXT: nop
;
; CMOV64-LABEL: tst_select_fcmp_one_float:
; CMOV64: # %bb.0: # %entry
-; CMOV64-NEXT: c.ueq.s $f12, $f13
-; CMOV64-NEXT: movf.s $f13, $f12, $fcc0
-; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: mov.s $f0, $f13
+; CMOV64-NEXT: c.ueq.s $f12, $f0
+; CMOV64-NEXT: jr $ra
+; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R6-LABEL: tst_select_fcmp_one_float:
; 64R6: # %bb.0: # %entry
@@ -619,10 +631,10 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
;
; MM32R3-LABEL: tst_select_fcmp_one_float:
; MM32R3: # %bb.0: # %entry
-; MM32R3-NEXT: c.ueq.s $f12, $f14
-; MM32R3-NEXT: movf.s $f14, $f12, $fcc0
-; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: mov.s $f0, $f14
+; MM32R3-NEXT: c.ueq.s $f12, $f0
+; MM32R3-NEXT: jr $ra
+; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
; MM32R6-LABEL: tst_select_fcmp_one_float:
; MM32R6: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
index 3f267ad6417..9aaea26f0c1 100644
--- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
@@ -190,14 +190,15 @@ define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture
; CHECK-NEXT: addiu $sp, $sp, -48
; CHECK-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
; CHECK-NEXT: addu $gp, $2, $25
+; CHECK-NEXT: move $4, $7
; CHECK-NEXT: sw $5, 52($sp)
; CHECK-NEXT: sw $6, 56($sp)
-; CHECK-NEXT: sw $7, 60($sp)
+; CHECK-NEXT: sw $4, 60($sp)
; CHECK-NEXT: lw $1, 80($sp)
; CHECK-NEXT: lb $2, 52($sp)
; CHECK-NEXT: addiu $3, $zero, 4
-; CHECK-NEXT: lui $4, 16576
-; CHECK-NEXT: sw $4, 36($sp)
+; CHECK-NEXT: lui $5, 16576
+; CHECK-NEXT: sw $5, 36($sp)
; CHECK-NEXT: sw $2, 32($sp)
; CHECK-NEXT: sw $3, 28($sp)
; CHECK-NEXT: sw $1, 24($sp)
@@ -207,11 +208,9 @@ define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture
; CHECK-NEXT: lw $1, %got($CPI3_0)($gp)
; CHECK-NEXT: ldc1 $f0, %lo($CPI3_0)($1)
; CHECK-NEXT: mfc1 $6, $f0
-; CHECK-NEXT: mfc1 $1, $f1
; CHECK-NEXT: lw $25, %call16(callee4)($gp)
-; CHECK-NEXT: move $4, $7
; CHECK-NEXT: jalr $25
-; CHECK-NEXT: move $7, $1
+; CHECK-NEXT: mfc1 $7, $f1
; CHECK-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 48
diff --git a/llvm/test/CodeGen/Mips/select.ll b/llvm/test/CodeGen/Mips/select.ll
index 18cd40f7248..a908480d2ea 100644
--- a/llvm/test/CodeGen/Mips/select.ll
+++ b/llvm/test/CodeGen/Mips/select.ll
@@ -202,15 +202,15 @@ define float @i32_icmp_ne_f32_val(i32 signext %s, float %f0, float %f1) nounwind
;
; 64-LABEL: i32_icmp_ne_f32_val:
; 64: # %bb.0: # %entry
-; 64-NEXT: movn.s $f14, $f13, $4
-; 64-NEXT: jr $ra
; 64-NEXT: mov.s $f0, $f14
+; 64-NEXT: jr $ra
+; 64-NEXT: movn.s $f0, $f13, $4
;
; 64R2-LABEL: i32_icmp_ne_f32_val:
; 64R2: # %bb.0: # %entry
-; 64R2-NEXT: movn.s $f14, $f13, $4
-; 64R2-NEXT: jr $ra
; 64R2-NEXT: mov.s $f0, $f14
+; 64R2-NEXT: jr $ra
+; 64R2-NEXT: movn.s $f0, $f13, $4
;
; 64R6-LABEL: i32_icmp_ne_f32_val:
; 64R6: # %bb.0: # %entry
@@ -255,15 +255,15 @@ define double @i32_icmp_ne_f64_val(i32 signext %s, double %f0, double %f1) nounw
;
; 64-LABEL: i32_icmp_ne_f64_val:
; 64: # %bb.0: # %entry
-; 64-NEXT: movn.d $f14, $f13, $4
-; 64-NEXT: jr $ra
; 64-NEXT: mov.d $f0, $f14
+; 64-NEXT: jr $ra
+; 64-NEXT: movn.d $f0, $f13, $4
;
; 64R2-LABEL: i32_icmp_ne_f64_val:
; 64R2: # %bb.0: # %entry
-; 64R2-NEXT: movn.d $f14, $f13, $4
-; 64R2-NEXT: jr $ra
; 64R2-NEXT: mov.d $f0, $f14
+; 64R2-NEXT: jr $ra
+; 64R2-NEXT: movn.d $f0, $f13, $4
;
; 64R6-LABEL: i32_icmp_ne_f64_val:
; 64R6: # %bb.0: # %entry
@@ -281,21 +281,21 @@ entry:
define float @f32_fcmp_oeq_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
; 32-LABEL: f32_fcmp_oeq_f32_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: mtc1 $7, $f0
-; 32-NEXT: mtc1 $6, $f1
-; 32-NEXT: c.eq.s $f1, $f0
-; 32-NEXT: movt.s $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.s $f0, $f14
+; 32-NEXT: mtc1 $7, $f1
+; 32-NEXT: mtc1 $6, $f2
+; 32-NEXT: c.eq.s $f2, $f1
+; 32-NEXT: jr $ra
+; 32-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R2-LABEL: f32_fcmp_oeq_f32_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: mtc1 $7, $f0
-; 32R2-NEXT: mtc1 $6, $f1
-; 32R2-NEXT: c.eq.s $f1, $f0
-; 32R2-NEXT: movt.s $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.s $f0, $f14
+; 32R2-NEXT: mtc1 $7, $f1
+; 32R2-NEXT: mtc1 $6, $f2
+; 32R2-NEXT: c.eq.s $f2, $f1
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R6-LABEL: f32_fcmp_oeq_f32_val:
; 32R6: # %bb.0: # %entry
@@ -307,17 +307,17 @@ define float @f32_fcmp_oeq_f32_val(float %f0, float %f1, float %f2, float %f3) n
;
; 64-LABEL: f32_fcmp_oeq_f32_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.s $f0, $f13
; 64-NEXT: c.eq.s $f14, $f15
-; 64-NEXT: movt.s $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.s $f0, $f13
+; 64-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R2-LABEL: f32_fcmp_oeq_f32_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.s $f0, $f13
; 64R2-NEXT: c.eq.s $f14, $f15
-; 64R2-NEXT: movt.s $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.s $f0, $f13
+; 64R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R6-LABEL: f32_fcmp_oeq_f32_val:
; 64R6: # %bb.0: # %entry
@@ -333,21 +333,21 @@ entry:
define float @f32_fcmp_olt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
; 32-LABEL: f32_fcmp_olt_f32_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: mtc1 $7, $f0
-; 32-NEXT: mtc1 $6, $f1
-; 32-NEXT: c.olt.s $f1, $f0
-; 32-NEXT: movt.s $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.s $f0, $f14
+; 32-NEXT: mtc1 $7, $f1
+; 32-NEXT: mtc1 $6, $f2
+; 32-NEXT: c.olt.s $f2, $f1
+; 32-NEXT: jr $ra
+; 32-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R2-LABEL: f32_fcmp_olt_f32_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: mtc1 $7, $f0
-; 32R2-NEXT: mtc1 $6, $f1
-; 32R2-NEXT: c.olt.s $f1, $f0
-; 32R2-NEXT: movt.s $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.s $f0, $f14
+; 32R2-NEXT: mtc1 $7, $f1
+; 32R2-NEXT: mtc1 $6, $f2
+; 32R2-NEXT: c.olt.s $f2, $f1
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 32R6-LABEL: f32_fcmp_olt_f32_val:
; 32R6: # %bb.0: # %entry
@@ -359,17 +359,17 @@ define float @f32_fcmp_olt_f32_val(float %f0, float %f1, float %f2, float %f3) n
;
; 64-LABEL: f32_fcmp_olt_f32_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.s $f0, $f13
; 64-NEXT: c.olt.s $f14, $f15
-; 64-NEXT: movt.s $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.s $f0, $f13
+; 64-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R2-LABEL: f32_fcmp_olt_f32_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.s $f0, $f13
; 64R2-NEXT: c.olt.s $f14, $f15
-; 64R2-NEXT: movt.s $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.s $f0, $f13
+; 64R2-NEXT: movt.s $f0, $f12, $fcc0
;
; 64R6-LABEL: f32_fcmp_olt_f32_val:
; 64R6: # %bb.0: # %entry
@@ -385,21 +385,21 @@ entry:
define float @f32_fcmp_ogt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
; 32-LABEL: f32_fcmp_ogt_f32_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: mtc1 $7, $f0
-; 32-NEXT: mtc1 $6, $f1
-; 32-NEXT: c.ule.s $f1, $f0
-; 32-NEXT: movf.s $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.s $f0, $f14
+; 32-NEXT: mtc1 $7, $f1
+; 32-NEXT: mtc1 $6, $f2
+; 32-NEXT: c.ule.s $f2, $f1
+; 32-NEXT: jr $ra
+; 32-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R2-LABEL: f32_fcmp_ogt_f32_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: mtc1 $7, $f0
-; 32R2-NEXT: mtc1 $6, $f1
-; 32R2-NEXT: c.ule.s $f1, $f0
-; 32R2-NEXT: movf.s $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.s $f0, $f14
+; 32R2-NEXT: mtc1 $7, $f1
+; 32R2-NEXT: mtc1 $6, $f2
+; 32R2-NEXT: c.ule.s $f2, $f1
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R6-LABEL: f32_fcmp_ogt_f32_val:
; 32R6: # %bb.0: # %entry
@@ -411,17 +411,17 @@ define float @f32_fcmp_ogt_f32_val(float %f0, float %f1, float %f2, float %f3) n
;
; 64-LABEL: f32_fcmp_ogt_f32_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.s $f0, $f13
; 64-NEXT: c.ule.s $f14, $f15
-; 64-NEXT: movf.s $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.s $f0, $f13
+; 64-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R2-LABEL: f32_fcmp_ogt_f32_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.s $f0, $f13
; 64R2-NEXT: c.ule.s $f14, $f15
-; 64R2-NEXT: movf.s $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.s $f0, $f13
+; 64R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R6-LABEL: f32_fcmp_ogt_f32_val:
; 64R6: # %bb.0: # %entry
@@ -437,21 +437,21 @@ entry:
define double @f32_fcmp_ogt_f64_val(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
; 32-LABEL: f32_fcmp_ogt_f64_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: lwc1 $f0, 20($sp)
-; 32-NEXT: lwc1 $f1, 16($sp)
-; 32-NEXT: c.ule.s $f1, $f0
-; 32-NEXT: movf.d $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.d $f0, $f14
+; 32-NEXT: lwc1 $f2, 20($sp)
+; 32-NEXT: lwc1 $f3, 16($sp)
+; 32-NEXT: c.ule.s $f3, $f2
+; 32-NEXT: jr $ra
+; 32-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R2-LABEL: f32_fcmp_ogt_f64_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: lwc1 $f0, 20($sp)
-; 32R2-NEXT: lwc1 $f1, 16($sp)
-; 32R2-NEXT: c.ule.s $f1, $f0
-; 32R2-NEXT: movf.d $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.d $f0, $f14
+; 32R2-NEXT: lwc1 $f2, 20($sp)
+; 32R2-NEXT: lwc1 $f3, 16($sp)
+; 32R2-NEXT: c.ule.s $f3, $f2
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R6-LABEL: f32_fcmp_ogt_f64_val:
; 32R6: # %bb.0: # %entry
@@ -465,17 +465,17 @@ define double @f32_fcmp_ogt_f64_val(double %f0, double %f1, float %f2, float %f3
;
; 64-LABEL: f32_fcmp_ogt_f64_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.d $f0, $f13
; 64-NEXT: c.ule.s $f14, $f15
-; 64-NEXT: movf.d $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.d $f0, $f13
+; 64-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R2-LABEL: f32_fcmp_ogt_f64_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.d $f0, $f13
; 64R2-NEXT: c.ule.s $f14, $f15
-; 64R2-NEXT: movf.d $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.d $f0, $f13
+; 64R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R6-LABEL: f32_fcmp_ogt_f64_val:
; 64R6: # %bb.0: # %entry
@@ -493,21 +493,21 @@ entry:
define double @f64_fcmp_oeq_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
; 32-LABEL: f64_fcmp_oeq_f64_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: ldc1 $f0, 24($sp)
-; 32-NEXT: ldc1 $f2, 16($sp)
-; 32-NEXT: c.eq.d $f2, $f0
-; 32-NEXT: movt.d $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.d $f0, $f14
+; 32-NEXT: ldc1 $f2, 24($sp)
+; 32-NEXT: ldc1 $f4, 16($sp)
+; 32-NEXT: c.eq.d $f4, $f2
+; 32-NEXT: jr $ra
+; 32-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R2-LABEL: f64_fcmp_oeq_f64_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: ldc1 $f0, 24($sp)
-; 32R2-NEXT: ldc1 $f2, 16($sp)
-; 32R2-NEXT: c.eq.d $f2, $f0
-; 32R2-NEXT: movt.d $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.d $f0, $f14
+; 32R2-NEXT: ldc1 $f2, 24($sp)
+; 32R2-NEXT: ldc1 $f4, 16($sp)
+; 32R2-NEXT: c.eq.d $f4, $f2
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R6-LABEL: f64_fcmp_oeq_f64_val:
; 32R6: # %bb.0: # %entry
@@ -521,17 +521,17 @@ define double @f64_fcmp_oeq_f64_val(double %f0, double %f1, double %f2, double %
;
; 64-LABEL: f64_fcmp_oeq_f64_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.d $f0, $f13
; 64-NEXT: c.eq.d $f14, $f15
-; 64-NEXT: movt.d $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.d $f0, $f13
+; 64-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R2-LABEL: f64_fcmp_oeq_f64_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.d $f0, $f13
; 64R2-NEXT: c.eq.d $f14, $f15
-; 64R2-NEXT: movt.d $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.d $f0, $f13
+; 64R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R6-LABEL: f64_fcmp_oeq_f64_val:
; 64R6: # %bb.0: # %entry
@@ -549,21 +549,21 @@ entry:
define double @f64_fcmp_olt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
; 32-LABEL: f64_fcmp_olt_f64_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: ldc1 $f0, 24($sp)
-; 32-NEXT: ldc1 $f2, 16($sp)
-; 32-NEXT: c.olt.d $f2, $f0
-; 32-NEXT: movt.d $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.d $f0, $f14
+; 32-NEXT: ldc1 $f2, 24($sp)
+; 32-NEXT: ldc1 $f4, 16($sp)
+; 32-NEXT: c.olt.d $f4, $f2
+; 32-NEXT: jr $ra
+; 32-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R2-LABEL: f64_fcmp_olt_f64_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: ldc1 $f0, 24($sp)
-; 32R2-NEXT: ldc1 $f2, 16($sp)
-; 32R2-NEXT: c.olt.d $f2, $f0
-; 32R2-NEXT: movt.d $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.d $f0, $f14
+; 32R2-NEXT: ldc1 $f2, 24($sp)
+; 32R2-NEXT: ldc1 $f4, 16($sp)
+; 32R2-NEXT: c.olt.d $f4, $f2
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 32R6-LABEL: f64_fcmp_olt_f64_val:
; 32R6: # %bb.0: # %entry
@@ -577,17 +577,17 @@ define double @f64_fcmp_olt_f64_val(double %f0, double %f1, double %f2, double %
;
; 64-LABEL: f64_fcmp_olt_f64_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.d $f0, $f13
; 64-NEXT: c.olt.d $f14, $f15
-; 64-NEXT: movt.d $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.d $f0, $f13
+; 64-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R2-LABEL: f64_fcmp_olt_f64_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.d $f0, $f13
; 64R2-NEXT: c.olt.d $f14, $f15
-; 64R2-NEXT: movt.d $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.d $f0, $f13
+; 64R2-NEXT: movt.d $f0, $f12, $fcc0
;
; 64R6-LABEL: f64_fcmp_olt_f64_val:
; 64R6: # %bb.0: # %entry
@@ -605,21 +605,21 @@ entry:
define double @f64_fcmp_ogt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
; 32-LABEL: f64_fcmp_ogt_f64_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: ldc1 $f0, 24($sp)
-; 32-NEXT: ldc1 $f2, 16($sp)
-; 32-NEXT: c.ule.d $f2, $f0
-; 32-NEXT: movf.d $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.d $f0, $f14
+; 32-NEXT: ldc1 $f2, 24($sp)
+; 32-NEXT: ldc1 $f4, 16($sp)
+; 32-NEXT: c.ule.d $f4, $f2
+; 32-NEXT: jr $ra
+; 32-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R2-LABEL: f64_fcmp_ogt_f64_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: ldc1 $f0, 24($sp)
-; 32R2-NEXT: ldc1 $f2, 16($sp)
-; 32R2-NEXT: c.ule.d $f2, $f0
-; 32R2-NEXT: movf.d $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.d $f0, $f14
+; 32R2-NEXT: ldc1 $f2, 24($sp)
+; 32R2-NEXT: ldc1 $f4, 16($sp)
+; 32R2-NEXT: c.ule.d $f4, $f2
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 32R6-LABEL: f64_fcmp_ogt_f64_val:
; 32R6: # %bb.0: # %entry
@@ -633,17 +633,17 @@ define double @f64_fcmp_ogt_f64_val(double %f0, double %f1, double %f2, double %
;
; 64-LABEL: f64_fcmp_ogt_f64_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.d $f0, $f13
; 64-NEXT: c.ule.d $f14, $f15
-; 64-NEXT: movf.d $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.d $f0, $f13
+; 64-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R2-LABEL: f64_fcmp_ogt_f64_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.d $f0, $f13
; 64R2-NEXT: c.ule.d $f14, $f15
-; 64R2-NEXT: movf.d $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.d $f0, $f13
+; 64R2-NEXT: movf.d $f0, $f12, $fcc0
;
; 64R6-LABEL: f64_fcmp_ogt_f64_val:
; 64R6: # %bb.0: # %entry
@@ -661,23 +661,23 @@ entry:
define float @f64_fcmp_ogt_f32_val(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
; 32-LABEL: f64_fcmp_ogt_f32_val:
; 32: # %bb.0: # %entry
-; 32-NEXT: mtc1 $6, $f0
-; 32-NEXT: mtc1 $7, $f1
-; 32-NEXT: ldc1 $f2, 16($sp)
-; 32-NEXT: c.ule.d $f0, $f2
-; 32-NEXT: movf.s $f14, $f12, $fcc0
-; 32-NEXT: jr $ra
; 32-NEXT: mov.s $f0, $f14
+; 32-NEXT: mtc1 $6, $f2
+; 32-NEXT: mtc1 $7, $f3
+; 32-NEXT: ldc1 $f4, 16($sp)
+; 32-NEXT: c.ule.d $f2, $f4
+; 32-NEXT: jr $ra
+; 32-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R2-LABEL: f64_fcmp_ogt_f32_val:
; 32R2: # %bb.0: # %entry
-; 32R2-NEXT: mtc1 $6, $f0
-; 32R2-NEXT: mthc1 $7, $f0
-; 32R2-NEXT: ldc1 $f2, 16($sp)
-; 32R2-NEXT: c.ule.d $f0, $f2
-; 32R2-NEXT: movf.s $f14, $f12, $fcc0
-; 32R2-NEXT: jr $ra
; 32R2-NEXT: mov.s $f0, $f14
+; 32R2-NEXT: mtc1 $6, $f2
+; 32R2-NEXT: mthc1 $7, $f2
+; 32R2-NEXT: ldc1 $f4, 16($sp)
+; 32R2-NEXT: c.ule.d $f2, $f4
+; 32R2-NEXT: jr $ra
+; 32R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 32R6-LABEL: f64_fcmp_ogt_f32_val:
; 32R6: # %bb.0: # %entry
@@ -690,17 +690,17 @@ define float @f64_fcmp_ogt_f32_val(float %f0, float %f1, double %f2, double %f3)
;
; 64-LABEL: f64_fcmp_ogt_f32_val:
; 64: # %bb.0: # %entry
+; 64-NEXT: mov.s $f0, $f13
; 64-NEXT: c.ule.d $f14, $f15
-; 64-NEXT: movf.s $f13, $f12, $fcc0
; 64-NEXT: jr $ra
-; 64-NEXT: mov.s $f0, $f13
+; 64-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R2-LABEL: f64_fcmp_ogt_f32_val:
; 64R2: # %bb.0: # %entry
+; 64R2-NEXT: mov.s $f0, $f13
; 64R2-NEXT: c.ule.d $f14, $f15
-; 64R2-NEXT: movf.s $f13, $f12, $fcc0
; 64R2-NEXT: jr $ra
-; 64R2-NEXT: mov.s $f0, $f13
+; 64R2-NEXT: movf.s $f0, $f12, $fcc0
;
; 64R6-LABEL: f64_fcmp_ogt_f32_val:
; 64R6: # %bb.0: # %entry