author    Geoff Berry <gberry@codeaurora.org>    2018-02-27 16:59:10 +0000
committer Geoff Berry <gberry@codeaurora.org>    2018-02-27 16:59:10 +0000
commit    a2b901129099b93f20e8cdf41f520e31398c0c4d (patch)
tree      dce027ceb74117c28b4d882383b8ebb172de0fd7 /llvm/test/CodeGen/Mips/llvm-ir
parent    3bfa8f01207f907551c8ea39a938e0f2f8ec018b (diff)
Re-enable "[MachineCopyPropagation] Extend pass to do COPY source forwarding"
Re-enable commit r323991 now that r325931 has been committed to make the MachineOperand::isRenamable() check more conservative with respect to code changes, and opt-in on a per-target basis.

llvm-svn: 326208
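The forwarding this re-enabled pass performs is visible directly in the updated checks below: after a register copy, later uses of the copy's destination are rewritten to read the copy's source instead. A minimal before/after sketch of that rewrite, using the registers from the lshr.ll MMR6 checks in this diff:

    # before forwarding: $17 is a copy of $6, and later uses read $17
    move  $17, $6
    sw    $17, 16($sp)    # 4-byte Folded Spill
    sll16 $6, $17, 1

    # after forwarding: the COPY source $6 is substituted into those uses
    move  $17, $6
    sw    $6, 16($sp)     # 4-byte Folded Spill
    sll16 $6, $6, 1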
Diffstat (limited to 'llvm/test/CodeGen/Mips/llvm-ir')
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/ashr.ll          2
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/lshr.ll         10
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll  108
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll  108
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/shl.ll           4
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/sub.ll           2
6 files changed, 105 insertions, 129 deletions
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
index 5cbf51e3882..140f545f239 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -800,7 +800,7 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: sw $5, 36($sp) # 4-byte Folded Spill
; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill
; MMR3-NEXT: lw $16, 76($sp)
-; MMR3-NEXT: srlv $4, $8, $16
+; MMR3-NEXT: srlv $4, $7, $16
; MMR3-NEXT: not16 $3, $16
; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: sll16 $2, $6, 1
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
index d9756ddcf31..79382e0df35 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -828,7 +828,7 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: move $17, $5
; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill
; MMR3-NEXT: lw $16, 76($sp)
-; MMR3-NEXT: srlv $7, $8, $16
+; MMR3-NEXT: srlv $7, $7, $16
; MMR3-NEXT: not16 $3, $16
; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: sll16 $2, $6, 1
@@ -919,14 +919,14 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: not16 $5, $3
; MMR6-NEXT: sw $5, 12($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $17, $6
-; MMR6-NEXT: sw $17, 16($sp) # 4-byte Folded Spill
-; MMR6-NEXT: sll16 $6, $17, 1
+; MMR6-NEXT: sw $6, 16($sp) # 4-byte Folded Spill
+; MMR6-NEXT: sll16 $6, $6, 1
; MMR6-NEXT: sllv $6, $6, $5
; MMR6-NEXT: or $8, $6, $2
; MMR6-NEXT: addiu $5, $3, -64
; MMR6-NEXT: srlv $9, $7, $5
; MMR6-NEXT: move $6, $4
-; MMR6-NEXT: sll16 $2, $6, 1
+; MMR6-NEXT: sll16 $2, $4, 1
; MMR6-NEXT: sw $2, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $16, $5
; MMR6-NEXT: sllv $10, $2, $16
@@ -948,7 +948,7 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: selnez $11, $12, $4
; MMR6-NEXT: sllv $12, $6, $2
; MMR6-NEXT: move $7, $6
-; MMR6-NEXT: sw $7, 4($sp) # 4-byte Folded Spill
+; MMR6-NEXT: sw $6, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $2, $2
; MMR6-NEXT: srl16 $6, $17, 1
; MMR6-NEXT: srlv $2, $6, $2
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll b/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
index 715c35fdfe3..3f79a238888 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/select-dbl.ll
@@ -201,10 +201,9 @@ entry:
define double @tst_select_fcmp_olt_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_olt_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.olt.d $f0, $f14
+; M2-NEXT: c.olt.d $f12, $f14
; M2-NEXT: bc1t $BB2_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB2_2: # %entry
@@ -214,14 +213,14 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_olt_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.olt.d $f12, $f0
+; CMOV32R1-NEXT: c.olt.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_olt_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.olt.d $f12, $f0
+; CMOV32R2-NEXT: c.olt.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -235,10 +234,9 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_olt_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.olt.d $f0, $f13
+; M3-NEXT: c.olt.d $f12, $f13
; M3-NEXT: bc1t .LBB2_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB2_2: # %entry
@@ -248,7 +246,7 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_olt_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.olt.d $f12, $f0
+; CMOV64-NEXT: c.olt.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -263,7 +261,7 @@ define double @tst_select_fcmp_olt_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_olt_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.olt.d $f12, $f0
+; MM32R3-NEXT: c.olt.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -283,10 +281,9 @@ entry:
define double @tst_select_fcmp_ole_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_ole_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.ole.d $f0, $f14
+; M2-NEXT: c.ole.d $f12, $f14
; M2-NEXT: bc1t $BB3_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB3_2: # %entry
@@ -296,14 +293,14 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_ole_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.ole.d $f12, $f0
+; CMOV32R1-NEXT: c.ole.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ole_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.ole.d $f12, $f0
+; CMOV32R2-NEXT: c.ole.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -317,10 +314,9 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_ole_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.ole.d $f0, $f13
+; M3-NEXT: c.ole.d $f12, $f13
; M3-NEXT: bc1t .LBB3_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB3_2: # %entry
@@ -330,7 +326,7 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_ole_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.ole.d $f12, $f0
+; CMOV64-NEXT: c.ole.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -345,7 +341,7 @@ define double @tst_select_fcmp_ole_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_ole_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.ole.d $f12, $f0
+; MM32R3-NEXT: c.ole.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -365,10 +361,9 @@ entry:
define double @tst_select_fcmp_ogt_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_ogt_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.ule.d $f0, $f14
+; M2-NEXT: c.ule.d $f12, $f14
; M2-NEXT: bc1f $BB4_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB4_2: # %entry
@@ -378,14 +373,14 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_ogt_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.ule.d $f12, $f0
+; CMOV32R1-NEXT: c.ule.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ogt_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.ule.d $f12, $f0
+; CMOV32R2-NEXT: c.ule.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -399,10 +394,9 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_ogt_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.ule.d $f0, $f13
+; M3-NEXT: c.ule.d $f12, $f13
; M3-NEXT: bc1f .LBB4_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB4_2: # %entry
@@ -412,7 +406,7 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_ogt_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.ule.d $f12, $f0
+; CMOV64-NEXT: c.ule.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -427,7 +421,7 @@ define double @tst_select_fcmp_ogt_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_ogt_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.ule.d $f12, $f0
+; MM32R3-NEXT: c.ule.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -447,10 +441,9 @@ entry:
define double @tst_select_fcmp_oge_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_oge_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.ult.d $f0, $f14
+; M2-NEXT: c.ult.d $f12, $f14
; M2-NEXT: bc1f $BB5_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB5_2: # %entry
@@ -460,14 +453,14 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_oge_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.ult.d $f12, $f0
+; CMOV32R1-NEXT: c.ult.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oge_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.ult.d $f12, $f0
+; CMOV32R2-NEXT: c.ult.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -481,10 +474,9 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_oge_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.ult.d $f0, $f13
+; M3-NEXT: c.ult.d $f12, $f13
; M3-NEXT: bc1f .LBB5_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB5_2: # %entry
@@ -494,7 +486,7 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_oge_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.ult.d $f12, $f0
+; CMOV64-NEXT: c.ult.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -509,7 +501,7 @@ define double @tst_select_fcmp_oge_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_oge_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.ult.d $f12, $f0
+; MM32R3-NEXT: c.ult.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -529,10 +521,9 @@ entry:
define double @tst_select_fcmp_oeq_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_oeq_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.eq.d $f0, $f14
+; M2-NEXT: c.eq.d $f12, $f14
; M2-NEXT: bc1t $BB6_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB6_2: # %entry
@@ -542,14 +533,14 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_oeq_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.eq.d $f12, $f0
+; CMOV32R1-NEXT: c.eq.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oeq_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.eq.d $f12, $f0
+; CMOV32R2-NEXT: c.eq.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -563,10 +554,9 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_oeq_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.eq.d $f0, $f13
+; M3-NEXT: c.eq.d $f12, $f13
; M3-NEXT: bc1t .LBB6_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB6_2: # %entry
@@ -576,7 +566,7 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_oeq_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.eq.d $f12, $f0
+; CMOV64-NEXT: c.eq.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -591,7 +581,7 @@ define double @tst_select_fcmp_oeq_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_oeq_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.eq.d $f12, $f0
+; MM32R3-NEXT: c.eq.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.d $f0, $f12, $fcc0
;
@@ -611,10 +601,9 @@ entry:
define double @tst_select_fcmp_one_double(double %x, double %y) {
; M2-LABEL: tst_select_fcmp_one_double:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.d $f0, $f12
-; M2-NEXT: c.ueq.d $f0, $f14
+; M2-NEXT: c.ueq.d $f12, $f14
; M2-NEXT: bc1f $BB7_2
-; M2-NEXT: nop
+; M2-NEXT: mov.d $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.d $f0, $f14
; M2-NEXT: $BB7_2: # %entry
@@ -624,14 +613,14 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
; CMOV32R1-LABEL: tst_select_fcmp_one_double:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.d $f0, $f14
-; CMOV32R1-NEXT: c.ueq.d $f12, $f0
+; CMOV32R1-NEXT: c.ueq.d $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.d $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_one_double:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.d $f0, $f14
-; CMOV32R2-NEXT: c.ueq.d $f12, $f0
+; CMOV32R2-NEXT: c.ueq.d $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -646,10 +635,9 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
;
; M3-LABEL: tst_select_fcmp_one_double:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.d $f0, $f12
-; M3-NEXT: c.ueq.d $f0, $f13
+; M3-NEXT: c.ueq.d $f12, $f13
; M3-NEXT: bc1f .LBB7_2
-; M3-NEXT: nop
+; M3-NEXT: mov.d $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.d $f0, $f13
; M3-NEXT: .LBB7_2: # %entry
@@ -659,7 +647,7 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
; CMOV64-LABEL: tst_select_fcmp_one_double:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.d $f0, $f13
-; CMOV64-NEXT: c.ueq.d $f12, $f0
+; CMOV64-NEXT: c.ueq.d $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.d $f0, $f12, $fcc0
;
@@ -675,7 +663,7 @@ define double @tst_select_fcmp_one_double(double %x, double %y) {
; MM32R3-LABEL: tst_select_fcmp_one_double:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.d $f0, $f14
-; MM32R3-NEXT: c.ueq.d $f12, $f0
+; MM32R3-NEXT: c.ueq.d $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.d $f0, $f12, $fcc0
;
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll b/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
index c04601c64b0..7f07d3b5bea 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/select-flt.ll
@@ -188,10 +188,9 @@ entry:
define float @tst_select_fcmp_olt_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_olt_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.olt.s $f0, $f14
+; M2-NEXT: c.olt.s $f12, $f14
; M2-NEXT: bc1t $BB2_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB2_2: # %entry
@@ -201,14 +200,14 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_olt_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.olt.s $f12, $f0
+; CMOV32R1-NEXT: c.olt.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_olt_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.olt.s $f12, $f0
+; CMOV32R2-NEXT: c.olt.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -220,10 +219,9 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_olt_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.olt.s $f0, $f13
+; M3-NEXT: c.olt.s $f12, $f13
; M3-NEXT: bc1t .LBB2_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB2_2: # %entry
@@ -233,7 +231,7 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_olt_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.olt.s $f12, $f0
+; CMOV64-NEXT: c.olt.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -246,7 +244,7 @@ define float @tst_select_fcmp_olt_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_olt_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.olt.s $f12, $f0
+; MM32R3-NEXT: c.olt.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -264,10 +262,9 @@ entry:
define float @tst_select_fcmp_ole_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_ole_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.ole.s $f0, $f14
+; M2-NEXT: c.ole.s $f12, $f14
; M2-NEXT: bc1t $BB3_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB3_2: # %entry
@@ -277,14 +274,14 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_ole_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.ole.s $f12, $f0
+; CMOV32R1-NEXT: c.ole.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ole_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.ole.s $f12, $f0
+; CMOV32R2-NEXT: c.ole.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -296,10 +293,9 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_ole_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.ole.s $f0, $f13
+; M3-NEXT: c.ole.s $f12, $f13
; M3-NEXT: bc1t .LBB3_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB3_2: # %entry
@@ -309,7 +305,7 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_ole_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.ole.s $f12, $f0
+; CMOV64-NEXT: c.ole.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -322,7 +318,7 @@ define float @tst_select_fcmp_ole_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_ole_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.ole.s $f12, $f0
+; MM32R3-NEXT: c.ole.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -340,10 +336,9 @@ entry:
define float @tst_select_fcmp_ogt_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_ogt_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.ule.s $f0, $f14
+; M2-NEXT: c.ule.s $f12, $f14
; M2-NEXT: bc1f $BB4_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB4_2: # %entry
@@ -353,14 +348,14 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_ogt_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.ule.s $f12, $f0
+; CMOV32R1-NEXT: c.ule.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_ogt_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.ule.s $f12, $f0
+; CMOV32R2-NEXT: c.ule.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -372,10 +367,9 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_ogt_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.ule.s $f0, $f13
+; M3-NEXT: c.ule.s $f12, $f13
; M3-NEXT: bc1f .LBB4_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB4_2: # %entry
@@ -385,7 +379,7 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_ogt_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.ule.s $f12, $f0
+; CMOV64-NEXT: c.ule.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -398,7 +392,7 @@ define float @tst_select_fcmp_ogt_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_ogt_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.ule.s $f12, $f0
+; MM32R3-NEXT: c.ule.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -416,10 +410,9 @@ entry:
define float @tst_select_fcmp_oge_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_oge_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.ult.s $f0, $f14
+; M2-NEXT: c.ult.s $f12, $f14
; M2-NEXT: bc1f $BB5_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB5_2: # %entry
@@ -429,14 +422,14 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_oge_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.ult.s $f12, $f0
+; CMOV32R1-NEXT: c.ult.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oge_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.ult.s $f12, $f0
+; CMOV32R2-NEXT: c.ult.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -448,10 +441,9 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_oge_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.ult.s $f0, $f13
+; M3-NEXT: c.ult.s $f12, $f13
; M3-NEXT: bc1f .LBB5_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB5_2: # %entry
@@ -461,7 +453,7 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_oge_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.ult.s $f12, $f0
+; CMOV64-NEXT: c.ult.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -474,7 +466,7 @@ define float @tst_select_fcmp_oge_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_oge_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.ult.s $f12, $f0
+; MM32R3-NEXT: c.ult.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -492,10 +484,9 @@ entry:
define float @tst_select_fcmp_oeq_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_oeq_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.eq.s $f0, $f14
+; M2-NEXT: c.eq.s $f12, $f14
; M2-NEXT: bc1t $BB6_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB6_2: # %entry
@@ -505,14 +496,14 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_oeq_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.eq.s $f12, $f0
+; CMOV32R1-NEXT: c.eq.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movt.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_oeq_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.eq.s $f12, $f0
+; CMOV32R2-NEXT: c.eq.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -524,10 +515,9 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_oeq_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.eq.s $f0, $f13
+; M3-NEXT: c.eq.s $f12, $f13
; M3-NEXT: bc1t .LBB6_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB6_2: # %entry
@@ -537,7 +527,7 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_oeq_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.eq.s $f12, $f0
+; CMOV64-NEXT: c.eq.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -550,7 +540,7 @@ define float @tst_select_fcmp_oeq_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_oeq_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.eq.s $f12, $f0
+; MM32R3-NEXT: c.eq.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movt.s $f0, $f12, $fcc0
;
@@ -568,10 +558,9 @@ entry:
define float @tst_select_fcmp_one_float(float %x, float %y) {
; M2-LABEL: tst_select_fcmp_one_float:
; M2: # %bb.0: # %entry
-; M2-NEXT: mov.s $f0, $f12
-; M2-NEXT: c.ueq.s $f0, $f14
+; M2-NEXT: c.ueq.s $f12, $f14
; M2-NEXT: bc1f $BB7_2
-; M2-NEXT: nop
+; M2-NEXT: mov.s $f0, $f12
; M2-NEXT: # %bb.1: # %entry
; M2-NEXT: mov.s $f0, $f14
; M2-NEXT: $BB7_2: # %entry
@@ -581,14 +570,14 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
; CMOV32R1-LABEL: tst_select_fcmp_one_float:
; CMOV32R1: # %bb.0: # %entry
; CMOV32R1-NEXT: mov.s $f0, $f14
-; CMOV32R1-NEXT: c.ueq.s $f12, $f0
+; CMOV32R1-NEXT: c.ueq.s $f12, $f14
; CMOV32R1-NEXT: jr $ra
; CMOV32R1-NEXT: movf.s $f0, $f12, $fcc0
;
; CMOV32R2-LABEL: tst_select_fcmp_one_float:
; CMOV32R2: # %bb.0: # %entry
; CMOV32R2-NEXT: mov.s $f0, $f14
-; CMOV32R2-NEXT: c.ueq.s $f12, $f0
+; CMOV32R2-NEXT: c.ueq.s $f12, $f14
; CMOV32R2-NEXT: jr $ra
; CMOV32R2-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -603,10 +592,9 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
;
; M3-LABEL: tst_select_fcmp_one_float:
; M3: # %bb.0: # %entry
-; M3-NEXT: mov.s $f0, $f12
-; M3-NEXT: c.ueq.s $f0, $f13
+; M3-NEXT: c.ueq.s $f12, $f13
; M3-NEXT: bc1f .LBB7_2
-; M3-NEXT: nop
+; M3-NEXT: mov.s $f0, $f12
; M3-NEXT: # %bb.1: # %entry
; M3-NEXT: mov.s $f0, $f13
; M3-NEXT: .LBB7_2: # %entry
@@ -616,7 +604,7 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
; CMOV64-LABEL: tst_select_fcmp_one_float:
; CMOV64: # %bb.0: # %entry
; CMOV64-NEXT: mov.s $f0, $f13
-; CMOV64-NEXT: c.ueq.s $f12, $f0
+; CMOV64-NEXT: c.ueq.s $f12, $f13
; CMOV64-NEXT: jr $ra
; CMOV64-NEXT: movf.s $f0, $f12, $fcc0
;
@@ -632,7 +620,7 @@ define float @tst_select_fcmp_one_float(float %x, float %y) {
; MM32R3-LABEL: tst_select_fcmp_one_float:
; MM32R3: # %bb.0: # %entry
; MM32R3-NEXT: mov.s $f0, $f14
-; MM32R3-NEXT: c.ueq.s $f12, $f0
+; MM32R3-NEXT: c.ueq.s $f12, $f14
; MM32R3-NEXT: jr $ra
; MM32R3-NEXT: movf.s $f0, $f12, $fcc0
;
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
index 7d90b0ec8d0..8c6138e0eba 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -857,7 +857,7 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: sw $5, 32($sp) # 4-byte Folded Spill
; MMR3-NEXT: move $1, $4
; MMR3-NEXT: lw $16, 76($sp)
-; MMR3-NEXT: sllv $2, $1, $16
+; MMR3-NEXT: sllv $2, $4, $16
; MMR3-NEXT: not16 $4, $16
; MMR3-NEXT: sw $4, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: srl16 $3, $5, 1
@@ -945,7 +945,7 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: .cfi_offset 16, -8
; MMR6-NEXT: move $11, $4
; MMR6-NEXT: lw $3, 44($sp)
-; MMR6-NEXT: sllv $1, $11, $3
+; MMR6-NEXT: sllv $1, $4, $3
; MMR6-NEXT: not16 $2, $3
; MMR6-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: srl16 $16, $5, 1
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
index d06170f1db1..d839a6e4c88 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -163,7 +163,7 @@ entry:
; MMR3: subu16 $5, $[[T19]], $[[T20]]
; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $[[T0]], 8($sp)
+; MMR6: sw $7, 8($sp)
; MMR6: move $[[T1:[0-9]+]], $5
; MMR6: sw $4, 12($sp)
; MMR6: lw $[[T2:[0-9]+]], 48($sp)