author     Aleksandar Beserminji <Aleksandar.Beserminji@mips.com>  2017-12-11 11:21:40 +0000
committer  Aleksandar Beserminji <Aleksandar.Beserminji@mips.com>  2017-12-11 11:21:40 +0000
commit     d6dada17ff7f61f43cd6286d215f569433429804 (patch)
tree       da6258891ba59a60f960736ce8abfd1605a33e65 /llvm/test/CodeGen
parent     293d6c39d3490ab6c09128a087ae605ada34387e (diff)
[mips] Removal of microMIPS64R6
All files and parts of files related to microMIPS64R6 are removed. When the target is microMIPS64R6, errors are printed. This is the LLVM part of the patch.

Differential Revision: https://reviews.llvm.org/D35625

llvm-svn: 320350
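For reference, the patch adds a regression test for the new error path (micromips64r6-unsupported.ll, included in the diff below). A minimal reproduction, based on that test's RUN line and CHECK pattern, looks like the sketch below; the module body itself contains no functions, since only the target configuration matters (the file name here is hypothetical):

    ; micromips64r6-repro.ll (hypothetical file name)
    ; RUN: not llc -mtriple=mips64-unknown-linux -mcpu=mips64r6 -mattr=+micromips %s 2>&1 | FileCheck %s
    ; CHECK: LLVM ERROR: microMIPS64R6 is not supported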
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/Mips/cannot-copy-registers.ll      | 24
-rw-r--r--  llvm/test/CodeGen/Mips/countleading.ll               |  7
-rw-r--r--  llvm/test/CodeGen/Mips/fcmp.ll                       | 54
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/add.ll                | 35
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/and.ll                | 46
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll             |  1
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/mul.ll                | 22
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/not.ll                | 21
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/or.ll                 | 46
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll               |  7
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/srem.ll               |  7
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/sub.ll                | 12
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/udiv.ll               |  7
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/urem.ll               |  7
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/xor.ll                | 21
-rw-r--r--  llvm/test/CodeGen/Mips/lw16-base-reg.ll              |  2
-rw-r--r--  llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll        | 15
-rw-r--r--  llvm/test/CodeGen/Mips/micromips64r6-unsupported.ll  |  5
-rw-r--r--  llvm/test/CodeGen/Mips/mips64fpldst.ll               |  2
-rw-r--r--  llvm/test/CodeGen/Mips/mips64shift.ll                |  1
-rw-r--r--  llvm/test/CodeGen/Mips/tailcall/tailcall.ll          |  5
21 files changed, 5 insertions, 342 deletions
diff --git a/llvm/test/CodeGen/Mips/cannot-copy-registers.ll b/llvm/test/CodeGen/Mips/cannot-copy-registers.ll
deleted file mode 100644
index 75cceb2011e..00000000000
--- a/llvm/test/CodeGen/Mips/cannot-copy-registers.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc -march=mips64 -mcpu=mips64r6 -mattr=+micromips \
-; RUN: -relocation-model=pic -O3 < %s
-
-; Check that message "Cannot copy registers" is not asserted in case of microMIPS64r6.
-
-@x = global i32 65504, align 4
-@y = global i32 60929, align 4
-@.str = private unnamed_addr constant [7 x i8] c"%08x \0A\00", align 1
-
-define i32 @main() nounwind {
-entry:
- %0 = load i32, i32* @x, align 4
- %and1 = and i32 %0, 4
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and1)
-
- %1 = load i32, i32* @y, align 4
- %and2 = and i32 %1, 5
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and2)
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/llvm/test/CodeGen/Mips/countleading.ll b/llvm/test/CodeGen/Mips/countleading.ll
index 1b61be5ed2a..35933f668fb 100644
--- a/llvm/test/CodeGen/Mips/countleading.ll
+++ b/llvm/test/CodeGen/Mips/countleading.ll
@@ -5,7 +5,6 @@
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck -check-prefixes=ALL,MIPS64-GT-R1 %s
; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck -check-prefixes=ALL,MIPS64-GT-R1 %s
; RUN: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck -check-prefixes=ALL,MIPS64-GT-R1 %s
-; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=micromips < %s | FileCheck -check-prefixes=ALL,MICROMIPS64 %s
; Prefixes:
; ALL - All
@@ -22,8 +21,6 @@ entry:
; MIPS64-GT-R1: clz $2, $4
-; MICROMIPS64: clz $2, $4
-
%tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
ret i32 %tmp1
}
@@ -40,8 +37,6 @@ entry:
; MIPS64-GT-R1: clo $2, $4
-; MICROMIPS64: clo $2, $4
-
%neg = xor i32 %X, -1
%tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
ret i32 %tmp1
@@ -63,7 +58,6 @@ entry:
; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
; MIPS64-GT-R1: dclz $2, $4
-; MICROMIPS64: dclz $2, $4
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
ret i64 %tmp1
@@ -89,7 +83,6 @@ entry:
; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
; MIPS64-GT-R1: dclo $2, $4
-; MICROMIPS64: dclo $2, $4
%neg = xor i64 %X, -1
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
diff --git a/llvm/test/CodeGen/Mips/fcmp.ll b/llvm/test/CodeGen/Mips/fcmp.ll
index e5c40f2bfd4..34088beb224 100644
--- a/llvm/test/CodeGen/Mips/fcmp.ll
+++ b/llvm/test/CodeGen/Mips/fcmp.ll
@@ -16,8 +16,6 @@
; RUN: -check-prefixes=ALL,MM,MM32R3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN: -check-prefixes=ALL,MM,MMR6,MM32R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM,MMR6,MM64R6
define i32 @false_f32(float %a, float %b) nounwind {
; ALL-LABEL: false_f32:
@@ -61,7 +59,6 @@ define i32 @oeq_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -95,7 +92,6 @@ define i32 @ogt_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -129,7 +125,6 @@ define i32 @oge_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.le.s $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.le.s $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -163,7 +158,6 @@ define i32 @olt_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -197,7 +191,6 @@ define i32 @ole_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -233,7 +226,6 @@ define i32 @one_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -270,7 +262,6 @@ define i32 @ord_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -305,7 +296,6 @@ define i32 @ueq_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -339,7 +329,6 @@ define i32 @ugt_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -373,7 +362,6 @@ define i32 @uge_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -407,7 +395,6 @@ define i32 @ult_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -441,7 +428,6 @@ define i32 @ule_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -477,7 +463,6 @@ define i32 @une_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -512,7 +497,6 @@ define i32 @uno_f32(float %a, float %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -580,7 +564,6 @@ define i32 @oeq_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -614,7 +597,6 @@ define i32 @ogt_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -648,7 +630,6 @@ define i32 @oge_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.le.d $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.le.d $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -682,7 +663,6 @@ define i32 @olt_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -716,7 +696,6 @@ define i32 @ole_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -752,7 +731,6 @@ define i32 @one_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -789,7 +767,6 @@ define i32 @ord_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -824,7 +801,6 @@ define i32 @ueq_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -858,7 +834,6 @@ define i32 @ugt_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -892,7 +867,6 @@ define i32 @uge_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f14, $f12
-; MM64R6-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f13, $f12
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -926,7 +900,6 @@ define i32 @ult_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -960,7 +933,6 @@ define i32 @ule_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -996,7 +968,6 @@ define i32 @une_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movt $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: not $[[T2:[0-9]+]], $[[T1]]
; MMR6-DAG: andi16 $2, $[[T2]], 1
@@ -1031,7 +1002,6 @@ define i32 @uno_f64(double %a, double %b) nounwind {
; MM32R3-DAG: movf $[[T1]], $[[T0]], $fcc0
; MM32R6-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
-; MM64R6-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
; MMR6-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
; MMR6-DAG: andi16 $2, $[[T1]], 1
@@ -1102,18 +1072,6 @@ entry:
; MM32R6-DAG: andi16 $[[T5:[0-9]+]], $[[T4]], 1
; MM32R6-DAG: bnezc $[[T5]],
-; MM64R6-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
-; MM64R6-DAG: lui $[[T1:[0-9]+]], %highest(.LCPI32_0)
-; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %higher(.LCPI32_0)
-; MM64R6-DAG: dsll $[[T3:[0-9]+]], $[[T2]], 16
-; MM64R6-DAG: daddiu $[[T4:[0-9]+]], $[[T3]], %hi(.LCPI32_0)
-; MM64R6-DAG: dsll $[[T5:[0-9]+]], $[[T4]], 16
-; MM64R6-DAG: lwc1 $[[T6:f[0-9]+]], %lo(.LCPI32_0)($[[T5]])
-; MM64R6-DAG: cmp.le.s $[[T7:f[0-9]+]], $[[T0]], $[[T6]]
-; MM64R6-DAG: mfc1 $[[T8:[0-9]+]], $[[T7]]
-; MM64R6-DAG: andi16 $[[T9:[0-9]+]], $[[T8]], 1
-; MM64R6-DAG: bnezc $[[T9]],
-
%add = fadd fast float %at, %angle
%cmp = fcmp ogt float %add, 1.000000e+00
br i1 %cmp, label %if.then, label %if.end
@@ -1172,18 +1130,6 @@ entry:
; MM32R6-DAG: andi16 $[[T5:[0-9]+]], $[[T4]], 1
; MM32R6-DAG: bnezc $[[T5]],
-; MM64R6-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
-; MM64R6-DAG: lui $[[T1:[0-9]+]], %highest(.LCPI33_0)
-; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %higher(.LCPI33_0)
-; MM64R6-DAG: dsll $[[T3:[0-9]+]], $[[T2]], 16
-; MM64R6-DAG: daddiu $[[T4:[0-9]+]], $[[T3]], %hi(.LCPI33_0)
-; MM64R6-DAG: dsll $[[T5:[0-9]+]], $[[T4]], 16
-; MM64R6-DAG: ldc1 $[[T6:f[0-9]+]], %lo(.LCPI33_0)($[[T5]])
-; MM64R6-DAG: cmp.le.d $[[T7:f[0-9]+]], $[[T0]], $[[T6]]
-; MM64R6-DAG: mfc1 $[[T8:[0-9]+]], $[[T7]]
-; MM64R6-DAG: andi16 $[[T9:[0-9]+]], $[[T8]], 1
-; MM64R6-DAG: bnezc $[[T9]],
-
%add = fadd fast double %at, %angle
%cmp = fcmp ogt double %add, 1.000000e+00
br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/add.ll b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
index 63884eb03b8..2a7ae5a7153 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
@@ -28,8 +28,6 @@
; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -O2 | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM64
; FIXME: This code sequence is inefficient as it should be 'subu $[[T0]], $zero, $[[T0]'.
@@ -122,8 +120,6 @@ entry:
; MM32: sltu $[[T1:[0-9]+]], $3, $5
; MM32: addu16 $2, $[[T0]], $[[T1]]
- ; MM64: daddu $2, $4, $5
-
%r = add i64 %a, %b
ret i64 %r
}
@@ -228,13 +224,6 @@ entry:
; MMR6: addu16 $2, $[[T16]], $[[T20]]
; MMR6: addu16 $2, $[[T20]], $[[T21]]
- ; MM64: daddu $[[T0:[0-9]+]], $4, $6
- ; MM64: daddu $3, $5, $7
- ; MM64: sltu $[[T1:[0-9]+]], $3, $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $[[T0]], $[[T3]]
-
%r = add i128 %a, %b
ret i128 %r
}
@@ -262,9 +251,6 @@ define signext i8 @add_i8_4(i8 signext %a) {
; MM32: addiur2 $[[T0:[0-9]+]], $4, 4
; MM32: seb $2, $[[T0]]
- ; MM64: addiur2 $[[T0:[0-9]+]], $4, 4
- ; MM64: seb $2, $[[T0]]
-
%r = add i8 4, %a
ret i8 %r
}
@@ -283,9 +269,6 @@ define signext i16 @add_i16_4(i16 signext %a) {
; MM32: addiur2 $[[T0:[0-9]+]], $4, 4
; MM32: seh $2, $[[T0]]
- ; MM64: addiur2 $[[T0:[0-9]+]], $4, 4
- ; MM64: seh $2, $[[T0]]
-
%r = add i16 4, %a
ret i16 %r
}
@@ -299,8 +282,6 @@ define signext i32 @add_i32_4(i32 signext %a) {
; MM32: addiur2 $2, $4, 4
- ; MM64: addiur2 $2, $4, 4
-
%r = add i32 4, %a
ret i32 %r
}
@@ -319,8 +300,6 @@ define signext i64 @add_i64_4(i64 signext %a) {
; GP64: daddiu $2, $4, 4
- ; MM64: daddiu $2, $4, 4
-
%r = add i64 4, %a
ret i64 %r
}
@@ -384,12 +363,6 @@ define signext i128 @add_i128_4(i128 signext %a) {
; MMR6: move $4, $7
; MMR6: move $5, $[[T1]]
- ; MM64: daddiu $[[T0:[0-9]+]], $5, 4
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
-
%r = add i128 4, %a
ret i128 %r
}
@@ -477,8 +450,6 @@ define signext i64 @add_i64_3(i64 signext %a) {
; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
; MM32: addu16 $2, $4, $[[T2]]
- ; MM64: daddiu $2, $4, 3
-
%r = add i64 3, %a
ret i64 %r
}
@@ -545,12 +516,6 @@ define signext i128 @add_i128_3(i128 signext %a) {
; MMR6: move $4, $[[T5]]
; MMR6: move $5, $[[T1]]
- ; MM64: daddiu $[[T0:[0-9]+]], $5, 3
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
-
%r = add i128 3, %a
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/and.ll b/llvm/test/CodeGen/Mips/llvm-ir/and.ll
index 18d7a439f62..9ea810dfcbf 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/and.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/and.ll
@@ -28,8 +28,6 @@
; RUN: -check-prefixes=ALL,MM,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN: -check-prefixes=ALL,MM,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM,MM64
define signext i1 @and_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -42,8 +40,6 @@ entry:
; MM32: and16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: and $1, $4, $5
-
%r = and i1 %a, %b
ret i1 %r
}
@@ -59,8 +55,6 @@ entry:
; MM32: and16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: and $1, $4, $5
-
%r = and i8 %a, %b
ret i8 %r
}
@@ -76,8 +70,6 @@ entry:
; MM32: and16 $[[T0:[0-9]+]], $5
; MM32 move $2, $[[T0]]
- ; MM64: and $1, $4, $5
-
%r = and i16 %a, %b
ret i16 %r
}
@@ -94,9 +86,6 @@ entry:
; MM32: and16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: and $[[T0:[0-9]+]], $4, $5
- ; MM64: sll $2, $[[T0]], 0
-
%r = and i32 %a, %b
ret i32 %r
}
@@ -115,8 +104,6 @@ entry:
; MM32: move $2, $[[T0]]
; MM32: move $3, $[[T1]]
- ; MM64: and $2, $4, $5
-
%r = and i64 %a, %b
ret i64 %r
}
@@ -146,9 +133,6 @@ entry:
; MM32: lw $[[T3:[0-9]+]], 28($sp)
; MM32: and16 $[[T3]], $7
- ; MM64: and $2, $4, $6
- ; MM64: and $3, $5, $7
-
%r = and i128 %a, %b
ret i128 %r
}
@@ -221,8 +205,6 @@ entry:
; MM32: andi16 $3, $5, 4
; MM32: li16 $2, 0
- ; MM64: andi $2, $4, 4
-
%r = and i64 4, %b
ret i64 %r
}
@@ -244,9 +226,6 @@ entry:
; MM32: li16 $3, 0
; MM32: li16 $4, 0
- ; MM64: andi $3, $5, 4
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 4, %b
ret i128 %r
}
@@ -315,8 +294,6 @@ entry:
; MM32: andi16 $3, $5, 31
; MM32: li16 $2, 0
- ; MM64: andi $2, $4, 31
-
%r = and i64 31, %b
ret i64 %r
}
@@ -338,9 +315,6 @@ entry:
; MM32: li16 $3, 0
; MM32: li16 $4, 0
- ; MM64: andi $3, $5, 31
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 31, %b
ret i128 %r
}
@@ -405,8 +379,6 @@ entry:
; MM32: andi16 $3, $5, 255
; MM32: li16 $2, 0
- ; MM64: andi $2, $4, 255
-
%r = and i64 255, %b
ret i64 %r
}
@@ -428,9 +400,6 @@ entry:
; MM32: li16 $3, 0
; MM32: li16 $4, 0
- ; MM64: andi $3, $5, 255
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 255, %b
ret i128 %r
}
@@ -506,8 +475,6 @@ entry:
; MM32: andi16 $3, $5, 32768
; MM32: li16 $2, 0
- ; MM64: andi $2, $4, 32768
-
%r = and i64 32768, %b
ret i64 %r
}
@@ -529,9 +496,6 @@ entry:
; MM32: li16 $3, 0
; MM32: li16 $4, 0
- ; MM64: andi $3, $5, 32768
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 32768, %b
ret i128 %r
}
@@ -588,8 +552,6 @@ entry:
; MM32-DAG: andi $3, $5, 65
; MM32-DAG: li16 $2, 0
- ; MM64: andi $2, $4, 65
-
%r = and i64 65, %b
ret i64 %r
}
@@ -611,9 +573,6 @@ entry:
; MM32-DAG: li16 $3, 0
; MM32-DAG: li16 $4, 0
- ; MM64: andi $3, $5, 65
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 65, %b
ret i128 %r
}
@@ -678,8 +637,6 @@ entry:
; MM32-DAG: andi $3, $5, 256
; MM32-DAG: li16 $2, 0
- ; MM64: andi $2, $4, 256
-
%r = and i64 256, %b
ret i64 %r
}
@@ -701,9 +658,6 @@ entry:
; MM32-DAG: li16 $3, 0
; MM32-DAG: li16 $4, 0
- ; MM64: andi $3, $5, 256
- ; MM64: daddiu $2, $zero, 0
-
%r = and i128 256, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll b/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
index fadcfdb0fb4..192e10ae8fc 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
@@ -1,7 +1,6 @@
; RUN: llc < %s -march=mips -mcpu=mips32r2 -mattr=+micromips -relocation-model=pic | FileCheck %s
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -relocation-model=pic | FileCheck %s
@us = global i16 0, align 2
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/mul.ll b/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
index 1562372ce9a..5e85ecf2a93 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
@@ -26,8 +26,6 @@
; RUN: FileCheck %s -check-prefixes=MM32,MM32R3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | \
; RUN: FileCheck %s -check-prefixes=MM32,MM32R6
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips -target-abi n64 -relocation-model=pic | \
-; RUN: FileCheck %s -check-prefix=MM64R6
define signext i1 @mul_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -59,11 +57,6 @@ entry:
; 64R6: andi $[[T0]], $[[T0]], 1
; 64R6: negu $2, $[[T0]]
- ; MM64R6: mul $[[T0:[0-9]+]], $4, $5
- ; MM64R6: andi16 $[[T0]], $[[T0]], 1
- ; MM64R6: li16 $[[T1:[0-9]+]], 0
- ; MM64R6: subu16 $2, $[[T1]], $[[T0]]
-
; MM32: mul $[[T0:[0-9]+]], $4, $5
; MM32: andi16 $[[T0]], $[[T0]], 1
; MM32: li16 $[[T1:[0-9]+]], 0
@@ -107,9 +100,6 @@ entry:
; 64R6: mul $[[T0:[0-9]+]], $4, $5
; 64R6: seb $2, $[[T0]]
- ; MM64R6: mul $[[T0:[0-9]+]], $4, $5
- ; MM64R6: seb $2, $[[T0]]
-
; MM32: mul $[[T0:[0-9]+]], $4, $5
; MM32: seb $2, $[[T0]]
@@ -151,9 +141,6 @@ entry:
; 64R6: mul $[[T0:[0-9]+]], $4, $5
; 64R6: seh $2, $[[T0]]
- ; MM64R6: mul $[[T0:[0-9]+]], $4, $5
- ; MM64R6: seh $2, $[[T0]]
-
; MM32: mul $[[T0:[0-9]+]], $4, $5
; MM32: seh $2, $[[T0]]
@@ -173,7 +160,6 @@ entry:
; 64R1-R5: mul $2, $4, $5
; 64R6: mul $2, $4, $5
- ; MM64R6: mul $2, $4, $5
; MM32: mul $2, $4, $5
@@ -217,7 +203,6 @@ entry:
; 64R1-R5: mflo $2
; 64R6: dmul $2, $4, $5
- ; MM64R6: dmul $2, $4, $5
; MM32R3: multu $[[T0:[0-9]+]], $7
; MM32R3: mflo $[[T1:[0-9]+]]
@@ -261,13 +246,6 @@ entry:
; 64R6: daddu $2, $[[T1]], $[[T0]]
; 64R6-DAG: dmul $3, $5, $7
- ; MM64R6-DAG: dmul $[[T1:[0-9]+]], $5, $6
- ; MM64R6: dmuhu $[[T2:[0-9]+]], $5, $7
- ; MM64R6: daddu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
- ; MM64R6-DAG: dmul $[[T0:[0-9]+]], $4, $7
- ; MM64R6: daddu $2, $[[T1]], $[[T0]]
- ; MM64R6-DAG: dmul $3, $5, $7
-
; MM32: lw $25, %call16(__multi3)($16)
%r = mul i128 %a, %b
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/not.ll b/llvm/test/CodeGen/Mips/llvm-ir/not.ll
index ab7a3c4613a..6a27612c0e2 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/not.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/not.ll
@@ -26,8 +26,6 @@
; RUN: -check-prefixes=ALL,MM,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN: -check-prefixes=ALL,MM,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM,MM64
define signext i1 @not_i1(i1 signext %a) {
entry:
@@ -98,9 +96,6 @@ entry:
; MM32: not16 $2, $4
; MM32: not16 $3, $5
- ; MM64: daddiu $[[T0:[0-9]+]], $zero, -1
- ; MM64: xor $2, $4, $[[T0]]
-
%r = xor i64 %a, -1
ret i64 %r
}
@@ -123,10 +118,6 @@ entry:
; MM32: not16 $4, $6
; MM32: not16 $5, $7
- ; MM64: daddiu $[[T0:[0-9]+]], $zero, -1
- ; MM64: xor $2, $4, $[[T0]]
- ; MM64: xor $3, $5, $[[T0]]
-
%r = xor i128 %a, -1
ret i128 %r
}
@@ -138,7 +129,6 @@ entry:
; GP32: nor $2, $5, $4
; GP64: or $1, $5, $4
; MM32: nor $2, $5, $4
- ; MM64: or $1, $5, $4
%or = or i1 %b, %a
%r = xor i1 %or, -1
@@ -152,7 +142,6 @@ entry:
; GP32: nor $2, $5, $4
; GP64: or $1, $5, $4
; MM32: nor $2, $5, $4
- ; MM64: or $1, $5, $4
%or = or i8 %b, %a
%r = xor i8 %or, -1
@@ -166,7 +155,6 @@ entry:
; GP32: nor $2, $5, $4
; GP64: or $1, $5, $4
; MM32: nor $2, $5, $4
- ; MM64: or $1, $5, $4
%or = or i16 %b, %a
%r = xor i16 %or, -1
@@ -185,10 +173,6 @@ entry:
; MM32: nor $2, $5, $4
- ; MM64: or $[[T0:[0-9]+]], $5, $4
- ; MM64: sll $[[T1:[0-9]+]], $[[T0]], 0
- ; MM64: not16 $2, $[[T1]]
-
%or = or i32 %b, %a
%r = xor i32 %or, -1
ret i32 %r
@@ -207,8 +191,6 @@ entry:
; MM32: nor $2, $6, $4
; MM32: nor $3, $7, $5
- ; MM64: nor $2, $5, $4
-
%or = or i64 %b, %a
%r = xor i64 %or, -1
ret i64 %r
@@ -239,9 +221,6 @@ entry:
; MM32: lw $[[T3:[0-9]+]], 28($sp)
; MM32: nor $5, $[[T3]], $7
- ; MM64: nor $2, $6, $4
- ; MM64: nor $3, $7, $5
-
%or = or i128 %b, %a
%r = xor i128 %or, -1
ret i128 %r
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/or.ll b/llvm/test/CodeGen/Mips/llvm-ir/or.ll
index 609cf0210c3..6850bed9659 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/or.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/or.ll
@@ -15,8 +15,6 @@
; RUN: -check-prefixes=ALL,MM,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN: -check-prefixes=ALL,MM,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM,MM64
define signext i1 @or_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -29,8 +27,6 @@ entry:
; MM32: or16 $[[T0:[0-9]+]], $5
; MM32 move $2, $[[T0]]
- ; MM64: or $1, $4, $5
-
%r = or i1 %a, %b
ret i1 %r
}
@@ -46,8 +42,6 @@ entry:
; MM32: or16 $[[T0:[0-9]+]], $5
; MM32 move $2, $[[T0]]
- ; MM64: or $1, $4, $5
-
%r = or i8 %a, %b
ret i8 %r
}
@@ -63,8 +57,6 @@ entry:
; MM32: or16 $[[T0:[0-9]+]], $5
; MM32 move $2, $[[T0]]
- ; MM64: or $1, $4, $5
-
%r = or i16 %a, %b
ret i16 %r
}
@@ -82,9 +74,6 @@ entry:
; MM32: or16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: or $[[T0:[0-9]+]], $4, $5
- ; MM64: sll $2, $[[T0]], 0
-
%r = or i32 %a, %b
ret i32 %r
}
@@ -103,8 +92,6 @@ entry:
; MM32: move $2, $[[T0]]
; MM32: move $3, $[[T1]]
- ; MM64: or $2, $4, $5
-
%r = or i64 %a, %b
ret i64 %r
}
@@ -134,9 +121,6 @@ entry:
; MM32: lw $[[T3:[0-9]+]], 28($sp)
; MM32: or16 $[[T3]], $7
- ; MM64: or $2, $4, $6
- ; MM64: or $3, $5, $7
-
%r = or i128 %a, %b
ret i128 %r
}
@@ -193,8 +177,6 @@ entry:
; MM32: ori $3, $5, 4
; MM32: move $2, $4
- ; MM64: ori $2, $4, 4
-
%r = or i64 4, %b
ret i64 %r
}
@@ -218,9 +200,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 4
- ; MM64: move $2, $4
-
%r = or i128 4, %b
ret i128 %r
}
@@ -281,8 +260,6 @@ entry:
; MM32: ori $3, $5, 31
; MM32: move $2, $4
- ; MM64: ori $2, $4, 31
-
%r = or i64 31, %b
ret i64 %r
}
@@ -306,9 +283,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 31
- ; MM64: move $2, $4
-
%r = or i128 31, %b
ret i128 %r
}
@@ -373,8 +347,6 @@ entry:
; MM32: ori $3, $5, 255
; MM32: move $2, $4
- ; MM64: ori $2, $4, 255
-
%r = or i64 255, %b
ret i64 %r
}
@@ -398,9 +370,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 255
- ; MM64: move $2, $4
-
%r = or i128 255, %b
ret i128 %r
}
@@ -464,8 +433,6 @@ entry:
; MM32: ori $3, $5, 32768
; MM32: move $2, $4
- ; MM64: ori $2, $4, 32768
-
%r = or i64 32768, %b
ret i64 %r
}
@@ -489,9 +456,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 32768
- ; MM64: move $2, $4
-
%r = or i128 32768, %b
ret i128 %r
}
@@ -552,8 +516,6 @@ entry:
; MM32: ori $3, $5, 65
; MM32: move $2, $4
- ; MM64: ori $2, $4, 65
-
%r = or i64 65, %b
ret i64 %r
}
@@ -577,9 +539,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 65
- ; MM64: move $2, $4
-
%r = or i128 65, %b
ret i128 %r
}
@@ -636,8 +595,6 @@ entry:
; MM32: ori $3, $5, 256
; MM32: move $2, $4
- ; MM64: ori $2, $4, 256
-
%r = or i64 256, %b
ret i64 %r
}
@@ -661,9 +618,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: ori $3, $5, 256
- ; MM64: move $2, $4
-
%r = or i128 256, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
index defd25bb41a..11e766319e7 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -30,8 +30,6 @@
; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips -target-abi n64 -relocation-model=pic | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM64
define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -174,9 +172,6 @@ entry:
; MM32: lw $25, %call16(__divdi3)($2)
- ; MM64: ddiv $2, $4, $5
- ; MM64: teq $5, $zero, 7
-
%r = sdiv i64 %a, %b
ret i64 %r
}
@@ -192,8 +187,6 @@ entry:
; MM32: lw $25, %call16(__divti3)($16)
- ; MM64: ld $25, %call16(__divti3)($2)
-
%r = sdiv i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
index 42664d7457e..971b1e00d8a 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -30,8 +30,6 @@
; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM64
define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -166,9 +164,6 @@ entry:
; MM32: lw $25, %call16(__moddi3)($2)
- ; MM64: dmod $2, $4, $5
- ; MM64: teq $5, $zero, 7
-
%r = srem i64 %a, %b
ret i64 %r
}
@@ -184,8 +179,6 @@ entry:
; MM32: lw $25, %call16(__modti3)($16)
- ; MM64: ld $25, %call16(__modti3)($2)
-
%r = srem i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
index 2ab7225f445..d06170f1db1 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -28,8 +28,6 @@
; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP64,MM64
define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -213,16 +211,6 @@ entry:
; GP64-R2: dsubu $2, $1, $[[T1]]
; GP64-R2: dsubu $3, $5, $7
-; FIXME: Again, redundant sign extension. Also, microMIPSR6 has the
-; dext instruction which should be used here.
-
-; MM64: dsubu $[[T0:[0-9]+]], $4, $6
-; MM64: sltu $[[T1:[0-9]+]], $5, $7
-; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
-; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
-; MM64: dsubu $2, $[[T0]], $[[T3]]
-; MM64: dsubu $3, $5, $7
-
%r = sub i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
index 78ab36442a9..70882a33869 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -30,8 +30,6 @@
; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM64
define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
entry:
@@ -136,9 +134,6 @@ entry:
; MM32: lw $25, %call16(__udivdi3)($2)
- ; MM64: ddivu $2, $4, $5
- ; MM64: teq $5, $zero, 7
-
%r = udiv i64 %a, %b
ret i64 %r
}
@@ -154,8 +149,6 @@ entry:
; MM32: lw $25, %call16(__udivti3)($16)
- ; MM64: ld $25, %call16(__udivti3)($2)
-
%r = udiv i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
index 160c126c7e3..d0ac39d61dc 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -30,8 +30,6 @@
; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM64
define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -192,9 +190,6 @@ entry:
; MM32: lw $25, %call16(__umoddi3)($2)
- ; MM64: dmodu $2, $4, $5
- ; MM64: teq $5, $zero, 7
-
%r = urem i64 %a, %b
ret i64 %r
}
@@ -210,8 +205,6 @@ entry:
; MM32: lw $25, %call16(__umodti3)($16)
- ; MM64: ld $25, %call16(__umodti3)($2)
-
%r = urem i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/xor.ll b/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
index 068d390839d..8fcf11feace 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
@@ -26,8 +26,6 @@
; RUN: -check-prefixes=ALL,MM,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN: -check-prefixes=ALL,MM,MM32
-; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM,MM64
define signext i1 @xor_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -40,8 +38,6 @@ entry:
; MM32: xor16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: xor $1, $4, $5
-
%r = xor i1 %a, %b
ret i1 %r
}
@@ -57,8 +53,6 @@ entry:
; MM32: xor16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: xor $1, $4, $5
-
%r = xor i8 %a, %b
ret i8 %r
}
@@ -74,8 +68,6 @@ entry:
; MM32: xor16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: xor $1, $4, $5
-
%r = xor i16 %a, %b
ret i16 %r
}
@@ -92,9 +84,6 @@ entry:
; MM32: xor16 $[[T0:[0-9]+]], $5
; MM32: move $2, $[[T0]]
- ; MM64: xor $[[T0:[0-9]+]], $4, $5
- ; MM64: sll $2, $[[T0]], 0
-
%r = xor i32 %a, %b
ret i32 %r
}
@@ -113,8 +102,6 @@ entry:
; MM32: move $2, $[[T0]]
; MM32: move $3, $[[T1]]
- ; MM64: xor $2, $4, $5
-
%r = xor i64 %a, %b
ret i64 %r
}
@@ -144,9 +131,6 @@ entry:
; MM32: lw $[[T3:[0-9]+]], 28($sp)
; MM32: xor16 $[[T3]], $7
- ; MM64: xor $2, $4, $6
- ; MM64: xor $3, $5, $7
-
%r = xor i128 %a, %b
ret i128 %r
}
@@ -203,8 +187,6 @@ entry:
; MM32: xori $3, $5, 4
; MM32: move $2, $4
- ; MM64: xori $2, $4, 4
-
%r = xor i64 4, %b
ret i64 %r
}
@@ -228,9 +210,6 @@ entry:
; MM32: move $4, $6
; MM32: move $5, $[[T0]]
- ; MM64: xori $3, $5, 4
- ; MM64: move $2, $4
-
%r = xor i128 4, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/lw16-base-reg.ll b/llvm/test/CodeGen/Mips/lw16-base-reg.ll
index 09150421a96..9eeb5d21135 100644
--- a/llvm/test/CodeGen/Mips/lw16-base-reg.ll
+++ b/llvm/test/CodeGen/Mips/lw16-base-reg.ll
@@ -1,7 +1,5 @@
; RUN: llc %s -march=mips -mcpu=mips32r3 -mattr=micromips -filetype=asm \
; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
-; RUN: llc %s -march=mips64 -mcpu=mips64r6 -mattr=micromips -filetype=asm \
-; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
; The purpose of this test is to check whether the CodeGen selects
; LW16 instruction with the base register in a range of $2-$7, $16, $17.
diff --git a/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll b/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
index a1a10a5de25..b08ea6bfbff 100644
--- a/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
+++ b/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
@@ -4,9 +4,6 @@
; RUN: llc -march=mips -mcpu=mips32r6 -mattr=+micromips \
; RUN: -relocation-model=pic < %s | \
; RUN: FileCheck %s -check-prefixes=ALL,MM32
-; RUN: llc -march=mips -mcpu=mips64r6 -mattr=+micromips -target-abi n64 \
-; RUN: -relocation-model=pic < %s | \
-; RUN: FileCheck %s -check-prefixes=ALL,MM64
@gf0 = external global float
@@ -19,12 +16,6 @@ entry:
; MM32: lw $[[R3:[0-9]+]], %got(gf0)($[[R2]])
; MM32: lwc1 $f0, 0($[[R3]])
-; MM64: lui $[[R0:[0-9]+]], %hi(%neg(%gp_rel(test_lwc1)))
-; MM64: daddu $[[R1:[0-9]+]], $[[R0]], $25
-; MM64: daddiu $[[R2:[0-9]+]], $[[R1]], %lo(%neg(%gp_rel(test_lwc1)))
-; MM64: ld $[[R3:[0-9]+]], %got_disp(gf0)($[[R2]])
-; MM64: lwc1 $f0, 0($[[R3]])
-
%0 = load float, float* @gf0, align 4
ret float %0
}
@@ -38,12 +29,6 @@ entry:
; MM32: lw $[[R3:[0-9]+]], %got(gf0)($[[R2]])
; MM32: swc1 $f12, 0($[[R3]])
-; MM64: lui $[[R0:[0-9]+]], %hi(%neg(%gp_rel(test_swc1)))
-; MM64: daddu $[[R1:[0-9]+]], $[[R0]], $25
-; MM64: daddiu $[[R2:[0-9]+]], $[[R1]], %lo(%neg(%gp_rel(test_swc1)))
-; MM64: ld $[[R3:[0-9]+]], %got_disp(gf0)($[[R2]])
-; MM64: swc1 $f12, 0($[[R3]])
-
store float %a, float* @gf0, align 4
ret void
}
diff --git a/llvm/test/CodeGen/Mips/micromips64r6-unsupported.ll b/llvm/test/CodeGen/Mips/micromips64r6-unsupported.ll
new file mode 100644
index 00000000000..6c3cd1be3e4
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/micromips64r6-unsupported.ll
@@ -0,0 +1,5 @@
+; RUN: not llc -mtriple=mips64-unknown-linux -mcpu=mips64r6 -mattr=+micromips %s 2>&1 | FileCheck %s
+
+; Test that microMIPS64R6 is not supported.
+
+; CHECK: LLVM ERROR: microMIPS64R6 is not supported
diff --git a/llvm/test/CodeGen/Mips/mips64fpldst.ll b/llvm/test/CodeGen/Mips/mips64fpldst.ll
index 564ffdd2f69..c439b4ba623 100644
--- a/llvm/test/CodeGen/Mips/mips64fpldst.ll
+++ b/llvm/test/CodeGen/Mips/mips64fpldst.ll
@@ -2,8 +2,6 @@
; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi n32 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-N32
; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n64 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-N64
; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n32 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-N32
-; RUN: llc < %s -march=mipsel -mcpu=mips64r6 -mattr=+micromips -target-abi n32 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-N32
-; RUN: llc < %s -march=mipsel -mcpu=mips64r6 -mattr=+micromips -target-abi n64 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-N64
@f0 = common global float 0.000000e+00, align 4
@d0 = common global double 0.000000e+00, align 8
diff --git a/llvm/test/CodeGen/Mips/mips64shift.ll b/llvm/test/CodeGen/Mips/mips64shift.ll
index e93140f18c9..0b1294d3afb 100644
--- a/llvm/test/CodeGen/Mips/mips64shift.ll
+++ b/llvm/test/CodeGen/Mips/mips64shift.ll
@@ -1,5 +1,4 @@
; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck -check-prefixes=ALL,MIPS %s
-; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=micromips < %s | FileCheck -check-prefixes=ALL,MICROMIPS %s
define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
entry:
diff --git a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
index eafbd10f5e3..02556fbfd3d 100644
--- a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
+++ b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
@@ -28,10 +28,6 @@
; RUN: -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,PIC32MM
; RUN: llc -march=mipsel -relocation-model=static -mcpu=mips32r6 \
; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,STATIC32MMR6
-; RUN: llc -march=mips64el -relocation-model=pic -mcpu=mips64r6 \
-; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=PIC64R6MM
-; RUN: llc -march=mips64el -relocation-model=static -mcpu=mips64r6 \
-; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=STATIC64
@g0 = common global i32 0, align 4
@g1 = common global i32 0, align 4
@@ -169,7 +165,6 @@ entry:
; STATIC32MMR6: bc
; PIC64: jr $25
; PIC64R6: jrc $25
-; PIC64R6MM: jrc $25
; STATIC64: j
; PIC16: jalrc