author     Zlatko Buljan <Zlatko.Buljan@imgtec.com>   2016-03-24 09:22:45 +0000
committer  Zlatko Buljan <Zlatko.Buljan@imgtec.com>   2016-03-24 09:22:45 +0000
commit     94af4cbcf4f9f937ee5eaf773cfb004e1b0385b2 (patch)
tree       587efd255e18e666c43e50720f1083fec8a61f66 /llvm/test
parent     ee675880b8df286b1271e9af1bfe8923c87ba9ba (diff)
[mips][microMIPS] Add CodeGen support for DIV, MOD, DIVU, MODU, DDIV, DMOD, DDIVU and DMODU instructions
Differential Revision: http://reviews.llvm.org/D17137

llvm-svn: 264248
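To make the new coverage concrete, here is a minimal sketch of the kind of test the patch extends, showing what the added microMIPS32r6 check lines assert for 32-bit signed division and remainder. This is an illustrative file, not part of the patch: the function names are invented, and the expected sequences simply mirror the MMR6 checks added to sdiv.ll and srem.ll below.

; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s

define signext i32 @quot(i32 signext %a, i32 signext %b) {
entry:
  ; microMIPS32r6 has a three-operand div that writes the quotient directly,
  ; guarded by a teq trap for division by zero.
  ; CHECK-LABEL: quot:
  ; CHECK: div $2, $4, $5
  ; CHECK: teq $5, $zero, 7
  %q = sdiv i32 %a, %b
  ret i32 %q
}

define signext i32 @rem(i32 signext %a, i32 signext %b) {
entry:
  ; mod yields the remainder directly, without the mflo/mfhi step of pre-R6 cores.
  ; CHECK-LABEL: rem:
  ; CHECK: mod $2, $4, $5
  ; CHECK: teq $5, $zero, 7
  %r = srem i32 %a, %b
  ret i32 %r
}

On pre-R6 microMIPS (the MMR3 prefix) the same IR instead goes through the classic div $zero, $4, $5 / teq / mflo or mfhi sequence, as the MMR3 check lines below show.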
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll                  231
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/srem.ll                  224
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/udiv.ll                   43
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/urem.ll                   69
-rw-r--r--  llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt    8
-rw-r--r--  llvm/test/MC/Mips/micromips64r6/valid.s                   8
6 files changed, 566 insertions, 17 deletions
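The two MC-level files at the end of the diffstat correct the expected byte encodings for the 64-bit ddiv/dmod/ddivu/dmodu group. A rough way to exercise them outside of lit is sketched below; this is an assumption for illustration only, since the authoritative RUN lines live at the top of valid.s and valid.txt and may spell the triple and attributes differently.

llvm-mc llvm/test/MC/Mips/micromips64r6/valid.s \
    -triple=mips64-unknown-linux -mcpu=mips64r6 -mattr=+micromips -show-encoding \
  | FileCheck llvm/test/MC/Mips/micromips64r6/valid.s

llvm-mc --disassemble llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt \
    -triple=mips64-unknown-linux -mcpu=mips64r6 -mattr=+micromips \
  | FileCheck llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt

The first command assembles each instruction and prints its encoding for FileCheck to compare against the "# encoding:" comments; the second decodes the raw byte sequences in valid.txt and checks the resulting mnemonics.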
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
index 929ee88bb7f..8e91529149d 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -24,6 +24,12 @@
; RUN: -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN: -check-prefix=R6 -check-prefix=64R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR3 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM64
define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -42,6 +48,17 @@ entry:
; R6: sll $[[T1:[0-9]+]], $[[T0]], 31
; R6: sra $2, $[[T1]], 31
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $[[T0:[0-9]+]]
+ ; MMR3: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; MMR3: sra $2, $[[T1]], 31
+
+ ; MMR6: div $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; MMR6: sra $2, $[[T1]], 31
+
%r = sdiv i1 %a, %b
ret i1 %r
}
@@ -68,6 +85,15 @@ entry:
; FIXME: This instruction is redundant.
; R6: seb $2, $[[T0]]
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $[[T0:[0-9]+]]
+ ; MMR3: seb $2, $[[T0]]
+
+ ; MMR6: div $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: seb $2, $[[T0]]
+
%r = sdiv i8 %a, %b
ret i8 %r
}
@@ -94,6 +120,15 @@ entry:
; FIXME: This instruction is redundant since div is signed.
; R6: seh $2, $[[T0]]
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $[[T0:[0-9]+]]
+ ; MMR3: seh $2, $[[T0]]
+
+ ; MMR6: div $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: seh $2, $[[T0]]
+
%r = sdiv i16 %a, %b
ret i16 %r
}
@@ -109,6 +144,13 @@ entry:
; R6: div $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: div $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = sdiv i32 %a, %b
ret i32 %r
}
@@ -126,6 +168,11 @@ entry:
; 64R6: ddiv $2, $4, $5
; 64R6: teq $5, $zero, 7
+ ; MM32: lw $25, %call16(__divdi3)($2)
+
+ ; MM64: ddiv $2, $4, $5
+ ; MM64: teq $5, $zero, 7
+
%r = sdiv i64 %a, %b
ret i64 %r
}
@@ -134,11 +181,185 @@ define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sdiv_i128:
- ; GP32: lw $25, %call16(__divti3)($gp)
+ ; GP32: lw $25, %call16(__divti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
+ ; 64R6: ld $25, %call16(__divti3)($gp)
+
+ ; MM32: lw $25, %call16(__divti3)($2)
+
+ ; MM64: ld $25, %call16(__divti3)($2)
+
+ %r = sdiv i128 %a, %b
+ ret i128 %r
+}
+
+define signext i1 @sdiv_0_i1(i1 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i1:
+
+ ; NOT-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R6: div $zero, $4, $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mflo $[[T1:[0-9]+]]
+ ; NOT-R6: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; NOT-R6: sra $2, $[[T2]], 31
+
+ ; R6: div $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; R6: sra $2, $[[T1]], 31
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mflo $[[T1:[0-9]+]]
+ ; MMR3: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; MMR3: sra $2, $[[T2]], 31
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: div $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; MMR6: sra $2, $[[T2]], 31
+
+ %r = sdiv i1 %a, 0
+ ret i1 %r
+}
+
+define signext i8 @sdiv_0_i8(i8 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i8:
+
+ ; NOT-R2-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R2-R6: div $zero, $4, $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mflo $[[T1:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 24
+ ; NOT-R2-R6: sra $2, $[[T2]], 24
+
+ ; R2-R5: addiu $[[T0:[0-9]+]], $zero, 0
+ ; R2-R5: div $zero, $4, $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mflo $[[T1:[0-9]+]]
+ ; R2-R5: seb $2, $[[T1]]
+
+ ; R6: div $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: seb $2, $[[T0]]
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mflo $[[T1:[0-9]+]]
+ ; MMR3: seb $2, $[[T1]]
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: div $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seb $2, $[[T1]]
+
+ %r = sdiv i8 %a, 0
+ ret i8 %r
+}
+
+define signext i16 @sdiv_0_i16(i16 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i16:
+
+ ; NOT-R2-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R2-R6: div $zero, $4, $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mflo $[[T1:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 16
+ ; NOT-R2-R6: sra $2, $[[T2]], 16
+
+ ; R2-R5: addiu $[[T0:[0-9]+]], $zero, 0
+ ; R2-R5: div $zero, $4, $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mflo $[[T1:[0-9]+]]
+ ; R2-R5: seh $2, $[[T1]]
+
+ ; R6: div $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: seh $2, $[[T0]]
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mflo $[[T1:[0-9]+]]
+ ; MMR3: seh $2, $[[T1]]
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: div $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seh $2, $[[T1]]
+
+ %r = sdiv i16 %a, 0
+ ret i16 %r
+}
+
+define signext i32 @sdiv_0_i32(i32 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i32:
+
+ ; NOT-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R6: div $zero, $4, $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: div $2, $4, $zero
+ ; R6: teq $zero, $zero, 7
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: div $2, $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+
+ %r = sdiv i32 %a, 0
+ ret i32 %r
+}
+
+define signext i64 @sdiv_0_i64(i64 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i64:
+
+ ; GP32: lw $25, %call16(__divdi3)($gp)
+
+ ; GP64-NOT-R6: daddiu $[[T0:[0-9]+]], $zero, 0
+ ; GP64-NOT-R6: ddiv $zero, $4, $[[T0]]
+ ; GP64-NOT-R6: teq $[[T0]], $zero, 7
+ ; GP64-NOT-R6: mflo $2
+
+ ; 64R6: ddiv $2, $4, $zero
+ ; 64R6: teq $zero, $zero, 7
+
+ ; MM32: lw $25, %call16(__divdi3)($2)
+
+ ; MM64: ddiv $2, $4, $zero
+ ; MM64: teq $zero, $zero, 7
+
+ %r = sdiv i64 %a, 0
+ ret i64 %r
+}
+
+define signext i128 @sdiv_0_i128(i128 signext %a) {
+entry:
+; ALL-LABEL: sdiv_0_i128:
+
+ ; GP32: lw $25, %call16(__divti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
+ ; 64R6: ld $25, %call16(__divti3)($gp)
+
+ ; MM32: lw $25, %call16(__divti3)($2)
- ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
- ; 64R6: ld $25, %call16(__divti3)($gp)
+ ; MM64: ld $25, %call16(__divti3)($2)
- %r = sdiv i128 %a, %b
- ret i128 %r
+ %r = sdiv i128 %a, 0
+ ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
index ceb53ee7033..4dffe9c9eb1 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -27,6 +27,12 @@
; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR3 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM64
define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -43,6 +49,17 @@ entry:
; R6: sll $[[T3:[0-9]+]], $[[T0]], 31
; R6: sra $2, $[[T3]], 31
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mfhi $[[T0:[0-9]+]]
+ ; MMR3: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; MMR3: sra $2, $[[T1]], 31
+
+ ; MMR6: mod $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; MMR6: sra $2, $[[T1]], 31
+
%r = srem i1 %a, %b
ret i1 %r
}
@@ -66,6 +83,15 @@ entry:
; R6: teq $5, $zero, 7
; R6: seb $2, $[[T0]]
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mfhi $[[T0:[0-9]+]]
+ ; MMR3: seb $2, $[[T0]]
+
+ ; MMR6: mod $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: seb $2, $[[T0]]
+
%r = srem i8 %a, %b
ret i8 %r
}
@@ -83,12 +109,21 @@ entry:
; R2-R5: div $zero, $4, $5
; R2-R5: teq $5, $zero, 7
; R2-R5: mfhi $[[T0:[0-9]+]]
- ; R2-R5: seh $2, $[[T1]]
+ ; R2-R5: seh $2, $[[T0]]
; R6: mod $[[T0:[0-9]+]], $4, $5
; R6: teq $5, $zero, 7
; R6: seh $2, $[[T0]]
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mfhi $[[T0:[0-9]+]]
+ ; MMR3: seh $2, $[[T0]]
+
+ ; MMR6: mod $[[T0:[0-9]+]], $4, $5
+ ; MMR6: teq $5, $zero, 7
+ ; MMR6: seh $2, $[[T0]]
+
%r = srem i16 %a, %b
ret i16 %r
}
@@ -104,6 +139,13 @@ entry:
; R6: mod $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: div $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mfhi $2
+
+ ; MMR6: mod $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = srem i32 %a, %b
ret i32 %r
}
@@ -121,6 +163,11 @@ entry:
; 64R6: dmod $2, $4, $5
; 64R6: teq $5, $zero, 7
+ ; MM32: lw $25, %call16(__moddi3)($2)
+
+ ; MM64: dmod $2, $4, $5
+ ; MM64: teq $5, $zero, 7
+
%r = srem i64 %a, %b
ret i64 %r
}
@@ -134,6 +181,181 @@ entry:
; GP64-NOT-R6: ld $25, %call16(__modti3)($gp)
; 64R6: ld $25, %call16(__modti3)($gp)
+ ; MM32: lw $25, %call16(__modti3)($2)
+
+ ; MM64: ld $25, %call16(__modti3)($2)
+
%r = srem i128 %a, %b
ret i128 %r
}
+
+define signext i1 @srem_0_i1(i1 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i1:
+
+ ; NOT-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R6: div $zero, $4, $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mfhi $[[T1:[0-9]+]]
+ ; NOT-R6: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; NOT-R6: sra $2, $[[T2]], 31
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; R6: sra $2, $[[T1]], 31
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T1:[0-9]+]]
+ ; MMR3: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; MMR3: sra $2, $[[T2]], 31
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: mod $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: sll $[[T2:[0-9]+]], $[[T1]], 31
+ ; MMR6: sra $2, $[[T2]], 31
+
+ %r = srem i1 %a, 0
+ ret i1 %r
+}
+
+define signext i8 @srem_0_i8(i8 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i8:
+
+ ; NOT-R2-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R2-R6: div $zero, $4, $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T1:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 24
+ ; NOT-R2-R6: sra $2, $[[T2]], 24
+
+ ; R2-R5: addiu $[[T0:[0-9]+]], $zero, 0
+ ; R2-R5: div $zero, $4, $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mfhi $[[T1:[0-9]+]]
+ ; R2-R5: seb $2, $[[T1]]
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: seb $2, $[[T0]]
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T1:[0-9]+]]
+ ; MMR3: seb $2, $[[T1]]
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: mod $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seb $2, $[[T1]]
+
+ %r = srem i8 %a, 0
+ ret i8 %r
+}
+
+define signext i16 @srem_0_i16(i16 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i16:
+
+ ; NOT-R2-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R2-R6: div $zero, $4, $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T1:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 16
+ ; NOT-R2-R6: sra $2, $[[T2]], 16
+
+ ; R2-R5: addiu $[[T0:[0-9]+]], $zero, 0
+ ; R2-R5: div $zero, $4, $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mfhi $[[T1:[0-9]+]]
+ ; R2-R5: seh $2, $[[T1]]
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $zero
+ ; R6: teq $zero, $zero, 7
+ ; R6: seh $2, $[[T0]]
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T1:[0-9]+]]
+ ; MMR3: seh $2, $[[T1]]
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: mod $[[T1:[0-9]+]], $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seh $2, $[[T1]]
+
+ %r = srem i16 %a, 0
+ ret i16 %r
+}
+
+
+define signext i32 @srem_0_i32(i32 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i32:
+
+ ; NOT-R6: addiu $[[T0:[0-9]+]], $zero, 0
+ ; NOT-R6: div $zero, $4, $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mfhi $2
+
+ ; R6: mod $2, $4, $zero
+ ; R6: teq $zero, $zero, 7
+
+ ; MMR3: lui $[[T0:[0-9]+]], 0
+ ; MMR3: div $zero, $4, $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $2
+
+ ; MMR6: lui $[[T0:[0-9]+]], 0
+ ; MMR6: mod $2, $4, $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+
+ %r = srem i32 %a, 0
+ ret i32 %r
+}
+
+define signext i64 @srem_0_i64(i64 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i64:
+
+ ; GP32: lw $25, %call16(__moddi3)($gp)
+
+ ; GP64-NOT-R6: daddiu $[[T0:[0-9]+]], $zero, 0
+ ; GP64-NOT-R6: ddiv $zero, $4, $[[T0]]
+ ; GP64-NOT-R6: teq $[[T0]], $zero, 7
+ ; GP64-NOT-R6: mfhi $2
+
+ ; 64R6: dmod $2, $4, $zero
+ ; 64R6: teq $zero, $zero, 7
+
+ ; MM32: lw $25, %call16(__moddi3)($2)
+
+ ; MM64: dmod $2, $4, $zero
+ ; MM64: teq $zero, $zero, 7
+
+ %r = srem i64 %a, 0
+ ret i64 %r
+}
+
+define signext i128 @srem_0_i128(i128 signext %a) {
+entry:
+; ALL-LABEL: srem_0_i128:
+
+ ; GP32: lw $25, %call16(__modti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__modti3)($gp)
+ ; 64R6: ld $25, %call16(__modti3)($gp)
+
+ ; MM32: lw $25, %call16(__modti3)($2)
+
+ ; MM64: ld $25, %call16(__modti3)($2)
+
+ %r = srem i128 %a, 0
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
index a7cafe52d1a..6c09535a95b 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -24,6 +24,12 @@
; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN: -check-prefix=R6 -check-prefix=64R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR3 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM64
define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
entry:
@@ -36,6 +42,13 @@ entry:
; R6: divu $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: divu $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: divu $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = udiv i1 %a, %b
ret i1 %r
}
@@ -51,6 +64,13 @@ entry:
; R6: divu $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: divu $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: divu $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = udiv i8 %a, %b
ret i8 %r
}
@@ -66,6 +86,13 @@ entry:
; R6: divu $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: divu $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: divu $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = udiv i16 %a, %b
ret i16 %r
}
@@ -81,6 +108,13 @@ entry:
; R6: divu $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: divu $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mflo $2
+
+ ; MMR6: divu $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = udiv i32 %a, %b
ret i32 %r
}
@@ -98,6 +132,11 @@ entry:
; 64R6: ddivu $2, $4, $5
; 64R6: teq $5, $zero, 7
+ ; MM32: lw $25, %call16(__udivdi3)($2)
+
+ ; MM64: ddivu $2, $4, $5
+ ; MM64: teq $5, $zero, 7
+
%r = udiv i64 %a, %b
ret i64 %r
}
@@ -111,6 +150,10 @@ entry:
; GP64-NOT-R6: ld $25, %call16(__udivti3)($gp)
; 64R6: ld $25, %call16(__udivti3)($gp)
+ ; MM32: lw $25, %call16(__udivti3)($2)
+
+ ; MM64: ld $25, %call16(__udivti3)($2)
+
%r = udiv i128 %a, %b
ret i128 %r
}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
index d5a231c8dfc..e548f1efd64 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -27,6 +27,12 @@
; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR3 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM32
+; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
+; RUN: -check-prefix=MM -check-prefix=MMR6 -check-prefix=MM64
define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -47,6 +53,21 @@ entry:
; R6: sll $[[T3:[0-9]+]], $[[T2]], 31
; R6: sra $2, $[[T3]], 31
+ ; MMR3: andi16 $[[T0:[0-9]+]], $5, 1
+ ; MMR3: andi16 $[[T1:[0-9]+]], $4, 1
+ ; MMR3: divu $zero, $[[T1]], $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T2:[0-9]+]]
+ ; MMR3: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; MMR3: sra $2, $[[T3]], 31
+
+ ; MMR6: andi16 $[[T0:[0-9]+]], $5, 1
+ ; MMR6: andi16 $[[T1:[0-9]+]], $4, 1
+ ; MMR6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; MMR6: sra $2, $[[T3]], 31
+
%r = urem i1 %a, %b
ret i1 %r
}
@@ -76,6 +97,19 @@ entry:
; R6: teq $[[T0]], $zero, 7
; R6: seb $2, $[[T2]]
+ ; MMR3: andi16 $[[T0:[0-9]+]], $5, 255
+ ; MMR3: andi16 $[[T1:[0-9]+]], $4, 255
+ ; MMR3: divu $zero, $[[T1]], $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T2:[0-9]+]]
+ ; MMR3: seb $2, $[[T2]]
+
+ ; MMR6: andi16 $[[T0:[0-9]+]], $5, 255
+ ; MMR6: andi16 $[[T1:[0-9]+]], $4, 255
+ ; MMR6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seb $2, $[[T2]]
+
%r = urem i8 %a, %b
ret i8 %r
}
@@ -105,6 +139,19 @@ entry:
; R6: teq $[[T0]], $zero, 7
; R6: seh $2, $[[T2]]
+ ; MMR3: andi16 $[[T0:[0-9]+]], $5, 65535
+ ; MMR3: andi16 $[[T1:[0-9]+]], $4, 65535
+ ; MMR3: divu $zero, $[[T1]], $[[T0]]
+ ; MMR3: teq $[[T0]], $zero, 7
+ ; MMR3: mfhi $[[T2:[0-9]+]]
+ ; MMR3: seh $2, $[[T2]]
+
+ ; MMR6: andi16 $[[T0:[0-9]+]], $5, 65535
+ ; MMR6: andi16 $[[T1:[0-9]+]], $4, 65535
+ ; MMR6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; MMR6: teq $[[T0]], $zero, 7
+ ; MMR6: seh $2, $[[T2]]
+
%r = urem i16 %a, %b
ret i16 %r
}
@@ -120,6 +167,13 @@ entry:
; R6: modu $2, $4, $5
; R6: teq $5, $zero, 7
+ ; MMR3: divu $zero, $4, $5
+ ; MMR3: teq $5, $zero, 7
+ ; MMR3: mfhi $2
+
+ ; MMR6: modu $2, $4, $5
+ ; MMR6: teq $5, $zero, 7
+
%r = urem i32 %a, %b
ret i32 %r
}
@@ -137,6 +191,11 @@ entry:
; 64R6: dmodu $2, $4, $5
; 64R6: teq $5, $zero, 7
+ ; MM32: lw $25, %call16(__umoddi3)($2)
+
+ ; MM64: dmodu $2, $4, $5
+ ; MM64: teq $5, $zero, 7
+
%r = urem i64 %a, %b
ret i64 %r
}
@@ -145,10 +204,14 @@ define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: urem_i128:
- ; GP32: lw $25, %call16(__umodti3)($gp)
+ ; GP32: lw $25, %call16(__umodti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__umodti3)($gp)
+ ; 64-R6: ld $25, %call16(__umodti3)($gp)
+
+ ; MM32: lw $25, %call16(__umodti3)($2)
- ; GP64-NOT-R6: ld $25, %call16(__umodti3)($gp)
- ; 64-R6: ld $25, %call16(__umodti3)($gp)
+ ; MM64: ld $25, %call16(__umodti3)($2)
%r = urem i128 %a, %b
ret i128 %r
diff --git a/llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt b/llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt
index 531e1e492b4..36c33f547d0 100644
--- a/llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt
+++ b/llvm/test/MC/Disassembler/Mips/micromips64r6/valid.txt
@@ -28,10 +28,10 @@
0x59 0x26 0x30 0xe4 # CHECK: dextm $9, $6, 3, 39
0x59 0x26 0x30 0xd4 # CHECK: dextu $9, $6, 35, 7
0x58 0x43 0x25 0x1c # CHECK: dalign $4, $2, $3, 5
-0x58 0x64 0x29 0x18 # CHECK: ddiv $3, $4, $5
-0x58 0x64 0x29 0x58 # CHECK: dmod $3, $4, $5
-0x58 0x64 0x29 0x98 # CHECK: ddivu $3, $4, $5
-0x58 0x64 0x29 0xd8 # CHECK: dmodu $3, $4, $5
+0x58 0xa4 0x19 0x18 # CHECK: ddiv $3, $4, $5
+0x58 0xa4 0x19 0x58 # CHECK: dmod $3, $4, $5
+0x58 0xa4 0x19 0x98 # CHECK: ddivu $3, $4, $5
+0x58 0xa4 0x19 0xd8 # CHECK: dmodu $3, $4, $5
0x54 0xa4 0x18 0x30 # CHECK: add.s $f3, $f4, $f5
0x54 0xc4 0x11 0x30 # CHECK: add.d $f2, $f4, $f6
0x54 0xa4 0x18 0x70 # CHECK: sub.s $f3, $f4, $f5
diff --git a/llvm/test/MC/Mips/micromips64r6/valid.s b/llvm/test/MC/Mips/micromips64r6/valid.s
index 409d8a21302..48d2a21b3f6 100644
--- a/llvm/test/MC/Mips/micromips64r6/valid.s
+++ b/llvm/test/MC/Mips/micromips64r6/valid.s
@@ -27,10 +27,10 @@ a:
lhu16 $3, 4($16) # CHECK: lhu16 $3, 4($16) # encoding: [0x29,0x82]
lbu16 $3, 4($17) # CHECK: lbu16 $3, 4($17) # encoding: [0x09,0x94]
lbu16 $3, -1($17) # CHECK: lbu16 $3, -1($17) # encoding: [0x09,0x9f]
- ddiv $3, $4, $5 # CHECK: ddiv $3, $4, $5 # encoding: [0x58,0x64,0x29,0x18]
- dmod $3, $4, $5 # CHECK: dmod $3, $4, $5 # encoding: [0x58,0x64,0x29,0x58]
- ddivu $3, $4, $5 # CHECK: ddivu $3, $4, $5 # encoding: [0x58,0x64,0x29,0x98]
- dmodu $3, $4, $5 # CHECK: dmodu $3, $4, $5 # encoding: [0x58,0x64,0x29,0xd8]
+ ddiv $3, $4, $5 # CHECK: ddiv $3, $4, $5 # encoding: [0x58,0xa4,0x19,0x18]
+ dmod $3, $4, $5 # CHECK: dmod $3, $4, $5 # encoding: [0x58,0xa4,0x19,0x58]
+ ddivu $3, $4, $5 # CHECK: ddivu $3, $4, $5 # encoding: [0x58,0xa4,0x19,0x98]
+ dmodu $3, $4, $5 # CHECK: dmodu $3, $4, $5 # encoding: [0x58,0xa4,0x19,0xd8]
add.s $f3, $f4, $f5 # CHECK: add.s $f3, $f4, $f5 # encoding: [0x54,0xa4,0x18,0x30]
add.d $f2, $f4, $f6 # CHECK: add.d $f2, $f4, $f6 # encoding: [0x54,0xc4,0x11,0x30]
sub.s $f3, $f4, $f5 # CHECK: sub.s $f3, $f4, $f5 # encoding: [0x54,0xa4,0x18,0x70]