path: root/llvm/test/CodeGen/MSP430/libcalls.ll
Diffstat (limited to 'llvm/test/CodeGen/MSP430/libcalls.ll')
-rw-r--r--  llvm/test/CodeGen/MSP430/libcalls.ll  60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/llvm/test/CodeGen/MSP430/libcalls.ll b/llvm/test/CodeGen/MSP430/libcalls.ll
index 950ed6c17e2..30402377813 100644
--- a/llvm/test/CodeGen/MSP430/libcalls.ll
+++ b/llvm/test/CodeGen/MSP430/libcalls.ll
@@ -433,9 +433,10 @@ entry:
; CHECK: call #__mspabi_divi
%0 = load volatile i16, i16* @g_i16, align 8
- %1 = sdiv i16 %0, %0
+ %1 = load volatile i16, i16* @g_i16, align 8
+ %2 = sdiv i16 %0, %1
- ret i16 %1
+ ret i16 %2
}
define i32 @divli() #0 {
@@ -444,9 +445,10 @@ entry:
; CHECK: call #__mspabi_divli
%0 = load volatile i32, i32* @g_i32, align 8
- %1 = sdiv i32 %0, %0
+ %1 = load volatile i32, i32* @g_i32, align 8
+ %2 = sdiv i32 %0, %1
- ret i32 %1
+ ret i32 %2
}
define i64 @divlli() #0 {
@@ -455,9 +457,10 @@ entry:
; CHECK: call #__mspabi_divlli
%0 = load volatile i64, i64* @g_i64, align 8
- %1 = sdiv i64 %0, %0
+ %1 = load volatile i64, i64* @g_i64, align 8
+ %2 = sdiv i64 %0, %1
- ret i64 %1
+ ret i64 %2
}
define i16 @divu() #0 {
@@ -466,9 +469,10 @@ entry:
; CHECK: call #__mspabi_divu
%0 = load volatile i16, i16* @g_i16, align 8
- %1 = udiv i16 %0, %0
+ %1 = load volatile i16, i16* @g_i16, align 8
+ %2 = udiv i16 %0, %1
- ret i16 %1
+ ret i16 %2
}
define i32 @divul() #0 {
@@ -477,9 +481,10 @@ entry:
; CHECK: call #__mspabi_divul
%0 = load volatile i32, i32* @g_i32, align 8
- %1 = udiv i32 %0, %0
+ %1 = load volatile i32, i32* @g_i32, align 8
+ %2 = udiv i32 %0, %1
- ret i32 %1
+ ret i32 %2
}
define i64 @divull() #0 {
@@ -488,9 +493,10 @@ entry:
; CHECK: call #__mspabi_divull
%0 = load volatile i64, i64* @g_i64, align 8
- %1 = udiv i64 %0, %0
+ %1 = load volatile i64, i64* @g_i64, align 8
+ %2 = udiv i64 %0, %1
- ret i64 %1
+ ret i64 %2
}
define i16 @remi() #0 {
@@ -499,9 +505,10 @@ entry:
; CHECK: call #__mspabi_remi
%0 = load volatile i16, i16* @g_i16, align 8
- %1 = srem i16 %0, %0
+ %1 = load volatile i16, i16* @g_i16, align 8
+ %2 = srem i16 %0, %1
- ret i16 %1
+ ret i16 %2
}
define i32 @remli() #0 {
@@ -510,9 +517,10 @@ entry:
; CHECK: call #__mspabi_remli
%0 = load volatile i32, i32* @g_i32, align 8
- %1 = srem i32 %0, %0
+ %1 = load volatile i32, i32* @g_i32, align 8
+ %2 = srem i32 %0, %1
- ret i32 %1
+ ret i32 %2
}
define i64 @remlli() #0 {
@@ -521,9 +529,10 @@ entry:
; CHECK: call #__mspabi_remlli
%0 = load volatile i64, i64* @g_i64, align 8
- %1 = srem i64 %0, %0
+ %1 = load volatile i64, i64* @g_i64, align 8
+ %2 = srem i64 %0, %1
- ret i64 %1
+ ret i64 %2
}
define i16 @remu() #0 {
@@ -532,9 +541,10 @@ entry:
; CHECK: call #__mspabi_remu
%0 = load volatile i16, i16* @g_i16, align 8
- %1 = urem i16 %0, %0
+ %1 = load volatile i16, i16* @g_i16, align 8
+ %2 = urem i16 %0, %1
- ret i16 %1
+ ret i16 %2
}
define i32 @remul() #0 {
@@ -543,9 +553,10 @@ entry:
; CHECK: call #__mspabi_remul
%0 = load volatile i32, i32* @g_i32, align 8
- %1 = urem i32 %0, %0
+ %1 = load volatile i32, i32* @g_i32, align 8
+ %2 = urem i32 %0, %1
- ret i32 %1
+ ret i32 %2
}
define i64 @remull() #0 {
@@ -554,9 +565,10 @@ entry:
; CHECK: call #__mspabi_remull
%0 = load volatile i64, i64* @g_i64, align 8
- %1 = urem i64 %0, %0
+ %1 = load volatile i64, i64* @g_i64, align 8
+ %2 = urem i64 %0, %1
- ret i64 %1
+ ret i64 %2
}
define i16 @mpyi() #0 {
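
Every hunk above applies the same fix: the old tests divided a value by itself (e.g. `sdiv i16 %0, %0`), which InstSimplify folds to a constant, deleting the very division the `; CHECK: call #__mspabi_*` lines are supposed to match. Loading the global a second time through a volatile load gives the division two distinct SSA operands that the optimizer may not merge or fold away. A minimal standalone sketch of the pattern (hypothetical function and global names, not part of this diff):

; @g and @div_example are illustrative only.
@g = global i16 0

define i16 @div_example() {
entry:
  ; Two volatile loads: %a and %b are distinct SSA values even though
  ; they read the same global, so the x/x -> 1 simplification cannot fire.
  %a = load volatile i16, i16* @g, align 2
  %b = load volatile i16, i16* @g, align 2
  %q = sdiv i16 %a, %b   ; survives to ISel and lowers to a libcall
  ret i16 %q
}

With `sdiv i16 %a, %a` instead, the result would presumably simplify to the constant 1 and no `__mspabi_divi` call would be emitted for FileCheck to find.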