author     Vadzim Dambrouski <pftbest@gmail.com>  2017-05-11 19:56:14 +0000
committer  Vadzim Dambrouski <pftbest@gmail.com>  2017-05-11 19:56:14 +0000
commit     38e30197c31951a6f27f781060395161313fd614 (patch)
tree       30abea07b9e8fc46e034e76fb51007fb6886fe2a /llvm/test/CodeGen
parent     36acbc716d7da51dc7f1306a172eb7f512a394ae (diff)
[MSP430] Generate EABI-compliant libcalls
Updates the MSP430 target to generate EABI-compatible libcall names.

As a byproduct, adjusts the hardware multiplier options available in the
MSP430 target, adds support for promotion of the ISD::MUL operation for
8-bit integers, and correctly marks R11 as used by call instructions.

Patch by Andrew Wygle.

Differential Revision: https://reviews.llvm.org/D32676

llvm-svn: 302820
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/MSP430/hwmult16.ll         43
-rw-r--r--  llvm/test/CodeGen/MSP430/hwmult32.ll         43
-rw-r--r--  llvm/test/CodeGen/MSP430/hwmultf5.ll         43
-rw-r--r--  llvm/test/CodeGen/MSP430/jumptable.ll         2
-rw-r--r--  llvm/test/CodeGen/MSP430/libcalls.ll        595
-rw-r--r--  llvm/test/CodeGen/MSP430/promote-i8-mul.ll (renamed from llvm/test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll)   2
6 files changed, 726 insertions, 2 deletions
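
Note: this diff is limited to llvm/test/CodeGen, so the backend changes being tested are not part of this page. As a rough sketch only (not the patch's actual code; placement inside the MSP430TargetLowering constructor and the exact set of calls are assumptions), switching to the EABI helper names and promoting 8-bit multiplies comes down to standard TargetLowering hooks; the R11 change is a separate TableGen edit to the call instructions' clobber (Defs) list and is not sketched here:

// Hedged sketch of the TargetLowering hooks involved; RTLIB::*, ISD::MUL,
// MVT::i8, setLibcallName and setOperationAction are real LLVM APIs,
// everything else here is illustrative.

// Default (software) EABI helper names used when no hardware multiplier
// is selected; these match the CHECK lines in libcalls.ll below.
setLibcallName(RTLIB::MUL_I16,  "__mspabi_mpyi");
setLibcallName(RTLIB::MUL_I32,  "__mspabi_mpyl");
setLibcallName(RTLIB::MUL_I64,  "__mspabi_mpyll");
setLibcallName(RTLIB::SDIV_I16, "__mspabi_divi");
setLibcallName(RTLIB::UDIV_I16, "__mspabi_divu");
setLibcallName(RTLIB::SREM_I16, "__mspabi_remi");
setLibcallName(RTLIB::UREM_I16, "__mspabi_remu");

// The EABI defines no 8-bit multiply helper, so i8 multiplies are promoted
// to i16 and end up calling __mspabi_mpyi (exercised by promote-i8-mul.ll).
setOperationAction(ISD::MUL, MVT::i8, Promote);
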
diff --git a/llvm/test/CodeGen/MSP430/hwmult16.ll b/llvm/test/CodeGen/MSP430/hwmult16.ll
new file mode 100644
index 00000000000..b23f1ad37d8
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/hwmult16.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=16bit < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_hw
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_hw
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/MSP430/hwmult32.ll b/llvm/test/CodeGen/MSP430/hwmult32.ll
new file mode 100644
index 00000000000..6ffeb969886
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/hwmult32.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=32bit < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_hw32
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_hw32
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/MSP430/hwmultf5.ll b/llvm/test/CodeGen/MSP430/hwmultf5.ll
new file mode 100644
index 00000000000..51ca4be4a65
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/hwmultf5.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=f5series < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_f5hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_f5hw
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_f5hw
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
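
Taken together, hwmult16.ll, hwmult32.ll and hwmultf5.ll pin down how -mhwmult is expected to rename the multiply helpers: 16bit uses the _hw suffix throughout, 32bit keeps _hw for the 16-bit multiply but switches the 32/64-bit ones to _hw32, and f5series uses _f5hw for all three. A hedged sketch of that selection follows (the HWMult enum and variable names are assumptions; only setLibcallName and RTLIB::* are real LLVM APIs):

// Hedged sketch; enum and variable names are illustrative assumptions.
switch (HWMultMode) {          // derived from -mhwmult=...
case HWMult16:                 // -mhwmult=16bit
  setLibcallName(RTLIB::MUL_I16, "__mspabi_mpyi_hw");
  setLibcallName(RTLIB::MUL_I32, "__mspabi_mpyl_hw");
  setLibcallName(RTLIB::MUL_I64, "__mspabi_mpyll_hw");
  break;
case HWMult32:                 // -mhwmult=32bit
  setLibcallName(RTLIB::MUL_I16, "__mspabi_mpyi_hw");
  setLibcallName(RTLIB::MUL_I32, "__mspabi_mpyl_hw32");
  setLibcallName(RTLIB::MUL_I64, "__mspabi_mpyll_hw32");
  break;
case HWMultF5:                 // -mhwmult=f5series
  setLibcallName(RTLIB::MUL_I16, "__mspabi_mpyi_f5hw");
  setLibcallName(RTLIB::MUL_I32, "__mspabi_mpyl_f5hw");
  setLibcallName(RTLIB::MUL_I64, "__mspabi_mpyll_f5hw");
  break;
default:                       // no hardware multiplier: keep __mspabi_mpyi etc.
  break;
}
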
diff --git a/llvm/test/CodeGen/MSP430/jumptable.ll b/llvm/test/CodeGen/MSP430/jumptable.ll
index 5ccdbb701db..b4366251698 100644
--- a/llvm/test/CodeGen/MSP430/jumptable.ll
+++ b/llvm/test/CodeGen/MSP430/jumptable.ll
@@ -12,7 +12,7 @@ entry:
store i16 %i, i16* %i.addr, align 2
%0 = load i16, i16* %i.addr, align 2
; CHECK: mov.w #2, r13
-; CHECK: call #__mulhi3hw_noint
+; CHECK: call #__mspabi_mpyi
; CHECK: br .LJTI0_0(r12)
switch i16 %0, label %sw.default [
i16 0, label %sw.bb
diff --git a/llvm/test/CodeGen/MSP430/libcalls.ll b/llvm/test/CodeGen/MSP430/libcalls.ll
new file mode 100644
index 00000000000..950ed6c17e2
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/libcalls.ll
@@ -0,0 +1,595 @@
+; RUN: llc -O0 < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_double = global double 123.0, align 8
+@g_float = global float 123.0, align 8
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define float @d2f() #0 {
+entry:
+; CHECK: d2f:
+
+; CHECK: call #__mspabi_cvtdf
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptrunc double %0 to float
+
+ ret float %1
+}
+
+define double @f2d() #0 {
+entry:
+; CHECK: f2d:
+
+; CHECK: call #__mspabi_cvtfd
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fpext float %0 to double
+
+ ret double %1
+}
+
+define i32 @d2l() #0 {
+entry:
+; CHECK: d2l:
+
+; CHECK: call #__mspabi_fixdli
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptosi double %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @d2ll() #0 {
+entry:
+; CHECK: d2ll:
+
+; CHECK: call #__mspabi_fixdlli
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptosi double %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @d2ul() #0 {
+entry:
+; CHECK: d2ul:
+
+; CHECK: call #__mspabi_fixdul
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptoui double %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @d2ull() #0 {
+entry:
+; CHECK: d2ull:
+
+; CHECK: call #__mspabi_fixdull
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptoui double %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @f2l() #0 {
+entry:
+; CHECK: f2l:
+
+; CHECK: call #__mspabi_fixfli
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptosi float %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @f2ll() #0 {
+entry:
+; CHECK: f2ll:
+
+; CHECK: call #__mspabi_fixflli
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptosi float %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @f2ul() #0 {
+entry:
+; CHECK: f2ul:
+
+; CHECK: call #__mspabi_fixful
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptoui float %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @f2ull() #0 {
+entry:
+; CHECK: f2ull:
+
+; CHECK: call #__mspabi_fixfull
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptoui float %0 to i64
+
+ ret i64 %1
+}
+
+define double @l2d() #0 {
+entry:
+; CHECK: l2d:
+
+; CHECK: call #__mspabi_fltlid
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sitofp i32 %0 to double
+
+ ret double %1
+}
+
+define double @ll2d() #0 {
+entry:
+; CHECK: ll2d:
+
+; CHECK: call #__mspabi_fltllid
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sitofp i64 %0 to double
+
+ ret double %1
+}
+
+define double @ul2d() #0 {
+entry:
+; CHECK: ul2d:
+
+; CHECK: call #__mspabi_fltuld
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = uitofp i32 %0 to double
+
+ ret double %1
+}
+
+define double @ull2d() #0 {
+entry:
+; CHECK: ull2d:
+
+; CHECK: call #__mspabi_fltulld
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = uitofp i64 %0 to double
+
+ ret double %1
+}
+
+define float @l2f() #0 {
+entry:
+; CHECK: l2f:
+
+; CHECK: call #__mspabi_fltlif
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sitofp i32 %0 to float
+
+ ret float %1
+}
+
+define float @ll2f() #0 {
+entry:
+; CHECK: ll2f:
+
+; CHECK: call #__mspabi_fltllif
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sitofp i64 %0 to float
+
+ ret float %1
+}
+
+define float @ul2f() #0 {
+entry:
+; CHECK: ul2f:
+
+; CHECK: call #__mspabi_fltulf
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = uitofp i32 %0 to float
+
+ ret float %1
+}
+
+define float @ull2f() #0 {
+entry:
+; CHECK: ull2f:
+
+; CHECK: call #__mspabi_fltullf
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = uitofp i64 %0 to float
+
+ ret float %1
+}
+
+define i1 @cmpd_oeq() #0 {
+entry:
+; CHECK: cmpd_oeq:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp oeq double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_une() #0 {
+entry:
+; CHECK: cmpd_une:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp une double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_oge() #0 {
+entry:
+; CHECK: cmpd_oge:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp oge double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_olt() #0 {
+entry:
+; CHECK: cmpd_olt:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp olt double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_ole() #0 {
+entry:
+; CHECK: cmpd_ole:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp ole double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_ogt() #0 {
+entry:
+; CHECK: cmpd_ogt:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp ogt double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_oeq() #0 {
+entry:
+; CHECK: cmpf_oeq:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp oeq float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_une() #0 {
+entry:
+; CHECK: cmpf_une:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp une float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_oge() #0 {
+entry:
+; CHECK: cmpf_oge:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp oge float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_olt() #0 {
+entry:
+; CHECK: cmpf_olt:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp olt float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_ole() #0 {
+entry:
+; CHECK: cmpf_ole:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp ole float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_ogt() #0 {
+entry:
+; CHECK: cmpf_ogt:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp ogt float %0, 123.0
+
+ ret i1 %1
+}
+
+define double @addd() #0 {
+entry:
+; CHECK: addd:
+
+; CHECK: call #__mspabi_addd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fadd double %0, 123.0
+
+ ret double %1
+}
+
+define float @addf() #0 {
+entry:
+; CHECK: addf:
+
+; CHECK: call #__mspabi_addf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fadd float %0, 123.0
+
+ ret float %1
+}
+
+define double @divd() #0 {
+entry:
+; CHECK: divd:
+
+; CHECK: call #__mspabi_divd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fdiv double %0, 123.0
+
+ ret double %1
+}
+
+define float @divf() #0 {
+entry:
+; CHECK: divf:
+
+; CHECK: call #__mspabi_divf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fdiv float %0, 123.0
+
+ ret float %1
+}
+
+define double @mpyd() #0 {
+entry:
+; CHECK: mpyd:
+
+; CHECK: call #__mspabi_mpyd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fmul double %0, 123.0
+
+ ret double %1
+}
+
+define float @mpyf() #0 {
+entry:
+; CHECK: mpyf:
+
+; CHECK: call #__mspabi_mpyf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fmul float %0, 123.0
+
+ ret float %1
+}
+
+define double @subd() #0 {
+entry:
+; CHECK: subd:
+
+; CHECK: call #__mspabi_subd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fsub double %0, %0
+
+ ret double %1
+}
+
+define float @subf() #0 {
+entry:
+; CHECK: subf:
+
+; CHECK: call #__mspabi_subf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fsub float %0, %0
+
+ ret float %1
+}
+
+define i16 @divi() #0 {
+entry:
+; CHECK: divi:
+
+; CHECK: call #__mspabi_divi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = sdiv i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @divli() #0 {
+entry:
+; CHECK: divli:
+
+; CHECK: call #__mspabi_divli
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sdiv i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @divlli() #0 {
+entry:
+; CHECK: divlli:
+
+; CHECK: call #__mspabi_divlli
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sdiv i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @divu() #0 {
+entry:
+; CHECK: divu:
+
+; CHECK: call #__mspabi_divu
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = udiv i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @divul() #0 {
+entry:
+; CHECK: divul:
+
+; CHECK: call #__mspabi_divul
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = udiv i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @divull() #0 {
+entry:
+; CHECK: divull:
+
+; CHECK: call #__mspabi_divull
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = udiv i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @remi() #0 {
+entry:
+; CHECK: remi:
+
+; CHECK: call #__mspabi_remi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = srem i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @remli() #0 {
+entry:
+; CHECK: remli:
+
+; CHECK: call #__mspabi_remli
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = srem i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @remlli() #0 {
+entry:
+; CHECK: remlli:
+
+; CHECK: call #__mspabi_remlli
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = srem i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @remu() #0 {
+entry:
+; CHECK: remu:
+
+; CHECK: call #__mspabi_remu
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = urem i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @remul() #0 {
+entry:
+; CHECK: remul:
+
+; CHECK: call #__mspabi_remul
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = urem i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @remull() #0 {
+entry:
+; CHECK: remull:
+
+; CHECK: call #__mspabi_remull
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = urem i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll b/llvm/test/CodeGen/MSP430/promote-i8-mul.ll
index dce9d25ca87..0e05e3978b1 100644
--- a/llvm/test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll
+++ b/llvm/test/CodeGen/MSP430/promote-i8-mul.ll
@@ -8,7 +8,7 @@ target triple = "msp430-elf"
define signext i8 @foo(i8 signext %_si1, i8 signext %_si2) nounwind readnone {
entry:
; CHECK-LABEL: foo:
-; CHECK: call #__mulqi3
+; CHECK: call #__mspabi_mpyi
%mul = mul i8 %_si2, %_si1 ; <i8> [#uses=1]
ret i8 %mul
}