author     Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2013-08-30 15:18:11 +0000
committer  Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2013-08-30 15:18:11 +0000
commit     8d86fe7d6f541f4b3f2c6ab09afe3d0ce889e386 (patch)
tree       ae66c10f57b2567338406f968bcff7c13bdee95a /llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll
parent     998cda23b9c90e1e0aa0fe049283f77351c3fb34 (diff)
[PowerPC] Add handling for conversions to fast-isel.
Yet another chunk of fast-isel code. This one handles various
conversions involving floating-point. (It also includes some
miscellaneous handling throughout the back end for LWA_32 and LWAX_32
that should have been part of the load-store patch.)
llvm-svn: 189677
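
For reference, a minimal sketch of the kind of lowering this enables, written in the same style as the new test below. The function name is illustrative; the RUN line, CPU (pwr7), and ELF64 check prefix simply mirror the test in this commit, which remains the authoritative check.

; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64

define void @example_sitofp_i32_to_float(i32 %a) nounwind {
entry:
; ELF64: example_sitofp_i32_to_float
  %dst = alloca float, align 4
  %conv = sitofp i32 %a to float
; The GPR value is spilled to the stack, reloaded into an FPR with lfiwax,
; then converted with fcfids (single-precision convert from integer), as
; checked in sitofp_single_i32 below.
; ELF64: lfiwax
; ELF64: fcfids
  store float %conv, float* %dst, align 4
  ret void
}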
Diffstat (limited to 'llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll')
-rw-r--r--  llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll  |  305
1 file changed, 305 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll b/llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll
new file mode 100644
index 00000000000..a31c31210c3
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fast-isel-conversion.ll
@@ -0,0 +1,305 @@
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+
+; Test sitofp
+
+define void @sitofp_single_i64(i64 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i64
+  %b.addr = alloca float, align 4
+  %conv = sitofp i64 %a to float
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i32
+  %b.addr = alloca float, align 4
+  %conv = sitofp i32 %a to float
+; ELF64: std
+; ELF64: lfiwax
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i16
+  %b.addr = alloca float, align 4
+  %conv = sitofp i16 %a to float
+; ELF64: extsh
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i8(i8 %a) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i8
+  %b.addr = alloca float, align 4
+  %conv = sitofp i8 %a to float
+; ELF64: extsb
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i32
+  %b.addr = alloca double, align 8
+  %conv = sitofp i32 %a to double
+; ELF64: std
+; ELF64: lfiwax
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i64
+  %b.addr = alloca double, align 8
+  %conv = sitofp i64 %a to double
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i16
+  %b.addr = alloca double, align 8
+  %conv = sitofp i16 %a to double
+; ELF64: extsh
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i8
+  %b.addr = alloca double, align 8
+  %conv = sitofp i8 %a to double
+; ELF64: extsb
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+; Test uitofp
+
+define void @uitofp_single_i64(i64 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i64
+  %b.addr = alloca float, align 4
+  %conv = uitofp i64 %a to float
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i32
+  %b.addr = alloca float, align 4
+  %conv = uitofp i32 %a to float
+; ELF64: std
+; ELF64: lfiwzx
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i16
+  %b.addr = alloca float, align 4
+  %conv = uitofp i16 %a to float
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i8(i8 %a) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i8
+  %b.addr = alloca float, align 4
+  %conv = uitofp i8 %a to float
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_double_i64(i64 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i64
+  %b.addr = alloca double, align 8
+  %conv = uitofp i64 %a to double
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i32
+  %b.addr = alloca double, align 8
+  %conv = uitofp i32 %a to double
+; ELF64: std
+; ELF64: lfiwzx
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i16
+  %b.addr = alloca double, align 8
+  %conv = uitofp i16 %a to double
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i8
+  %b.addr = alloca double, align 8
+  %conv = uitofp i8 %a to double
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+; Test fptosi
+
+define void @fptosi_float_i32(float %a) nounwind ssp {
+entry:
+; ELF64: fptosi_float_i32
+  %b.addr = alloca i32, align 4
+  %conv = fptosi float %a to i32
+; ELF64: fctiwz
+; ELF64: stfd
+; ELF64: lwa
+  store i32 %conv, i32* %b.addr, align 4
+  ret void
+}
+
+define void @fptosi_float_i64(float %a) nounwind ssp {
+entry:
+; ELF64: fptosi_float_i64
+  %b.addr = alloca i64, align 4
+  %conv = fptosi float %a to i64
+; ELF64: fctidz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 4
+  ret void
+}
+
+define void @fptosi_double_i32(double %a) nounwind ssp {
+entry:
+; ELF64: fptosi_double_i32
+  %b.addr = alloca i32, align 8
+  %conv = fptosi double %a to i32
+; ELF64: fctiwz
+; ELF64: stfd
+; ELF64: lwa
+  store i32 %conv, i32* %b.addr, align 8
+  ret void
+}
+
+define void @fptosi_double_i64(double %a) nounwind ssp {
+entry:
+; ELF64: fptosi_double_i64
+  %b.addr = alloca i64, align 8
+  %conv = fptosi double %a to i64
+; ELF64: fctidz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 8
+  ret void
+}
+
+; Test fptoui
+
+define void @fptoui_float_i32(float %a) nounwind ssp {
+entry:
+; ELF64: fptoui_float_i32
+  %b.addr = alloca i32, align 4
+  %conv = fptoui float %a to i32
+; ELF64: fctiwuz
+; ELF64: stfd
+; ELF64: lwz
+  store i32 %conv, i32* %b.addr, align 4
+  ret void
+}
+
+define void @fptoui_float_i64(float %a) nounwind ssp {
+entry:
+; ELF64: fptoui_float_i64
+  %b.addr = alloca i64, align 4
+  %conv = fptoui float %a to i64
+; ELF64: fctiduz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 4
+  ret void
+}
+
+define void @fptoui_double_i32(double %a) nounwind ssp {
+entry:
+; ELF64: fptoui_double_i32
+  %b.addr = alloca i32, align 8
+  %conv = fptoui double %a to i32
+; ELF64: fctiwuz
+; ELF64: stfd
+; ELF64: lwz
+  store i32 %conv, i32* %b.addr, align 8
+  ret void
+}
+
+define void @fptoui_double_i64(double %a) nounwind ssp {
+entry:
+; ELF64: fptoui_double_i64
+  %b.addr = alloca i64, align 8
+  %conv = fptoui double %a to i64
+; ELF64: fctiduz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 8
+  ret void
+}