author     Craig Topper <craig.topper@intel.com>  2020-01-13 12:40:15 -0800
committer  Craig Topper <craig.topper@intel.com>  2020-01-13 13:11:12 -0800
commit     26c7a4ed101fae85d2041ee1c8e8483b96e4460e (patch)
tree       0e68675b507601f830b0840f2b932eb212c712ac /llvm/test
parent     577efd09e3b7b3a1ec7fcf0597397f137da99843 (diff)
[LegalizeIntegerTypes][X86] Add support for expanding input of STRICT_SINT_TO_FP/STRICT_UINT_TO_FP into a libcall.
This is needed to support i128->fp128 on 32-bit X86. Also add a full set of strict sint_to_fp/uint_to_fp conversion tests for fp128.
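
For reference, a minimal sketch of the pattern exercised by the tests added below: an i128 operand to the constrained (strict) sitofp intrinsic producing fp128, which type legalization now expands into a __floattitf libcall on 32-bit X86. The function name example_sitofp_i128 is illustrative only; the intrinsic signature and attribute usage are taken from the test file.

define fp128 @example_sitofp_i128(i128 %x) nounwind strictfp {
entry:
  ; Strict signed int-to-fp conversion; on i686 the i128 input is now
  ; expanded into a call to the __floattitf libcall instead of failing
  ; to legalize.
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

declare fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128, metadata, metadata)
attributes #0 = { strictfp }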
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/fp128-cast-strict.ll  | 406
1 file changed, 406 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/fp128-cast-strict.ll b/llvm/test/CodeGen/X86/fp128-cast-strict.ll
index 868dfc41637..048656f48b6 100644
--- a/llvm/test/CodeGen/X86/fp128-cast-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast-strict.ll
@@ -549,6 +549,402 @@ entry:
ret i128 %conv
}
+define fp128 @sitofp_i8(i8 %x) nounwind strictfp {
+; X64-LABEL: sitofp_i8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: movsbl %dil, %edi
+; X64-NEXT: callq __floatsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: sitofp_i8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movsbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: pushl %eax
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll __floatsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i8(i8 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @sitofp_i16(i16 %x) nounwind strictfp {
+; X64-LABEL: sitofp_i16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: movswl %di, %edi
+; X64-NEXT: callq __floatsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: sitofp_i16:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: pushl %eax
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll __floatsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i16(i16 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @sitofp_i32(i32 %x) nounwind strictfp {
+; X64-LABEL: sitofp_i32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floatsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: sitofp_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floatsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @sitofp_i64(i64 %x) nounwind strictfp {
+; X64-LABEL: sitofp_i64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floatditf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: sitofp_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $4, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floatditf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @sitofp_i128(i128 %x) nounwind strictfp {
+; X64-LABEL: sitofp_i128:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floattitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: sitofp_i128:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floattitf
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i8(i8 %x) nounwind strictfp {
+; X64-LABEL: uitofp_i8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: movzbl %dil, %edi
+; X64-NEXT: callq __floatsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: uitofp_i8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: pushl %eax
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll __floatunsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i8(i8 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i16(i16 %x) nounwind strictfp {
+; X64-LABEL: uitofp_i16:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: movzwl %di, %edi
+; X64-NEXT: callq __floatsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: uitofp_i16:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: pushl %eax
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll __floatunsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i16(i16 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i32(i32 %x) nounwind strictfp {
+; X64-LABEL: uitofp_i32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floatunsitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: uitofp_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floatunsitf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i64(i64 %x) nounwind strictfp {
+; X64-LABEL: uitofp_i64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floatunditf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: uitofp_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $4, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floatunditf
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i128(i128 %x) nounwind strictfp {
+; X64-LABEL: uitofp_i128:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq __floatuntitf
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: uitofp_i128:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __floatuntitf
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+entry:
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
attributes #0 = { strictfp }
declare float @llvm.experimental.constrained.fptrunc.f32.f128(fp128, metadata, metadata)
@@ -567,3 +963,13 @@ declare i16 @llvm.experimental.constrained.fptoui.i16.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i8(i8, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i16(i16, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i8(i8, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i16(i16, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128, metadata, metadata)