Diffstat (limited to 'llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll | 719
1 file changed, 396 insertions, 323 deletions
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index fdefd937e7e..ab5c6b7f998 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -176,7 +176,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; SSE-32-NEXT: andl $-8, %esp
 ; SSE-32-NEXT: subl $24, %esp
 ; SSE-32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: comisd %xmm2, %xmm0
 ; SSE-32-NEXT: xorpd %xmm1, %xmm1
 ; SSE-32-NEXT: xorpd %xmm3, %xmm3
 ; SSE-32-NEXT: jb .LBB1_2
@@ -196,7 +196,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: comisd %xmm2, %xmm0
 ; SSE-32-NEXT: jb .LBB1_4
 ; SSE-32-NEXT: # %bb.3:
 ; SSE-32-NEXT: movapd %xmm2, %xmm1
@@ -232,29 +232,33 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ;
 ; SSE-64-LABEL: strict_vector_fptoui_v2f64_to_v2i64:
 ; SSE-64: # %bb.0:
-; SSE-64-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomisd %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movapd %xmm0, %xmm1
-; SSE-64-NEXT: cmpltsd %xmm2, %xmm1
-; SSE-64-NEXT: andnpd %xmm2, %xmm1
-; SSE-64-NEXT: movapd %xmm0, %xmm3
-; SSE-64-NEXT: subsd %xmm1, %xmm3
-; SSE-64-NEXT: cvttsd2si %xmm3, %rcx
+; SSE-64-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-64-NEXT: comisd %xmm3, %xmm0
+; SSE-64-NEXT: xorpd %xmm2, %xmm2
+; SSE-64-NEXT: xorpd %xmm1, %xmm1
+; SSE-64-NEXT: jb .LBB1_2
+; SSE-64-NEXT: # %bb.1:
+; SSE-64-NEXT: movapd %xmm3, %xmm1
+; SSE-64-NEXT: .LBB1_2:
+; SSE-64-NEXT: movapd %xmm0, %xmm4
+; SSE-64-NEXT: subsd %xmm1, %xmm4
+; SSE-64-NEXT: cvttsd2si %xmm4, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm1
 ; SSE-64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomisd %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movapd %xmm0, %xmm3
-; SSE-64-NEXT: cmpltsd %xmm2, %xmm3
-; SSE-64-NEXT: andnpd %xmm2, %xmm3
-; SSE-64-NEXT: subsd %xmm3, %xmm0
-; SSE-64-NEXT: cvttsd2si %xmm0, %rcx
+; SSE-64-NEXT: comisd %xmm3, %xmm0
+; SSE-64-NEXT: jb .LBB1_4
+; SSE-64-NEXT: # %bb.3:
+; SSE-64-NEXT: movapd %xmm3, %xmm2
+; SSE-64-NEXT: .LBB1_4:
+; SSE-64-NEXT: subsd %xmm2, %xmm0
+; SSE-64-NEXT: cvttsd2si %xmm0, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm0
 ; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -272,7 +276,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX-32-NEXT: subl $16, %esp
 ; AVX-32-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
 ; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-32-NEXT: vucomisd %xmm1, %xmm3
+; AVX-32-NEXT: vcomisd %xmm1, %xmm3
 ; AVX-32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
 ; AVX-32-NEXT: vxorpd %xmm4, %xmm4, %xmm4
 ; AVX-32-NEXT: jb .LBB1_2
@@ -287,7 +291,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX-32-NEXT: movzbl %al, %eax
 ; AVX-32-NEXT: shll $31, %eax
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; AVX-32-NEXT: vucomisd %xmm1, %xmm0
+; AVX-32-NEXT: vcomisd %xmm1, %xmm0
 ; AVX-32-NEXT: jb .LBB1_4
 ; AVX-32-NEXT: # %bb.3:
 ; AVX-32-NEXT: vmovapd %xmm1, %xmm2
@@ -312,28 +316,34 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX-64-LABEL: strict_vector_fptoui_v2f64_to_v2i64:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomisd %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltsd %xmm1, %xmm0, %xmm2
-; AVX-64-NEXT: vandnpd %xmm1, %xmm2, %xmm2
-; AVX-64-NEXT: vsubsd %xmm2, %xmm0, %xmm2
-; AVX-64-NEXT: vcvttsd2si %xmm2, %rcx
+; AVX-64-NEXT: vcomisd %xmm1, %xmm0
+; AVX-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX-64-NEXT: vxorpd %xmm3, %xmm3, %xmm3
+; AVX-64-NEXT: jb .LBB1_2
+; AVX-64-NEXT: # %bb.1:
+; AVX-64-NEXT: vmovapd %xmm1, %xmm3
+; AVX-64-NEXT: .LBB1_2:
+; AVX-64-NEXT: vsubsd %xmm3, %xmm0, %xmm3
+; AVX-64-NEXT: vcvttsd2si %xmm3, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
-; AVX-64-NEXT: vmovq %rcx, %xmm2
+; AVX-64-NEXT: vmovq %rcx, %xmm3
 ; AVX-64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomisd %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltsd %xmm1, %xmm0, %xmm3
-; AVX-64-NEXT: vandnpd %xmm1, %xmm3, %xmm1
-; AVX-64-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-64-NEXT: vcvttsd2si %xmm0, %rcx
+; AVX-64-NEXT: vcomisd %xmm1, %xmm0
+; AVX-64-NEXT: jb .LBB1_4
+; AVX-64-NEXT: # %bb.3:
+; AVX-64-NEXT: vmovapd %xmm1, %xmm2
+; AVX-64-NEXT: .LBB1_4:
+; AVX-64-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
 ; AVX-64-NEXT: vmovq %rcx, %xmm0
-; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX-64-NEXT: retq
 ;
 ; AVX512VL-32-LABEL: strict_vector_fptoui_v2f64_to_v2i64:
@@ -348,7 +358,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX512VL-32-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; AVX512VL-32-NEXT: xorl %eax, %eax
-; AVX512VL-32-NEXT: vucomisd %xmm2, %xmm1
+; AVX512VL-32-NEXT: vcomisd %xmm2, %xmm1
 ; AVX512VL-32-NEXT: setb %cl
 ; AVX512VL-32-NEXT: kmovw %ecx, %k1
 ; AVX512VL-32-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -362,7 +372,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX512VL-32-NEXT: shll $31, %eax
 ; AVX512VL-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
 ; AVX512VL-32-NEXT: xorl %ecx, %ecx
-; AVX512VL-32-NEXT: vucomisd %xmm2, %xmm0
+; AVX512VL-32-NEXT: vcomisd %xmm2, %xmm0
 ; AVX512VL-32-NEXT: setb %dl
 ; AVX512VL-32-NEXT: kmovw %edx, %k1
 ; AVX512VL-32-NEXT: vmovsd %xmm3, %xmm2, %xmm2 {%k1}
@@ -559,48 +569,50 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; SSE-32-NEXT: .cfi_def_cfa_register %ebp
 ; SSE-32-NEXT: andl $-8, %esp
 ; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-32-NEXT: movaps %xmm0, %xmm2
-; SSE-32-NEXT: cmpltss %xmm1, %xmm2
-; SSE-32-NEXT: andnps %xmm1, %xmm2
-; SSE-32-NEXT: movaps %xmm0, %xmm3
-; SSE-32-NEXT: subss %xmm2, %xmm3
-; SSE-32-NEXT: movss %xmm3, {{[0-9]+}}(%esp)
-; SSE-32-NEXT: movaps %xmm0, %xmm2
-; SSE-32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE-32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-32-NEXT: comiss %xmm2, %xmm0
+; SSE-32-NEXT: xorps %xmm1, %xmm1
+; SSE-32-NEXT: xorps %xmm3, %xmm3
+; SSE-32-NEXT: jb .LBB3_2
+; SSE-32-NEXT: # %bb.1:
 ; SSE-32-NEXT: movaps %xmm2, %xmm3
-; SSE-32-NEXT: cmpltss %xmm1, %xmm3
-; SSE-32-NEXT: andnps %xmm1, %xmm3
-; SSE-32-NEXT: movaps %xmm2, %xmm4
+; SSE-32-NEXT: .LBB3_2:
+; SSE-32-NEXT: movaps %xmm0, %xmm4
 ; SSE-32-NEXT: subss %xmm3, %xmm4
 ; SSE-32-NEXT: movss %xmm4, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: setae %al
 ; SSE-32-NEXT: flds {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fnstcw {{[0-9]+}}(%esp)
-; SSE-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; SSE-32-NEXT: orl $3072, %eax # imm = 0xC00
-; SSE-32-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; SSE-32-NEXT: orl $3072, %ecx # imm = 0xC00
+; SSE-32-NEXT: movw %cx, {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-32-NEXT: comiss %xmm2, %xmm0
+; SSE-32-NEXT: jb .LBB3_4
+; SSE-32-NEXT: # %bb.3:
+; SSE-32-NEXT: movaps %xmm2, %xmm1
+; SSE-32-NEXT: .LBB3_4:
+; SSE-32-NEXT: subss %xmm1, %xmm0
+; SSE-32-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: setae %cl
 ; SSE-32-NEXT: flds {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fnstcw (%esp)
-; SSE-32-NEXT: movzwl (%esp), %eax
-; SSE-32-NEXT: orl $3072, %eax # imm = 0xC00
-; SSE-32-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: movzwl (%esp), %edx
+; SSE-32-NEXT: orl $3072, %edx # imm = 0xC00
+; SSE-32-NEXT: movw %dx, {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw (%esp)
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm1, %xmm0
-; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
 ; SSE-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; SSE-32-NEXT: movd %eax, %xmm3
+; SSE-32-NEXT: movd %eax, %xmm1
 ; SSE-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm1, %xmm2
-; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-32-NEXT: movzbl %cl, %eax
 ; SSE-32-NEXT: shll $31, %eax
 ; SSE-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
 ; SSE-32-NEXT: movd %eax, %xmm1
@@ -614,29 +626,33 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ;
 ; SSE-64-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
 ; SSE-64: # %bb.0:
-; SSE-64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomiss %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movaps %xmm0, %xmm1
-; SSE-64-NEXT: cmpltss %xmm2, %xmm1
-; SSE-64-NEXT: andnps %xmm2, %xmm1
-; SSE-64-NEXT: movaps %xmm0, %xmm3
-; SSE-64-NEXT: subss %xmm1, %xmm3
-; SSE-64-NEXT: cvttss2si %xmm3, %rcx
+; SSE-64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-64-NEXT: comiss %xmm3, %xmm0
+; SSE-64-NEXT: xorps %xmm2, %xmm2
+; SSE-64-NEXT: xorps %xmm1, %xmm1
+; SSE-64-NEXT: jb .LBB3_2
+; SSE-64-NEXT: # %bb.1:
+; SSE-64-NEXT: movaps %xmm3, %xmm1
+; SSE-64-NEXT: .LBB3_2:
+; SSE-64-NEXT: movaps %xmm0, %xmm4
+; SSE-64-NEXT: subss %xmm1, %xmm4
+; SSE-64-NEXT: cvttss2si %xmm4, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm1
 ; SSE-64-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomiss %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movaps %xmm0, %xmm3
-; SSE-64-NEXT: cmpltss %xmm2, %xmm3
-; SSE-64-NEXT: andnps %xmm2, %xmm3
-; SSE-64-NEXT: subss %xmm3, %xmm0
-; SSE-64-NEXT: cvttss2si %xmm0, %rcx
+; SSE-64-NEXT: comiss %xmm3, %xmm0
+; SSE-64-NEXT: jb .LBB3_4
+; SSE-64-NEXT: # %bb.3:
+; SSE-64-NEXT: movaps %xmm3, %xmm2
+; SSE-64-NEXT: .LBB3_4:
+; SSE-64-NEXT: subss %xmm2, %xmm0
+; SSE-64-NEXT: cvttss2si %xmm0, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm0
 ; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -652,28 +668,34 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; AVX-32-NEXT: .cfi_def_cfa_register %ebp
 ; AVX-32-NEXT: andl $-8, %esp
 ; AVX-32-NEXT: subl $16, %esp
-; AVX-32-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vcmpltss %xmm2, %xmm1, %xmm3
-; AVX-32-NEXT: vandnps %xmm2, %xmm3, %xmm3
-; AVX-32-NEXT: vsubss %xmm3, %xmm1, %xmm3
+; AVX-32-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vcomiss %xmm1, %xmm3
+; AVX-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-32-NEXT: jb .LBB3_2
+; AVX-32-NEXT: # %bb.1:
+; AVX-32-NEXT: vmovaps %xmm1, %xmm4
+; AVX-32-NEXT: .LBB3_2:
+; AVX-32-NEXT: vsubss %xmm4, %xmm3, %xmm3
 ; AVX-32-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vcmpltss %xmm2, %xmm0, %xmm3
-; AVX-32-NEXT: vandnps %xmm2, %xmm3, %xmm3
-; AVX-32-NEXT: vsubss %xmm3, %xmm0, %xmm3
-; AVX-32-NEXT: vmovss %xmm3, (%esp)
 ; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds (%esp)
-; AVX-32-NEXT: fisttpll (%esp)
-; AVX-32-NEXT: xorl %eax, %eax
-; AVX-32-NEXT: vucomiss %xmm2, %xmm1
 ; AVX-32-NEXT: setae %al
+; AVX-32-NEXT: movzbl %al, %eax
 ; AVX-32-NEXT: shll $31, %eax
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; AVX-32-NEXT: xorl %ecx, %ecx
-; AVX-32-NEXT: vucomiss %xmm2, %xmm0
+; AVX-32-NEXT: vcomiss %xmm1, %xmm0
+; AVX-32-NEXT: jb .LBB3_4
+; AVX-32-NEXT: # %bb.3:
+; AVX-32-NEXT: vmovaps %xmm1, %xmm2
+; AVX-32-NEXT: .LBB3_4:
+; AVX-32-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; AVX-32-NEXT: vmovss %xmm0, (%esp)
+; AVX-32-NEXT: flds (%esp)
+; AVX-32-NEXT: fisttpll (%esp)
 ; AVX-32-NEXT: setae %cl
+; AVX-32-NEXT: movzbl %cl, %ecx
 ; AVX-32-NEXT: shll $31, %ecx
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %ecx
 ; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -688,28 +710,34 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; AVX-64-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomiss %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltss %xmm1, %xmm0, %xmm2
-; AVX-64-NEXT: vandnps %xmm1, %xmm2, %xmm2
-; AVX-64-NEXT: vsubss %xmm2, %xmm0, %xmm2
-; AVX-64-NEXT: vcvttss2si %xmm2, %rcx
+; AVX-64-NEXT: vcomiss %xmm1, %xmm0
+; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX-64-NEXT: jb .LBB3_2
+; AVX-64-NEXT: # %bb.1:
+; AVX-64-NEXT: vmovaps %xmm1, %xmm3
+; AVX-64-NEXT: .LBB3_2:
+; AVX-64-NEXT: vsubss %xmm3, %xmm0, %xmm3
+; AVX-64-NEXT: vcvttss2si %xmm3, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
-; AVX-64-NEXT: vmovq %rcx, %xmm2
+; AVX-64-NEXT: vmovq %rcx, %xmm3
 ; AVX-64-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomiss %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltss %xmm1, %xmm0, %xmm3
-; AVX-64-NEXT: vandnps %xmm1, %xmm3, %xmm1
-; AVX-64-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-64-NEXT: vcvttss2si %xmm0, %rcx
+; AVX-64-NEXT: vcomiss %xmm1, %xmm0
+; AVX-64-NEXT: jb .LBB3_4
+; AVX-64-NEXT: # %bb.3:
+; AVX-64-NEXT: vmovaps %xmm1, %xmm2
+; AVX-64-NEXT: .LBB3_4:
+; AVX-64-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vcvttss2si %xmm0, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
 ; AVX-64-NEXT: vmovq %rcx, %xmm0
-; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX-64-NEXT: retq
 ;
 ; AVX512VL-32-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
@@ -723,28 +751,29 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; AVX512VL-32-NEXT: subl $16, %esp
 ; AVX512VL-32-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX512VL-32-NEXT: vcmpltss %xmm2, %xmm1, %k1
+; AVX512VL-32-NEXT: xorl %eax, %eax
+; AVX512VL-32-NEXT: vcomiss %xmm2, %xmm1
+; AVX512VL-32-NEXT: setb %cl
+; AVX512VL-32-NEXT: kmovw %ecx, %k1
 ; AVX512VL-32-NEXT: vxorps %xmm3, %xmm3, %xmm3
 ; AVX512VL-32-NEXT: vmovaps %xmm2, %xmm4
 ; AVX512VL-32-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
-; AVX512VL-32-NEXT: vsubss %xmm4, %xmm1, %xmm4
-; AVX512VL-32-NEXT: vmovss %xmm4, {{[0-9]+}}(%esp)
-; AVX512VL-32-NEXT: vcmpltss %xmm2, %xmm0, %k1
-; AVX512VL-32-NEXT: vmovaps %xmm2, %xmm4
-; AVX512VL-32-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
-; AVX512VL-32-NEXT: vsubss %xmm4, %xmm0, %xmm3
-; AVX512VL-32-NEXT: vmovss %xmm3, (%esp)
+; AVX512VL-32-NEXT: vsubss %xmm4, %xmm1, %xmm1
+; AVX512VL-32-NEXT: vmovss %xmm1, {{[0-9]+}}(%esp)
 ; AVX512VL-32-NEXT: flds {{[0-9]+}}(%esp)
 ; AVX512VL-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX512VL-32-NEXT: flds (%esp)
-; AVX512VL-32-NEXT: fisttpll (%esp)
-; AVX512VL-32-NEXT: xorl %eax, %eax
-; AVX512VL-32-NEXT: vucomiss %xmm2, %xmm1
 ; AVX512VL-32-NEXT: setae %al
 ; AVX512VL-32-NEXT: shll $31, %eax
 ; AVX512VL-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
 ; AVX512VL-32-NEXT: xorl %ecx, %ecx
-; AVX512VL-32-NEXT: vucomiss %xmm2, %xmm0
+; AVX512VL-32-NEXT: vcomiss %xmm2, %xmm0
+; AVX512VL-32-NEXT: setb %dl
+; AVX512VL-32-NEXT: kmovw %edx, %k1
+; AVX512VL-32-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1}
+; AVX512VL-32-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vmovss %xmm0, (%esp)
+; AVX512VL-32-NEXT: flds (%esp)
+; AVX512VL-32-NEXT: fisttpll (%esp)
 ; AVX512VL-32-NEXT: setae %cl
 ; AVX512VL-32-NEXT: shll $31, %ecx
 ; AVX512VL-32-NEXT: xorl {{[0-9]+}}(%esp), %ecx
@@ -828,28 +857,32 @@ define <2 x i32> @strict_vector_fptosi_v2f64_to_v2i32(<2 x double> %a) #0 {
 define <2 x i32> @strict_vector_fptoui_v2f64_to_v2i32(<2 x double> %a) #0 {
 ; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i32:
 ; SSE-32: # %bb.0:
-; SSE-32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-32-NEXT: comisd %xmm3, %xmm0
+; SSE-32-NEXT: xorpd %xmm2, %xmm2
+; SSE-32-NEXT: xorpd %xmm1, %xmm1
+; SSE-32-NEXT: jb .LBB5_2
+; SSE-32-NEXT: # %bb.1:
+; SSE-32-NEXT: movapd %xmm3, %xmm1
+; SSE-32-NEXT: .LBB5_2:
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movapd %xmm0, %xmm1
-; SSE-32-NEXT: cmpltsd %xmm2, %xmm1
-; SSE-32-NEXT: andnpd %xmm2, %xmm1
-; SSE-32-NEXT: movapd %xmm0, %xmm3
-; SSE-32-NEXT: subsd %xmm1, %xmm3
-; SSE-32-NEXT: cvttsd2si %xmm3, %ecx
+; SSE-32-NEXT: movapd %xmm0, %xmm4
+; SSE-32-NEXT: subsd %xmm1, %xmm4
+; SSE-32-NEXT: cvttsd2si %xmm4, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm1
 ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: comisd %xmm3, %xmm0
+; SSE-32-NEXT: jb .LBB5_4
+; SSE-32-NEXT: # %bb.3:
+; SSE-32-NEXT: movapd %xmm3, %xmm2
+; SSE-32-NEXT: .LBB5_4:
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movapd %xmm0, %xmm3
-; SSE-32-NEXT: cmpltsd %xmm2, %xmm3
-; SSE-32-NEXT: andnpd %xmm2, %xmm3
-; SSE-32-NEXT: subsd %xmm3, %xmm0
+; SSE-32-NEXT: subsd %xmm2, %xmm0
 ; SSE-32-NEXT: cvttsd2si %xmm0, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm0
@@ -978,28 +1011,32 @@ define <2 x i32> @strict_vector_fptosi_v2f32_to_v2i32(<2 x float> %a) #0 {
 define <2 x i32> @strict_vector_fptoui_v2f32_to_v2i32(<2 x float> %a) #0 {
 ; SSE-32-LABEL: strict_vector_fptoui_v2f32_to_v2i32:
 ; SSE-32: # %bb.0:
-; SSE-32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm0
+; SSE-32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-32-NEXT: comiss %xmm3, %xmm0
+; SSE-32-NEXT: xorps %xmm2, %xmm2
+; SSE-32-NEXT: xorps %xmm1, %xmm1
+; SSE-32-NEXT: jb .LBB7_2
+; SSE-32-NEXT: # %bb.1:
+; SSE-32-NEXT: movaps %xmm3, %xmm1
+; SSE-32-NEXT: .LBB7_2:
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movaps %xmm0, %xmm1
-; SSE-32-NEXT: cmpltss %xmm2, %xmm1
-; SSE-32-NEXT: andnps %xmm2, %xmm1
-; SSE-32-NEXT: movaps %xmm0, %xmm3
-; SSE-32-NEXT: subss %xmm1, %xmm3
-; SSE-32-NEXT: cvttss2si %xmm3, %ecx
+; SSE-32-NEXT: movaps %xmm0, %xmm4
+; SSE-32-NEXT: subss %xmm1, %xmm4
+; SSE-32-NEXT: cvttss2si %xmm4, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm1
 ; SSE-32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm0
+; SSE-32-NEXT: comiss %xmm3, %xmm0
+; SSE-32-NEXT: jb .LBB7_4
+; SSE-32-NEXT: # %bb.3:
+; SSE-32-NEXT: movaps %xmm3, %xmm2
+; SSE-32-NEXT: .LBB7_4:
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movaps %xmm0, %xmm3
-; SSE-32-NEXT: cmpltss %xmm2, %xmm3
-; SSE-32-NEXT: andnps %xmm2, %xmm3
-; SSE-32-NEXT: subss %xmm3, %xmm0
+; SSE-32-NEXT: subss %xmm2, %xmm0
 ; SSE-32-NEXT: cvttss2si %xmm0, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm0
@@ -1542,7 +1579,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; SSE-32-NEXT: andl $-8, %esp
 ; SSE-32-NEXT: subl $24, %esp
 ; SSE-32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: comisd %xmm2, %xmm0
 ; SSE-32-NEXT: xorpd %xmm1, %xmm1
 ; SSE-32-NEXT: xorpd %xmm3, %xmm3
 ; SSE-32-NEXT: jb .LBB17_2
@@ -1562,7 +1599,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-32-NEXT: ucomisd %xmm2, %xmm0
+; SSE-32-NEXT: comisd %xmm2, %xmm0
 ; SSE-32-NEXT: jb .LBB17_4
 ; SSE-32-NEXT: # %bb.3:
 ; SSE-32-NEXT: movapd %xmm2, %xmm1
@@ -1598,29 +1635,33 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ;
 ; SSE-64-LABEL: strict_vector_fptoui_v2f64_to_v2i1:
 ; SSE-64: # %bb.0:
-; SSE-64-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomisd %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movapd %xmm0, %xmm1
-; SSE-64-NEXT: cmpltsd %xmm2, %xmm1
-; SSE-64-NEXT: andnpd %xmm2, %xmm1
-; SSE-64-NEXT: movapd %xmm0, %xmm3
-; SSE-64-NEXT: subsd %xmm1, %xmm3
-; SSE-64-NEXT: cvttsd2si %xmm3, %rcx
+; SSE-64-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-64-NEXT: comisd %xmm3, %xmm0
+; SSE-64-NEXT: xorpd %xmm2, %xmm2
+; SSE-64-NEXT: xorpd %xmm1, %xmm1
+; SSE-64-NEXT: jb .LBB17_2
+; SSE-64-NEXT: # %bb.1:
+; SSE-64-NEXT: movapd %xmm3, %xmm1
+; SSE-64-NEXT: .LBB17_2:
+; SSE-64-NEXT: movapd %xmm0, %xmm4
+; SSE-64-NEXT: subsd %xmm1, %xmm4
+; SSE-64-NEXT: cvttsd2si %xmm4, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm1
 ; SSE-64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomisd %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movapd %xmm0, %xmm3
-; SSE-64-NEXT: cmpltsd %xmm2, %xmm3
-; SSE-64-NEXT: andnpd %xmm2, %xmm3
-; SSE-64-NEXT: subsd %xmm3, %xmm0
-; SSE-64-NEXT: cvttsd2si %xmm0, %rcx
+; SSE-64-NEXT: comisd %xmm3, %xmm0
+; SSE-64-NEXT: jb .LBB17_4
+; SSE-64-NEXT: # %bb.3:
+; SSE-64-NEXT: movapd %xmm3, %xmm2
+; SSE-64-NEXT: .LBB17_4:
+; SSE-64-NEXT: subsd %xmm2, %xmm0
+; SSE-64-NEXT: cvttsd2si %xmm0, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm0
 ; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1638,7 +1679,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; AVX-32-NEXT: subl $16, %esp
 ; AVX-32-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
 ; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-32-NEXT: vucomisd %xmm1, %xmm3
+; AVX-32-NEXT: vcomisd %xmm1, %xmm3
 ; AVX-32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
 ; AVX-32-NEXT: vxorpd %xmm4, %xmm4, %xmm4
 ; AVX-32-NEXT: jb .LBB17_2
@@ -1653,7 +1694,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; AVX-32-NEXT: movzbl %al, %eax
 ; AVX-32-NEXT: shll $31, %eax
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; AVX-32-NEXT: vucomisd %xmm1, %xmm0
+; AVX-32-NEXT: vcomisd %xmm1, %xmm0
 ; AVX-32-NEXT: jb .LBB17_4
 ; AVX-32-NEXT: # %bb.3:
 ; AVX-32-NEXT: vmovapd %xmm1, %xmm2
@@ -1678,28 +1719,34 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; AVX-64-LABEL: strict_vector_fptoui_v2f64_to_v2i1:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomisd %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltsd %xmm1, %xmm0, %xmm2
-; AVX-64-NEXT: vandnpd %xmm1, %xmm2, %xmm2
-; AVX-64-NEXT: vsubsd %xmm2, %xmm0, %xmm2
-; AVX-64-NEXT: vcvttsd2si %xmm2, %rcx
+; AVX-64-NEXT: vcomisd %xmm1, %xmm0
+; AVX-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX-64-NEXT: vxorpd %xmm3, %xmm3, %xmm3
+; AVX-64-NEXT: jb .LBB17_2
+; AVX-64-NEXT: # %bb.1:
+; AVX-64-NEXT: vmovapd %xmm1, %xmm3
+; AVX-64-NEXT: .LBB17_2:
+; AVX-64-NEXT: vsubsd %xmm3, %xmm0, %xmm3
+; AVX-64-NEXT: vcvttsd2si %xmm3, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
-; AVX-64-NEXT: vmovq %rcx, %xmm2
+; AVX-64-NEXT: vmovq %rcx, %xmm3
 ; AVX-64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomisd %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltsd %xmm1, %xmm0, %xmm3
-; AVX-64-NEXT: vandnpd %xmm1, %xmm3, %xmm1
-; AVX-64-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-64-NEXT: vcvttsd2si %xmm0, %rcx
+; AVX-64-NEXT: vcomisd %xmm1, %xmm0
+; AVX-64-NEXT: jb .LBB17_4
+; AVX-64-NEXT: # %bb.3:
+; AVX-64-NEXT: vmovapd %xmm1, %xmm2
+; AVX-64-NEXT: .LBB17_4:
+; AVX-64-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
 ; AVX-64-NEXT: vmovq %rcx, %xmm0
-; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX-64-NEXT: retq
 ;
 ; AVX512VL-LABEL: strict_vector_fptoui_v2f64_to_v2i1:
@@ -1863,48 +1910,50 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
 ; SSE-32-NEXT: .cfi_def_cfa_register %ebp
 ; SSE-32-NEXT: andl $-8, %esp
 ; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-32-NEXT: movaps %xmm0, %xmm2
-; SSE-32-NEXT: cmpltss %xmm1, %xmm2
-; SSE-32-NEXT: andnps %xmm1, %xmm2
-; SSE-32-NEXT: movaps %xmm0, %xmm3
-; SSE-32-NEXT: subss %xmm2, %xmm3
-; SSE-32-NEXT: movss %xmm3, {{[0-9]+}}(%esp)
-; SSE-32-NEXT: movaps %xmm0, %xmm2
-; SSE-32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE-32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-32-NEXT: comiss %xmm2, %xmm0
+; SSE-32-NEXT: xorps %xmm1, %xmm1
+; SSE-32-NEXT: xorps %xmm3, %xmm3
+; SSE-32-NEXT: jb .LBB19_2
+; SSE-32-NEXT: # %bb.1:
 ; SSE-32-NEXT: movaps %xmm2, %xmm3
-; SSE-32-NEXT: cmpltss %xmm1, %xmm3
-; SSE-32-NEXT: andnps %xmm1, %xmm3
-; SSE-32-NEXT: movaps %xmm2, %xmm4
+; SSE-32-NEXT: .LBB19_2:
+; SSE-32-NEXT: movaps %xmm0, %xmm4
 ; SSE-32-NEXT: subss %xmm3, %xmm4
 ; SSE-32-NEXT: movss %xmm4, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: setae %al
 ; SSE-32-NEXT: flds {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fnstcw {{[0-9]+}}(%esp)
-; SSE-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; SSE-32-NEXT: orl $3072, %eax # imm = 0xC00
-; SSE-32-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; SSE-32-NEXT: orl $3072, %ecx # imm = 0xC00
+; SSE-32-NEXT: movw %cx, {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-32-NEXT: comiss %xmm2, %xmm0
+; SSE-32-NEXT: jb .LBB19_4
+; SSE-32-NEXT: # %bb.3:
+; SSE-32-NEXT: movaps %xmm2, %xmm1
+; SSE-32-NEXT: .LBB19_4:
+; SSE-32-NEXT: subss %xmm1, %xmm0
+; SSE-32-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: setae %cl
 ; SSE-32-NEXT: flds {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fnstcw (%esp)
-; SSE-32-NEXT: movzwl (%esp), %eax
-; SSE-32-NEXT: orl $3072, %eax # imm = 0xC00
-; SSE-32-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-32-NEXT: movzwl (%esp), %edx
+; SSE-32-NEXT: orl $3072, %edx # imm = 0xC00
+; SSE-32-NEXT: movw %dx, {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fistpll {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: fldcw (%esp)
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm1, %xmm0
-; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
 ; SSE-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; SSE-32-NEXT: movd %eax, %xmm3
+; SSE-32-NEXT: movd %eax, %xmm1
 ; SSE-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm1, %xmm2
-; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-32-NEXT: movzbl %cl, %eax
 ; SSE-32-NEXT: shll $31, %eax
 ; SSE-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
 ; SSE-32-NEXT: movd %eax, %xmm1
@@ -1918,29 +1967,33 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
 ;
 ; SSE-64-LABEL: strict_vector_fptoui_v2f32_to_v2i1:
 ; SSE-64: # %bb.0:
-; SSE-64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomiss %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movaps %xmm0, %xmm1
-; SSE-64-NEXT: cmpltss %xmm2, %xmm1
-; SSE-64-NEXT: andnps %xmm2, %xmm1
-; SSE-64-NEXT: movaps %xmm0, %xmm3
-; SSE-64-NEXT: subss %xmm1, %xmm3
-; SSE-64-NEXT: cvttss2si %xmm3, %rcx
+; SSE-64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-64-NEXT: comiss %xmm3, %xmm0
+; SSE-64-NEXT: xorps %xmm2, %xmm2
+; SSE-64-NEXT: xorps %xmm1, %xmm1
+; SSE-64-NEXT: jb .LBB19_2
+; SSE-64-NEXT: # %bb.1:
+; SSE-64-NEXT: movaps %xmm3, %xmm1
+; SSE-64-NEXT: .LBB19_2:
+; SSE-64-NEXT: movaps %xmm0, %xmm4
+; SSE-64-NEXT: subss %xmm1, %xmm4
+; SSE-64-NEXT: cvttss2si %xmm4, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm1
 ; SSE-64-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-64-NEXT: xorl %eax, %eax
-; SSE-64-NEXT: ucomiss %xmm2, %xmm0
-; SSE-64-NEXT: setae %al
-; SSE-64-NEXT: shlq $63, %rax
-; SSE-64-NEXT: movaps %xmm0, %xmm3
-; SSE-64-NEXT: cmpltss %xmm2, %xmm3
-; SSE-64-NEXT: andnps %xmm2, %xmm3
-; SSE-64-NEXT: subss %xmm3, %xmm0
-; SSE-64-NEXT: cvttss2si %xmm0, %rcx
+; SSE-64-NEXT: comiss %xmm3, %xmm0
+; SSE-64-NEXT: jb .LBB19_4
+; SSE-64-NEXT: # %bb.3:
+; SSE-64-NEXT: movaps %xmm3, %xmm2
+; SSE-64-NEXT: .LBB19_4:
+; SSE-64-NEXT: subss %xmm2, %xmm0
+; SSE-64-NEXT: cvttss2si %xmm0, %rax
+; SSE-64-NEXT: setae %cl
+; SSE-64-NEXT: movzbl %cl, %ecx
+; SSE-64-NEXT: shlq $63, %rcx
 ; SSE-64-NEXT: xorq %rax, %rcx
 ; SSE-64-NEXT: movq %rcx, %xmm0
 ; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1956,28 +2009,34 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
 ; AVX-32-NEXT: .cfi_def_cfa_register %ebp
 ; AVX-32-NEXT: andl $-8, %esp
 ; AVX-32-NEXT: subl $16, %esp
-; AVX-32-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vcmpltss %xmm2, %xmm1, %xmm3
-; AVX-32-NEXT: vandnps %xmm2, %xmm3, %xmm3
-; AVX-32-NEXT: vsubss %xmm3, %xmm1, %xmm3
+; AVX-32-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vcomiss %xmm1, %xmm3
+; AVX-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-32-NEXT: jb .LBB19_2
+; AVX-32-NEXT: # %bb.1:
+; AVX-32-NEXT: vmovaps %xmm1, %xmm4
+; AVX-32-NEXT: .LBB19_2:
+; AVX-32-NEXT: vsubss %xmm4, %xmm3, %xmm3
 ; AVX-32-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vcmpltss %xmm2, %xmm0, %xmm3
-; AVX-32-NEXT: vandnps %xmm2, %xmm3, %xmm3
-; AVX-32-NEXT: vsubss %xmm3, %xmm0, %xmm3
-; AVX-32-NEXT: vmovss %xmm3, (%esp)
 ; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds (%esp)
-; AVX-32-NEXT: fisttpll (%esp)
-; AVX-32-NEXT: xorl %eax, %eax
-; AVX-32-NEXT: vucomiss %xmm2, %xmm1
 ; AVX-32-NEXT: setae %al
+; AVX-32-NEXT: movzbl %al, %eax
 ; AVX-32-NEXT: shll $31, %eax
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; AVX-32-NEXT: xorl %ecx, %ecx
-; AVX-32-NEXT: vucomiss %xmm2, %xmm0
+; AVX-32-NEXT: vcomiss %xmm1, %xmm0
+; AVX-32-NEXT: jb .LBB19_4
+; AVX-32-NEXT: # %bb.3:
+; AVX-32-NEXT: vmovaps %xmm1, %xmm2
+; AVX-32-NEXT: .LBB19_4:
+; AVX-32-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; AVX-32-NEXT: vmovss %xmm0, (%esp)
+; AVX-32-NEXT: flds (%esp)
+; AVX-32-NEXT: fisttpll (%esp)
 ; AVX-32-NEXT: setae %cl
+; AVX-32-NEXT: movzbl %cl, %ecx
 ; AVX-32-NEXT: shll $31, %ecx
 ; AVX-32-NEXT: xorl {{[0-9]+}}(%esp), %ecx
 ; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1992,28 +2051,34 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
 ; AVX-64-LABEL: strict_vector_fptoui_v2f32_to_v2i1:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomiss %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltss %xmm1, %xmm0, %xmm2
-; AVX-64-NEXT: vandnps %xmm1, %xmm2, %xmm2
-; AVX-64-NEXT: vsubss %xmm2, %xmm0, %xmm2
-; AVX-64-NEXT: vcvttss2si %xmm2, %rcx
+; AVX-64-NEXT: vcomiss %xmm1, %xmm0
+; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX-64-NEXT: jb .LBB19_2
+; AVX-64-NEXT: # %bb.1:
+; AVX-64-NEXT: vmovaps %xmm1, %xmm3
+; AVX-64-NEXT: .LBB19_2:
+; AVX-64-NEXT: vsubss %xmm3, %xmm0, %xmm3
+; AVX-64-NEXT: vcvttss2si %xmm3, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
-; AVX-64-NEXT: vmovq %rcx, %xmm2
+; AVX-64-NEXT: vmovq %rcx, %xmm3
 ; AVX-64-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-64-NEXT: xorl %eax, %eax
-; AVX-64-NEXT: vucomiss %xmm1, %xmm0
-; AVX-64-NEXT: setae %al
-; AVX-64-NEXT: shlq $63, %rax
-; AVX-64-NEXT: vcmpltss %xmm1, %xmm0, %xmm3
-; AVX-64-NEXT: vandnps %xmm1, %xmm3, %xmm1
-; AVX-64-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-64-NEXT: vcvttss2si %xmm0, %rcx
+; AVX-64-NEXT: vcomiss %xmm1, %xmm0
+; AVX-64-NEXT: jb .LBB19_4
+; AVX-64-NEXT: # %bb.3:
+; AVX-64-NEXT: vmovaps %xmm1, %xmm2
+; AVX-64-NEXT: .LBB19_4:
+; AVX-64-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT: vcvttss2si %xmm0, %rax
+; AVX-64-NEXT: setae %cl
+; AVX-64-NEXT: movzbl %cl, %ecx
+; AVX-64-NEXT: shlq $63, %rcx
 ; AVX-64-NEXT: xorq %rax, %rcx
 ; AVX-64-NEXT: vmovq %rcx, %xmm0
-; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX-64-NEXT: retq
 ;
 ; AVX512VL-LABEL: strict_vector_fptoui_v2f32_to_v2i1:
@@ -2085,58 +2150,66 @@ define <4 x i32> @strict_vector_fptoui_v4f32_to_v4i32(<4 x float> %a) #0 {
 ; SSE-32: # %bb.0:
 ; SSE-32-NEXT: movaps %xmm0, %xmm1
 ; SSE-32-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
-; SSE-32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm1
+; SSE-32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-32-NEXT: comiss %xmm3, %xmm1
+; SSE-32-NEXT: xorps %xmm2, %xmm2
+; SSE-32-NEXT: xorps %xmm4, %xmm4
+; SSE-32-NEXT: jb .LBB21_2
+; SSE-32-NEXT: # %bb.1:
+; SSE-32-NEXT: movaps %xmm3, %xmm4
+; SSE-32-NEXT: .LBB21_2:
 ; SSE-32-NEXT: setae %al
-; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movaps %xmm1, %xmm3
-; SSE-32-NEXT: cmpltss %xmm2, %xmm3
-; SSE-32-NEXT: andnps %xmm2, %xmm3
-; SSE-32-NEXT: subss %xmm3, %xmm1
-; SSE-32-NEXT: cvttss2si %xmm1, %ecx
-; SSE-32-NEXT: xorl %eax, %ecx
-; SSE-32-NEXT: movd %ecx, %xmm1
-; SSE-32-NEXT: movaps %xmm0, %xmm3
-; SSE-32-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm3
+; SSE-32-NEXT: movzbl %al, %ecx
+; SSE-32-NEXT: shll $31, %ecx
+; SSE-32-NEXT: subss %xmm4, %xmm1
+; SSE-32-NEXT: cvttss2si %xmm1, %eax
+; SSE-32-NEXT: xorl %ecx, %eax
+; SSE-32-NEXT: movaps %xmm0, %xmm4
+; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
+; SSE-32-NEXT: comiss %xmm3, %xmm4
+; SSE-32-NEXT: xorps %xmm5, %xmm5
+; SSE-32-NEXT: jb .LBB21_4
+; SSE-32-NEXT: # %bb.3:
+; SSE-32-NEXT: movaps %xmm3, %xmm5
+; SSE-32-NEXT: .LBB21_4:
+; SSE-32-NEXT: movd %eax, %xmm1
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movaps %xmm3, %xmm4
-; SSE-32-NEXT: cmpltss %xmm2, %xmm4
-; SSE-32-NEXT: andnps %xmm2, %xmm4
-; SSE-32-NEXT: subss %xmm4, %xmm3
-; SSE-32-NEXT: cvttss2si %xmm3, %ecx
+; SSE-32-NEXT: subss %xmm5, %xmm4
+; SSE-32-NEXT: cvttss2si %xmm4, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
-; SSE-32-NEXT: movd %ecx, %xmm3
-; SSE-32-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm0
+; SSE-32-NEXT: movd %ecx, %xmm4
+; SSE-32-NEXT: comiss %xmm3, %xmm0
+; SSE-32-NEXT: xorps %xmm5, %xmm5
+; SSE-32-NEXT: jb .LBB21_6
+; SSE-32-NEXT: # %bb.5:
+; SSE-32-NEXT: movaps %xmm3, %xmm5
+; SSE-32-NEXT: .LBB21_6:
+; SSE-32-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
 ; SSE-32-NEXT: movaps %xmm0, %xmm1
-; SSE-32-NEXT: cmpltss %xmm2, %xmm1
-; SSE-32-NEXT: andnps %xmm2, %xmm1
-; SSE-32-NEXT: movaps %xmm0, %xmm4
-; SSE-32-NEXT: subss %xmm1, %xmm4
-; SSE-32-NEXT: cvttss2si %xmm4, %ecx
+; SSE-32-NEXT: subss %xmm5, %xmm1
+; SSE-32-NEXT: cvttss2si %xmm1, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm1
 ; SSE-32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-32-NEXT: xorl %eax, %eax
-; SSE-32-NEXT: ucomiss %xmm2, %xmm0
+; SSE-32-NEXT: comiss %xmm3, %xmm0
+; SSE-32-NEXT: jb .LBB21_8
+; SSE-32-NEXT: # %bb.7:
+; SSE-32-NEXT: movaps %xmm3, %xmm2
+; SSE-32-NEXT: .LBB21_8:
 ; SSE-32-NEXT: setae %al
+; SSE-32-NEXT: movzbl %al, %eax
 ; SSE-32-NEXT: shll $31, %eax
-; SSE-32-NEXT: movaps %xmm0, %xmm4
-; SSE-32-NEXT: cmpltss %xmm2, %xmm4
-; SSE-32-NEXT: andnps %xmm2, %xmm4
-; SSE-32-NEXT: subss %xmm4, %xmm0
+; SSE-32-NEXT: subss %xmm2, %xmm0
 ; SSE-32-NEXT: cvttss2si %xmm0, %ecx
 ; SSE-32-NEXT: xorl %eax, %ecx
 ; SSE-32-NEXT: movd %ecx, %xmm0
 ; SSE-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; SSE-32-NEXT: movdqa %xmm1, %xmm0
 ; SSE-32-NEXT: retl
 ;
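Throughout this diff, the updated CHECK lines replace the old quiet ucomis*/cmplt*/andn* masking sequence with a signaling comis*/comisd compare followed by an explicit branch (jb), but the underlying unsigned-conversion trick is unchanged. As orientation for readers, here is a rough scalar C sketch of the pattern the new assembly encodes; the function name and comments are illustrative, not taken from the test, and it assumes the input is in range for the fptoui (out-of-range inputs are undefined for the source conversion anyway):

#include <stdint.h>

/* Sketch of the 2^63-threshold fptoui pattern:
 * values below 2^63 convert directly (the jb path, bias 0);
 * values at or above 2^63 are reduced by 2^63 before the signed
 * cvttsd2si, and the top bit is restored via setae/shlq $63/xorq. */
static uint64_t fptoui_u64_sketch(double x) {
    const double two63 = 9223372036854775808.0; /* 2^63, the movsd constant */
    int ge = (x >= two63);                      /* comisd + setae */
    double bias = ge ? two63 : 0.0;             /* jb selects the xmm bias */
    int64_t t = (int64_t)(x - bias);            /* subsd + cvttsd2si */
    return (uint64_t)t ^ ((uint64_t)ge << 63);  /* shlq $63 + xorq */
}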