Diffstat (limited to 'llvm/test/CodeGen/X86/vec_fp_to_int.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vec_fp_to_int.ll | 310
1 file changed, 155 insertions, 155 deletions
diff --git a/llvm/test/CodeGen/X86/vec_fp_to_int.ll b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
index 0ad5ef7ee8f..5e32f2c89c7 100644
--- a/llvm/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
@@ -12,13 +12,13 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
 ; SSE-LABEL: fptosi_2f64_to_2i64:
-; SSE: # BB#0:
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE: # BB#0:
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
@@ -37,13 +37,13 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
 define <4 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
 ; SSE-LABEL: fptosi_2f64_to_2i32:
-; SSE: # BB#0:
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE: # BB#0:
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
 ; SSE-NEXT: retq
 ;
@@ -64,13 +64,13 @@ define <4 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
 define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
 ; SSE-LABEL: fptosi_4f64_to_2i32:
-; SSE: # BB#0:
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE: # BB#0:
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
 ; SSE-NEXT: cvttsd2si %xmm0, %rax
 ; SSE-NEXT: movd %rax, %xmm1
@@ -92,19 +92,19 @@ define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
 define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
 ; SSE-LABEL: fptosi_4f64_to_4i64:
-; SSE: # BB#0:
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE-NEXT: cvttsd2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: cvttsd2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE: # BB#0:
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE-NEXT: cvttsd2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: cvttsd2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
 ; SSE-NEXT: movdqa %xmm2, %xmm0
 ; SSE-NEXT: movdqa %xmm3, %xmm1
 ; SSE-NEXT: retq
@@ -132,20 +132,20 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
 define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
 ; SSE-LABEL: fptosi_4f64_to_4i32:
-; SSE: # BB#0:
-; SSE-NEXT: cvttsd2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: cvttsd2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttsd2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE: # BB#0:
+; SSE-NEXT: cvttsd2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: cvttsd2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT: retq
@@ -173,14 +173,14 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
 ; SSE-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
 ; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm2, %xmm0
-; SSE-NEXT: cmovaeq %rax, %rdx
-; SSE-NEXT: movd %rdx, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm2, %xmm3
-; SSE-NEXT: cvttsd2si %xmm3, %rax
-; SSE-NEXT: xorq %rcx, %rax
+; SSE-NEXT: ucomisd %xmm2, %xmm0
+; SSE-NEXT: cmovaeq %rax, %rdx
+; SSE-NEXT: movd %rdx, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: subsd %xmm2, %xmm3
+; SSE-NEXT: cvttsd2si %xmm3, %rax
+; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rcx
 ; SSE-NEXT: ucomisd %xmm2, %xmm0
 ; SSE-NEXT: cmovaeq %rax, %rcx
@@ -224,14 +224,14 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
 ; SSE-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
 ; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
-; SSE-NEXT: cmovaeq %rax, %rdx
-; SSE-NEXT: movd %rdx, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm1, %xmm3
-; SSE-NEXT: cvttsd2si %xmm3, %rax
-; SSE-NEXT: xorq %rcx, %rax
+; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: cmovaeq %rax, %rdx
+; SSE-NEXT: movd %rdx, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: subsd %xmm1, %xmm3
+; SSE-NEXT: cvttsd2si %xmm3, %rax
+; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rcx
 ; SSE-NEXT: ucomisd %xmm1, %xmm0
 ; SSE-NEXT: cmovaeq %rax, %rcx
@@ -277,14 +277,14 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
 ; SSE-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
 ; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
-; SSE-NEXT: cmovaeq %rax, %rdx
-; SSE-NEXT: movd %rdx, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm1, %xmm3
-; SSE-NEXT: cvttsd2si %xmm3, %rax
-; SSE-NEXT: xorq %rcx, %rax
+; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: cmovaeq %rax, %rdx
+; SSE-NEXT: movd %rdx, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: subsd %xmm1, %xmm3
+; SSE-NEXT: cvttsd2si %xmm3, %rax
+; SSE-NEXT: xorq %rcx, %rax
 ; SSE-NEXT: cvttsd2si %xmm0, %rdx
 ; SSE-NEXT: ucomisd %xmm1, %xmm0
 ; SSE-NEXT: cmovaeq %rax, %rdx
@@ -327,14 +327,14 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
 ; SSE-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm2, %rdx
-; SSE-NEXT: ucomisd %xmm3, %xmm2
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm0
-; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1,0]
-; SSE-NEXT: movapd %xmm2, %xmm4
-; SSE-NEXT: subsd %xmm3, %xmm4
-; SSE-NEXT: cvttsd2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomisd %xmm3, %xmm2
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm0
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movaps %xmm2, %xmm4
+; SSE-NEXT: subsd %xmm3, %xmm4
+; SSE-NEXT: cvttsd2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm2, %rdx
 ; SSE-NEXT: ucomisd %xmm3, %xmm2
 ; SSE-NEXT: cmovaeq %rcx, %rdx
@@ -345,14 +345,14 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
 ; SSE-NEXT: cvttsd2si %xmm2, %rcx
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm1, %rdx
-; SSE-NEXT: ucomisd %xmm3, %xmm1
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: movapd %xmm1, %xmm4
-; SSE-NEXT: subsd %xmm3, %xmm4
-; SSE-NEXT: cvttsd2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomisd %xmm3, %xmm1
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: subsd %xmm3, %xmm4
+; SSE-NEXT: cvttsd2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm1, %rax
 ; SSE-NEXT: ucomisd %xmm3, %xmm1
 ; SSE-NEXT: cmovaeq %rcx, %rax
@@ -414,14 +414,14 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
 ; SSE-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm1, %rdx
-; SSE-NEXT: ucomisd %xmm2, %xmm1
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: movapd %xmm1, %xmm4
-; SSE-NEXT: subsd %xmm2, %xmm4
-; SSE-NEXT: cvttsd2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomisd %xmm2, %xmm1
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: subsd %xmm2, %xmm4
+; SSE-NEXT: cvttsd2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm1, %rdx
 ; SSE-NEXT: ucomisd %xmm2, %xmm1
 ; SSE-NEXT: cmovaeq %rcx, %rdx
@@ -433,14 +433,14 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
 ; SSE-NEXT: cvttsd2si %xmm3, %rcx
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm2, %xmm0
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: subsd %xmm2, %xmm4
-; SSE-NEXT: cvttsd2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomisd %xmm2, %xmm0
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: subsd %xmm2, %xmm4
+; SSE-NEXT: cvttsd2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttsd2si %xmm0, %rax
 ; SSE-NEXT: ucomisd %xmm2, %xmm0
 ; SSE-NEXT: cmovaeq %rcx, %rax
@@ -565,13 +565,13 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
 ; SSE-NEXT: movd %rax, %xmm1
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: cvttss2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: cvttss2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; SSE-NEXT: movdqa %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
@@ -608,13 +608,13 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
 ; SSE-NEXT: movd %rax, %xmm1
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: cvttss2si %xmm1, %rax
-; SSE-NEXT: movd %rax, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: cvttss2si %xmm1, %rax
+; SSE-NEXT: movd %rax, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: movd %rax, %xmm1
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; SSE-NEXT: movdqa %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
@@ -655,13 +655,13 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; SSE-NEXT: cvttss2si %xmm2, %rax
 ; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
@@ -799,13 +799,13 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
 ; SSE-NEXT: cvttss2si %xmm3, %rax
 ; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; SSE-NEXT: cvttss2si %xmm2, %rax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1,0]
-; SSE-NEXT: cvttss2si %xmm2, %rax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE-NEXT: cvttss2si %xmm2, %rax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: cvttss2si %xmm2, %rax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE-NEXT: movaps %xmm1, %xmm2
 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
@@ -815,13 +815,13 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
 ; SSE-NEXT: cvttss2si %xmm3, %rax
 ; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE-NEXT: cvttss2si %xmm1, %rax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: cvttss2si %xmm1, %rax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: cvttss2si %xmm1, %rax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: cvttss2si %xmm1, %rax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE-NEXT: movdqa %xmm2, %xmm1
 ; SSE-NEXT: retq
@@ -888,14 +888,14 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttss2si %xmm3, %rdx
-; SSE-NEXT: ucomiss %xmm1, %xmm3
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: subss %xmm1, %xmm4
-; SSE-NEXT: cvttss2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomiss %xmm1, %xmm3
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: subss %xmm1, %xmm4
+; SSE-NEXT: cvttss2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttss2si %xmm0, %rax
 ; SSE-NEXT: ucomiss %xmm1, %xmm0
 ; SSE-NEXT: cmovaeq %rcx, %rax
@@ -979,14 +979,14 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
 ; SSE-NEXT: cvttss2si %xmm4, %rcx
 ; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttss2si %xmm3, %rdx
-; SSE-NEXT: ucomiss %xmm1, %xmm3
-; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm3
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: subss %xmm1, %xmm4
-; SSE-NEXT: cvttss2si %xmm4, %rcx
-; SSE-NEXT: xorq %rax, %rcx
+; SSE-NEXT: ucomiss %xmm1, %xmm3
+; SSE-NEXT: cmovaeq %rcx, %rdx
+; SSE-NEXT: movd %rdx, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: subss %xmm1, %xmm4
+; SSE-NEXT: cvttss2si %xmm4, %rcx
+; SSE-NEXT: xorq %rax, %rcx
 ; SSE-NEXT: cvttss2si %xmm0, %rax
 ; SSE-NEXT: ucomiss %xmm1, %xmm0
 ; SSE-NEXT: cmovaeq %rcx, %rax