Diffstat (limited to 'llvm/test/CodeGen/X86/i64-to-float.ll')
-rw-r--r--  llvm/test/CodeGen/X86/i64-to-float.ll  48
1 file changed, 16 insertions(+), 32 deletions(-)
diff --git a/llvm/test/CodeGen/X86/i64-to-float.ll b/llvm/test/CodeGen/X86/i64-to-float.ll
index 7ca55f16c99..8898551a976 100644
--- a/llvm/test/CodeGen/X86/i64-to-float.ll
+++ b/llvm/test/CodeGen/X86/i64-to-float.ll
@@ -71,11 +71,9 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
@@ -83,20 +81,16 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-AVX: # BB#0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
; X64-SSE: # BB#0:
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
@@ -104,9 +98,7 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
@@ -118,11 +110,9 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
@@ -130,20 +120,16 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-AVX: # BB#0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
; X64-SSE: # BB#0:
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
@@ -151,9 +137,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
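
The updated CHECK lines capture a lowering change: for these masked i64-to-f32 conversions the backend now stays in the floating-point domain, using andps plus a single shufps that packs the low dwords of all four i64 lanes, instead of the integer-domain pand/pshufd/punpcklqdq sequence (SSE) or the vpshufd/vpblendw pair (AVX). A minimal sketch of the kind of IR the sitofp test exercises is below; the mask constants are illustrative assumptions, not copied from the test file, and the uitofp variant is analogous:

define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
  ; Mask each i64 lane down to a value known to fit in 32 bits
  ; (these constants are hypothetical, not the test's actual masks).
  %and = and <4 x i64> %a, <i64 127, i64 255, i64 4095, i64 65535>
  ; With the high halves known zero, the conversion only needs the
  ; low dword of each lane.
  %cvt = sitofp <4 x i64> %and to <4 x float>
  ret <4 x float> %cvt
}

Because the mask proves every lane fits in an i32, one shufps (xmm0 = xmm0[0,2],xmm1[0,2]) gathers the low 32 bits of all four i64 lanes into the <4 x i32> that cvtdq2ps converts, which is two shuffle instructions shorter than the previous pshufd/punpcklqdq (SSE) and pshufd/pblendw (AVX) sequences.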