Diffstat (limited to 'llvm/test/CodeGen/X86/i64-to-float.ll')
-rw-r--r--  llvm/test/CodeGen/X86/i64-to-float.ll | 40
1 file changed, 20 insertions, 20 deletions
diff --git a/llvm/test/CodeGen/X86/i64-to-float.ll b/llvm/test/CodeGen/X86/i64-to-float.ll
index 26553f5f352..0440b3d9575 100644
--- a/llvm/test/CodeGen/X86/i64-to-float.ll
+++ b/llvm/test/CodeGen/X86/i64-to-float.ll
@@ -8,27 +8,27 @@
 
 define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
@@ -39,27 +39,27 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 
 define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
@@ -70,14 +70,14 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
 
 define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -86,14 +86,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -107,14 +107,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
 
 define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -123,14 +123,14 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -144,7 +144,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
 
 define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pushl %ebp
 ; X32-SSE-NEXT:    movl %esp, %ebp
 ; X32-SSE-NEXT:    andl $-8, %esp
@@ -194,7 +194,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    pushl %ebp
 ; X32-AVX-NEXT:    movl %esp, %ebp
 ; X32-AVX-NEXT:    andl $-8, %esp
@@ -220,7 +220,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
 ; X64-SSE-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE-NEXT:    pxor %xmm1, %xmm2
@@ -262,7 +262,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551361,18446744073709551361]
 ; X64-AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X64-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0