summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/vec_int_to_fp.ll
diff options
context:
space:
mode:
authorSanjay Patel <spatel@rotateright.com>2019-02-06 14:59:39 +0000
committerSanjay Patel <spatel@rotateright.com>2019-02-06 14:59:39 +0000
commite84fbb67a1f07dfd3b5ea6747222e2da57d8d80c (patch)
treea1867e6673d13b1110925fdd855d3cec8f469e48 /llvm/test/CodeGen/X86/vec_int_to_fp.ll
parent02974728dc45d641ad6b833767150adcb8aaf5fe (diff)
downloadbcm5719-llvm-e84fbb67a1f07dfd3b5ea6747222e2da57d8d80c.tar.gz
bcm5719-llvm-e84fbb67a1f07dfd3b5ea6747222e2da57d8d80c.zip
[x86] vectorize cast ops in lowering to avoid register file transfers
The proposal in D56796 may cross the line because we're trying to avoid vectorization transforms in generic DAG combining. So this is an alternate, later, x86-specific translation of that patch.

There are several potential follow-ups to enhance this:
1. Allow extraction from non-zero element index.
2. Peek through extends of smaller width integers.
3. Support x86-specific conversion opcodes like X86ISD::CVTSI2P

Differential Revision: https://reviews.llvm.org/D56864

llvm-svn: 353302
Diffstat (limited to 'llvm/test/CodeGen/X86/vec_int_to_fp.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vec_int_to_fp.ll | 93
1 file changed, 64 insertions(+), 29 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 56b4e64d1d2..1f2749fd099 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -5556,15 +5556,12 @@ define <4 x float> @sitofp_i64_to_4f32(<4 x float> %a0, i64 %a1) nounwind {
define float @extract0_sitofp_v4i32_f32(<4 x i32> %x) nounwind {
; SSE-LABEL: extract0_sitofp_v4i32_f32:
; SSE: # %bb.0:
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: cvtsi2ssl %eax, %xmm0
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: extract0_sitofp_v4i32_f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
%e = extractelement <4 x i32> %x, i32 0
%r = sitofp i32 %e to float
@@ -5575,8 +5572,7 @@ define float @extract0_sitofp_v4i32_f32i_multiuse1(<4 x i32> %x) nounwind {
; SSE-LABEL: extract0_sitofp_v4i32_f32i_multiuse1:
; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: cvtsi2ssl %eax, %xmm0
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: incl %eax
; SSE-NEXT: cvtsi2ssl %eax, %xmm1
; SSE-NEXT: divss %xmm1, %xmm0
@@ -5585,7 +5581,7 @@ define float @extract0_sitofp_v4i32_f32i_multiuse1(<4 x i32> %x) nounwind {
; AVX-LABEL: extract0_sitofp_v4i32_f32i_multiuse1:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: incl %eax
; AVX-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm1
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
@@ -5601,17 +5597,15 @@ define float @extract0_sitofp_v4i32_f32i_multiuse1(<4 x i32> %x) nounwind {
define float @extract0_sitofp_v4i32_f32_multiuse2(<4 x i32> %x, i32* %p) nounwind {
; SSE-LABEL: extract0_sitofp_v4i32_f32_multiuse2:
; SSE: # %bb.0:
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cvtsi2ssl %eax, %xmm1
-; SSE-NEXT: movd %xmm0, (%rdi)
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm1
+; SSE-NEXT: movss %xmm0, (%rdi)
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: extract0_sitofp_v4i32_f32_multiuse2:
; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm1
-; AVX-NEXT: vmovd %xmm0, (%rdi)
+; AVX-NEXT: vcvtdq2ps %xmm0, %xmm1
+; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%e = extractelement <4 x i32> %x, i32 0
@@ -5630,8 +5624,7 @@ define double @extract0_sitofp_v4i32_f64(<4 x i32> %x) nounwind {
;
; AVX-LABEL: extract0_sitofp_v4i32_f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
%e = extractelement <4 x i32> %x, i32 0
%r = sitofp i32 %e to double
@@ -5652,11 +5645,31 @@ define float @extract0_uitofp_v4i32_f32(<4 x i32> %x) nounwind {
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm0
; VEX-NEXT: retq
;
-; AVX512-LABEL: extract0_uitofp_v4i32_f32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vcvtusi2ssl %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: extract0_uitofp_v4i32_f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: extract0_uitofp_v4i32_f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
+; AVX512VL-NEXT: retq
+;
+; AVX512DQ-LABEL: extract0_uitofp_v4i32_f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512VLDQ-LABEL: extract0_uitofp_v4i32_f32:
+; AVX512VLDQ: # %bb.0:
+; AVX512VLDQ-NEXT: vcvtudq2ps %xmm0, %xmm0
+; AVX512VLDQ-NEXT: retq
%e = extractelement <4 x i32> %x, i32 0
%r = uitofp i32 %e to float
ret float %r
@@ -5676,11 +5689,35 @@ define double @extract0_uitofp_v4i32_f64(<4 x i32> %x) nounwind {
; VEX-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm0
; VEX-NEXT: retq
;
-; AVX512-LABEL: extract0_uitofp_v4i32_f64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vcvtusi2sdl %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: extract0_uitofp_v4i32_f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: extract0_uitofp_v4i32_f64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512DQ-LABEL: extract0_uitofp_v4i32_f64:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512VLDQ-LABEL: extract0_uitofp_v4i32_f64:
+; AVX512VLDQ: # %bb.0:
+; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512VLDQ-NEXT: vzeroupper
+; AVX512VLDQ-NEXT: retq
%e = extractelement <4 x i32> %x, i32 0
%r = uitofp i32 %e to double
ret double %r
@@ -5692,9 +5729,7 @@ define float @extract3_sitofp_v4i32_f32(<4 x i32> %x) nounwind {
; SSE2-LABEL: extract3_sitofp_v4i32_f32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ssl %eax, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: extract3_sitofp_v4i32_f32:
OpenPOWER on IntegriCloud