summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
diff options
context:
space:
mode:
authorSimon Pilgrim <llvm-dev@redking.me.uk>2016-12-01 15:41:40 +0000
committerSimon Pilgrim <llvm-dev@redking.me.uk>2016-12-01 15:41:40 +0000
commit1e4d8709992da36e46dca5b214a6810a0380948e (patch)
tree8368fc0c057c29f3716c8b2b121326b323632acf /llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
parent5d9f8f914acf221b3a4c531e0335bf0dc2dad21c (diff)
downloadbcm5719-llvm-1e4d8709992da36e46dca5b214a6810a0380948e.tar.gz
bcm5719-llvm-1e4d8709992da36e46dca5b214a6810a0380948e.zip
[X86][SSE] Add support for combining AND bitmasks to shuffles.
llvm-svn: 288365
Diffstat (limited to 'llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll')
-rw-r--r--llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll102
1 file changed, 67 insertions(+), 35 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index 8f0e8b25689..78b799c2162 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -11,11 +11,11 @@
; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
-; CST: [[MASKCSTADDR:.LCPI[0-9_]+]]:
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
+; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
; CST: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
; CST-NEXT: .long 1199570944 # float 65536
@@ -30,20 +30,32 @@
; AVX2-NEXT: .long 65535 # 0xffff
define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE: # BB#0:
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE-NEXT: addps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: addps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: addps %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
; AVX: # BB#0:
-; AVX-NEXT: vandps [[MASKCSTADDR]](%rip), %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -97,25 +109,45 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX2-NEXT: .long 65535 # 0xffff
define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: addps %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: addps %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: addps %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: addps %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: addps %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: addps %xmm2, %xmm1
+; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
; AVX: # BB#0:
OpenPOWER on IntegriCloud