author     Simon Pilgrim <llvm-dev@redking.me.uk>   2017-02-05 22:50:29 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2017-02-05 22:50:29 +0000
commit     380ce75687e9f3fc00d52984dd9d64c11a8ad6eb (patch)
tree       3f2c81e191fb4ab89f6c70fc99890e5720ad0e25 /llvm/test/CodeGen/X86
parent     134ed9986a1baa03d568dd551e20d2134d3dfeae (diff)
[X86][SSE] Replace insert_vector_elt(vec, -1, idx) with shuffle
Similar to what we already do for zero element insertion, we can quickly rematerialize 'allbits' vectors, avoiding an unnecessary GPR value and its insertion into the vector.

llvm-svn: 294162
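As a minimal sketch of the pattern this combine targets (hand-written for illustration, not part of the test file): inserting the constant -1 into a vector lane. Before this patch the X86 backend lowered this with a movl $-1, %eax GPR move plus vpinsrd; afterwards it can instead blend against an all-ones vector rematerialized by vpcmpeqd, since a register compared for equality with itself matches in every lane and thus yields all-ones without touching a GPR.

define <4 x i32> @insert_allbits_lane0(<4 x i32> %v) {
  ; insertelement of the constant -1 at a known index: now selected as a
  ; shuffle/blend with an all-ones vector instead of a GPR move + vpinsrd
  %r = insertelement <4 x i32> %v, i32 -1, i32 0
  ret <4 x i32> %r
}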
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/avx-cvt-3.ll  34
1 file changed, 12 insertions, 22 deletions
diff --git a/llvm/test/CodeGen/X86/avx-cvt-3.ll b/llvm/test/CodeGen/X86/avx-cvt-3.ll
index 45a6421cf22..066719b3bfe 100644
--- a/llvm/test/CodeGen/X86/avx-cvt-3.ll
+++ b/llvm/test/CodeGen/X86/avx-cvt-3.ll
@@ -48,27 +48,17 @@ define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_insert_allbits_v8i32:
; X86: # BB#0:
-; X86-NEXT: movl $-1, %eax
-; X86-NEXT: vpinsrd $0, %eax, %xmm0, %xmm1
-; X86-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; X86-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X86-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
-; X86-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_insert_allbits_v8i32:
; X64: # BB#0:
-; X64-NEXT: movl $-1, %eax
-; X64-NEXT: vpinsrd $0, %eax, %xmm0, %xmm1
-; X64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
-; X64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <8 x i32> %a0, i32 -1, i32 0
@@ -105,9 +95,9 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X86: # BB#0:
; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; X86-NEXT: movl $-1, %eax
-; X86-NEXT: vpinsrd $2, %eax, %xmm0, %xmm1
-; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-NEXT: movl $2, %eax
; X86-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
@@ -121,9 +111,9 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X64: # BB#0:
; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; X64-NEXT: movl $-1, %eax
-; X64-NEXT: vpinsrd $2, %eax, %xmm0, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-NEXT: movl $2, %eax
; X64-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
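Conceptually, the new lowering folds the run of -1 inserts into a single blend with an all-ones vector. A hand-written IR equivalent of the first test above, hypothetical and inferred from the blend mask in the CHECK lines (lanes 0, 2, 4 and 5 taken from the all-ones operand), would look like:

define <8 x float> @sitofp_insert_allbits_v8i32_shuffle(<8 x i32> %a0) {
  ; lanes 0, 2, 4 and 5 come from the all-ones operand; the backend
  ; materializes it with vpcmpeqd + vinsertf128 and a single vblendps
  %blend = shufflevector <8 x i32> %a0,
                         <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1,
                                    i32 -1, i32 -1, i32 -1, i32 -1>,
                         <8 x i32> <i32 8, i32 1, i32 10, i32 3,
                                    i32 12, i32 13, i32 6, i32 7>
  %cvt = sitofp <8 x i32> %blend to <8 x float>
  ret <8 x float> %cvt
}

Since every lane of the second operand is -1, any mask index in the 8-15 range selects an all-ones element, which leaves the backend free to rematerialize the constant however is cheapest on the target.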