author    Simon Pilgrim <llvm-dev@redking.me.uk>    2017-07-21 10:22:49 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>    2017-07-21 10:22:49 +0000
commit    84cbd8e75081edb07c6f53a59c709df0524f393d (patch)
tree      cd6d16cb5bbd008629839f0e202bd6ffb6783afb /llvm/test/CodeGen
parent    32c377a1cfb083125c1498b3850f885726f4f3b9 (diff)
[X86][SSE] Add extra (sra (sra x, c1), c2) -> (sra x, (add c1, c2)) test case
We should be able to handle the case where some c1+c2 elements exceed the maximum shift amount and some don't, by clamping each element after the sum.

llvm-svn: 308724
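For illustration (not part of the commit): assuming the combine clamps each lane's summed shift amount to BitWidth - 1 = 31, the two ashr instructions in the new test below would fold into a single shift. The per-lane sums 34, 15, 83 and 27 clamp to 31, 15, 31 and 27. A minimal LLVM IR sketch of that folded form, using a hypothetical function name:

; Hypothetical folded form of combine_vec_ashr_ashr3, assuming the
; proposed combine clamps each lane's c1+c2 to the element bit width
; minus one (31 for i32). Not the commit's actual output.
define <4 x i32> @combine_vec_ashr_ashr3_folded(<4 x i32> %x) {
  %1 = ashr <4 x i32> %x, <i32 31, i32 15, i32 31, i32 27>
  ret <4 x i32> %1
}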
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/combine-sra.ll  30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index f9927198978..fb16faa30a9 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -125,6 +125,36 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
ret <4 x i32> %2
}
+define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_ashr3:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $27, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $5, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $10, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_ashr3:
+; AVX: # BB#0:
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 1, i32 5, i32 50, i32 27>
+ %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32 0>
+ ret <4 x i32> %2
+}
+
; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and: