author    Simon Pilgrim <llvm-dev@redking.me.uk>  2018-05-31 11:25:16 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>  2018-05-31 11:25:16 +0000
commit    346886bc0de81ff4fe83573445de48cd6a5a81c8 (patch)
tree      bbad4605654e2da9ed0f94f17f273a955ee2dd7a /llvm/test/CodeGen/X86/vector-rotate-256.ll
parent    0f11db359d36976e2b2471e49a39f20f6d0a9f08 (diff)
[X86][SSE] Add support for detecting SUB(SPLAT_BV, SPLAT) cases for shift-rotate patterns.
This improves splat rotations (rotation by a uniform value), avoiding the need to fall back to the generic non-uniform shift code (extension to PR37426).

llvm-svn: 333641
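For reference, the pattern these tests exercise is a vector rotate by a splatted variable amount, where the right-shift amount is formed as SUB of a splat build vector (the element bit width) minus the splatted amount. The sketch below is illustrative only, in the spirit of splatvar_rotate_v4i64; the function name is hypothetical and the exact test bodies live in the file itself.

; Illustrative sketch (not the verbatim test): a v4i64 rotate-left by a
; splatted variable amount, expressed as SHL/LSHR/OR with the LSHR amount
; formed as SUB(SPLAT_BV(64), SPLAT(%b)).
define <4 x i64> @splat_rotate_sketch(<4 x i64> %a, <4 x i64> %b) nounwind {
  %amt  = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
  %inv  = sub <4 x i64> <i64 64, i64 64, i64 64, i64 64>, %amt
  %shl  = shl <4 x i64> %a, %amt
  %lshr = lshr <4 x i64> %a, %inv
  %rot  = or <4 x i64> %shl, %lshr
  ret <4 x i64> %rot
}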
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-rotate-256.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-rotate-256.ll | 46
1 file changed, 7 insertions(+), 39 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 6287fac2586..a048c0c5689 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -447,13 +447,8 @@ define <4 x i64> @splatvar_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -594,33 +589,14 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vpsllw $12, %xmm2, %xmm4
-; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm4
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -632,16 +608,8 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vpsubw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
-; AVX2-NEXT: vpsrlvd %ymm5, %ymm4, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
-; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;