Diffstat (limited to 'llvm/test/CodeGen/X86/vector-rotate-256.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-rotate-256.ll | 455
1 file changed, 455 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 618a24bd09f..b4ca023e124 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -446,6 +446,461 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
}
;
+; Uniform Variable Rotates
+;
+
+define <4 x i64> @splatvar_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
+; AVX1-LABEL: splatvar_rotate_v4i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = xmm1[0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [64,64]
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm4
+; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: splatvar_rotate_v4i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq %xmm1, %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [64,64,64,64]
+; AVX2-NEXT: vpsubq %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlvq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_rotate_v4i64:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpbroadcastq %xmm1, %ymm1
+; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatvar_rotate_v4i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastq %xmm1, %ymm1
+; AVX512VL-NEXT: vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_rotate_v4i64:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vprotq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_rotate_v4i64:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpbroadcastq %xmm1, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT: vprotq %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT: vprotq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
+ %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
+ %splat64 = sub <4 x i64> <i64 64, i64 64, i64 64, i64 64>, %splat
+ %shl = shl <4 x i64> %a, %splat
+ %lshr = lshr <4 x i64> %a, %splat64
+ %or = or <4 x i64> %shl, %lshr
+ ret <4 x i64> %or
+}
+
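Editor's note: the shl / (64 - amount) lshr / or sequence in the IR above is the canonical open-coded form of a rotate-left by a splatted variable amount. As an aside, not part of this test file, the same rotate can be written directly with LLVM's generic funnel-shift intrinsic, which degenerates to a rotate when both value operands are the same register. A minimal sketch (the function name is hypothetical):

; Illustrative only: llvm.fshl with both value operands equal is rotate-left.
; Unlike the open-coded expansion above, whose lshr by 64 is poison when the
; splatted amount is zero, the funnel shift takes its amount modulo the bit
; width, so it is well defined for all amounts.
define <4 x i64> @rotl_v4i64_fshl(<4 x i64> %a, <4 x i64> %b) nounwind {
  %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
  %rot = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> %splat)
  ret <4 x i64> %rot
}
declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)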
+define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
+; AVX1-LABEL: splatvar_rotate_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpslld %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm5, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
+; AVX1-NEXT: vpsrld %xmm6, %xmm4, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; AVX1-NEXT: vpsrld %xmm7, %xmm4, %xmm7
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vpsrld %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
+; AVX1-NEXT: vpsrld %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX1-NEXT: vpsrld %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: splatvar_rotate_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_rotate_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
+; AVX512-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX512-NEXT: vpslld %xmm1, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_rotate_v8i32:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
+; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; XOPAVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; XOPAVX1-NEXT: vpslld %xmm1, %xmm4, %xmm5
+; XOPAVX1-NEXT: vpslld %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
+; XOPAVX1-NEXT: vpshld %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT: vpshld %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_rotate_v8i32:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
+; XOPAVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm1
+; XOPAVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+ %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+ %splat32 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
+ %shl = shl <8 x i32> %a, %splat
+ %lshr = lshr <8 x i32> %a, %splat32
+ %or = or <8 x i32> %shl, %lshr
+ ret <8 x i32> %or
+}
+
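Editor's note: this hunk shows only the autogenerated FileCheck bodies; the RUN lines that select each check prefix live at the top of the file, outside the hunk. The AVX512BW and AVX512VL runs share the single AVX512 block above because they happen to emit identical code for this pattern. Reconstructed for illustration from the prefixes used in this section (the exact flags and prefix groupings in the real file may differ), the RUN lines look roughly like:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=XOPAVX2

The assembly bodies themselves are regenerated with utils/update_llc_test_checks.py rather than written by hand.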
+define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
+; AVX1-LABEL: splatvar_rotate_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpsubw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT: vpsllw %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: vpsllw $12, %xmm2, %xmm5
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
+; AVX1-NEXT: vpor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm6
+; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
+; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
+; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $12, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $4, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm5
+; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: splatvar_rotate_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
+; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX2-NEXT: vpsrlvd %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_rotate_v16i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512BW-NEXT: vpsllw %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatvar_rotate_v16i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_rotate_v16i16:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; XOPAVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm3
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; XOPAVX1-NEXT: vpsubw %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; XOPAVX1-NEXT: vpsllw %xmm1, %xmm4, %xmm5
+; XOPAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX1-NEXT: vpsubw %xmm2, %xmm5, %xmm2
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT: vpshlw %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_rotate_v16i16:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; XOPAVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; XOPAVX2-NEXT: vpsubw %xmm3, %xmm4, %xmm3
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; XOPAVX2-NEXT: vpshlw %xmm3, %xmm5, %xmm3
+; XOPAVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpshlw %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+ %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+ %splat16 = sub <16 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
+ %shl = shl <16 x i16> %a, %splat
+ %lshr = lshr <16 x i16> %a, %splat16
+ %or = or <16 x i16> %shl, %lshr
+ ret <16 x i16> %or
+}
+
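Editor's note: the long AVX2 body above reflects a real ISA gap. AVX2 provides per-element variable shifts only at 32-bit and 64-bit granularity (vpsrlvd/vpsrlvq); the 16-bit vpsrlvw only arrives with AVX512BW, which is why the AVX512BW/AVX512VL blocks are so much shorter. The vpunpcklwd/vpunpckhwd, vpsrlvd, vpsrld $16, vpackusdw sequence widens each 16-bit half into 32-bit lanes, shifts there, and narrows back. A minimal IR sketch of what that sequence computes (hypothetical helper, not part of the test; equivalent for in-range shift amounts):

; Illustrative only: the widen/shift/narrow trick the AVX2 lowering uses for
; the right-shift half. Zero-extending first means no data crosses a 16-bit
; boundary, so truncating recovers the i16 lshr.
define <16 x i16> @lshr_v16i16_via_i32(<16 x i16> %x, <16 x i16> %amt) nounwind {
  %xw = zext <16 x i16> %x to <16 x i32>
  %aw = zext <16 x i16> %amt to <16 x i32>
  %sw = lshr <16 x i32> %xw, %aw
  %r = trunc <16 x i32> %sw to <16 x i16>
  ret <16 x i16> %r
}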
+define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
+; AVX1-LABEL: splatvar_rotate_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpsllw $4, %xmm4, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm10, %xmm5, %xmm5
+; AVX1-NEXT: vpsllw $5, %xmm3, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm5
+; AVX1-NEXT: vpsllw $2, %xmm5, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm2, %xmm7, %xmm7
+; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm7
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm7
+; AVX1-NEXT: vpand %xmm10, %xmm7, %xmm7
+; AVX1-NEXT: vpblendvb %xmm3, %xmm7, %xmm0, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm3, %xmm7
+; AVX1-NEXT: vpand %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $5, %xmm9, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsllw $5, %xmm8, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: splatvar_rotate_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastb %xmm1, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm3
+; AVX2-NEXT: vpsllw $2, %ymm3, %ymm4
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_rotate_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %ymm2
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
+; AVX512-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_rotate_v32i8:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm5
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshlb %xmm3, %xmm4, %xmm6
+; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm4, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_rotate_v32i8:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm2
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; XOPAVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm4, %xmm3, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm2
+; XOPAVX2-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; XOPAVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; XOPAVX2-NEXT: vpsubb %xmm4, %xmm5, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm4, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm5, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
+; XOPAVX2-NEXT: retq
+ %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+ %splat8 = sub <32 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
+ %shl = shl <32 x i8> %a, %splat
+ %lshr = lshr <32 x i8> %a, %splat8
+ %or = or <32 x i8> %shl, %lshr
+ ret <32 x i8> %or
+}
+
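Editor's note: x86 has no byte-granularity shift instructions at all, so the AVX1/AVX2 bodies above synthesize the variable byte shift one amount-bit at a time: vpsllw $5 moves bit 2 of each byte's shift amount into that byte's sign bit, vpblendvb (which selects on the sign bit of each selector byte) conditionally applies a constant shift by 4 (done as a word shift plus a mask), and vpaddb doubles the selector so the next blends key off the shift-by-2 and shift-by-1 bits. One step of that ladder on a 128-bit half, written as IR (hypothetical helper, not from this test):

; Illustrative only: one step of the blend ladder. Bit 2 of each byte's
; amount gates a constant shift by 4; the lowering repeats this with the
; selector doubled so bits 1 and 0 gate shifts by 2 and 1 respectively.
define <16 x i8> @shl_bit2_step(<16 x i8> %a, <16 x i8> %amt) nounwind {
  %bit2 = and <16 x i8> %amt, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %sel = icmp ne <16 x i8> %bit2, zeroinitializer
  %sh4 = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %r = select <16 x i1> %sel, <16 x i8> %sh4, <16 x i8> %a
  ret <16 x i8> %r
}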
+;
; Constant Rotates
;