Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shift-shl-256.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-shl-256.ll | 110
1 file changed, 52 insertions(+), 58 deletions(-)
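Note: the hunks below touch only the autogenerated CHECK lines; the IR under test does not appear on this page. From the power-of-two multipliers the new AVX1 code loads ([1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]), the per-byte shift amounts appear to rise 0..7 and then fall 7..0 within each 128-bit half, so the tested function is presumably a constant vector shl along these lines (a sketch inferred from the checks, not copied from the file):

define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; Shift amounts inferred from the vpmullw multipliers: each multiplier is
; 1 << amount, so [1,2,4,8,...] encodes per-byte shifts [0,1,2,3,...].
  %shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7,
                              i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0,
                              i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7,
                              i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}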
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 27bc3b99f04..daa233f7b3c 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -986,35 +986,32 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
   ret <16 x i16> %shift
 }

-define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
-; AVX1-LABEL: constant_shift_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v32i8:
+define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
+; AVX1-LABEL: constant_shift_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v32i8:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -1090,35 +1087,32 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
 ; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
 ; AVX512BWVL-NEXT: retq
-;
-; X32-AVX1-LABEL: constant_shift_v32i8:
-; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
-; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-AVX1-NEXT: retl
-;
-; X32-AVX2-LABEL: constant_shift_v32i8:
+;
+; X32-AVX1-LABEL: constant_shift_v32i8:
+; X32-AVX1: # %bb.0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; X32-AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X32-AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v32i8:
 ; X32-AVX2: # %bb.0:
 ; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
 ; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
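Taken together, the regenerated AVX1 and X32-AVX1 checks show this constant shift changing lowering strategy: the old sequence built the shift from vpsllw-by-4, vpsllw-by-2, and vpaddb steps, with six vpblendvb selects keyed off the shift-amount bits, while the new one widens each 128-bit half to 16-bit lanes (vpmovzxbw for the low bytes, vpunpckhbw for the high bytes), multiplies by the precomputed powers of two with vpmullw, masks back to bytes with vpand, and repacks with vpackuswb. The X32 run differs only in using retl and an absolute {{\.LCPI.*}} constant-pool reference in place of RIP-relative addressing.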