Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll | 3744
1 file changed, 2088 insertions, 1656 deletions
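The hunks below are the regenerated FileCheck lines for the variable-shift tests in this file, covering the SSE2, SSE41, AVX1, AVX2, XOP, AVX512 and X32-SSE run configurations. For orientation, every test in the file has the same minimal shape; the <2 x i32> case, reproduced from the test body visible in the diff, is:

define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
  %shift = ashr <2 x i32> %a, %b
  ret <2 x i32> %shift
}

Each check prefix then pins down the exact instruction sequence the X86 backend emits for that arithmetic shift right of a sub-128-bit vector on the corresponding target.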
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll index 3f0345ad8b2..ef358a0bb78 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll @@ -20,6 +20,157 @@ define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind { ; SSE2-LABEL: var_shift_v2i32: ; SSE2: # %bb.0: +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: xorpd %xmm0, %xmm2 +; SSE2-NEXT: psubq %xmm0, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: var_shift_v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllq $32, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pxor %xmm0, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: psrlq %xmm0, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psrlq %xmm0, %xmm4 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: psubq %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: var_shift_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: var_shift_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; 
AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; XOPAVX1-LABEL: var_shift_v2i32: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; XOPAVX1-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: retq +; +; XOPAVX2-LABEL: var_shift_v2i32: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; XOPAVX2-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: retq +; +; AVX512-LABEL: var_shift_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq +; +; AVX512VL-LABEL: var_shift_v2i32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq +; +; X32-SSE-LABEL: var_shift_v2i32: +; X32-SSE: # %bb.0: +; X32-SSE-NEXT: psllq $32, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; X32-SSE-NEXT: psrad $31, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; X32-SSE-NEXT: xorps %xmm5, %xmm5 +; X32-SSE-NEXT: movss {{.*#+}} xmm5 = xmm4[0],xmm5[1,2,3] +; X32-SSE-NEXT: psrlq %xmm5, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psrlq %xmm5, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm2 +; X32-SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[0],xmm3[1] +; X32-SSE-NEXT: xorpd %xmm0, %xmm2 +; X32-SSE-NEXT: psubq %xmm0, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm0 +; X32-SSE-NEXT: retl + %shift = ashr <2 x i32> %a, %b + ret <2 x i32> %shift +} + +define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { +; SSE2-LABEL: var_shift_v4i16: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $16, %xmm0 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: psrad %xmm2, %xmm3 @@ -38,70 +189,100 @@ define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind { ; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41-LABEL: var_shift_v2i32: +; SSE41-LABEL: var_shift_v4i16: ; SSE41: # %bb.0: -; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: psrad %xmm2, %xmm3 -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] -; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7] +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; 
SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7] +; SSE41-NEXT: pslld $16, %xmm0 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7] ; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: psrad %xmm4, %xmm5 -; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7] -; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7] -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: psrad %xmm1, %xmm3 -; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm6 +; SSE41-NEXT: psrad %xmm4, %xmm6 +; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad %xmm1, %xmm2 +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,1,1,1,4,5,6,7] ; SSE41-NEXT: psrad %xmm1, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7] -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7] ; SSE41-NEXT: retq ; -; AVX1-LABEL: var_shift_v2i32: +; AVX1-LABEL: var_shift_v4i16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2 -; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3 -; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4 +; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] ; AVX1-NEXT: retq ; -; AVX2-LABEL: var_shift_v2i32: +; AVX2-LABEL: var_shift_v4i16: ; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; -; XOPAVX1-LABEL: var_shift_v2i32: +; XOPAVX1-LABEL: var_shift_v4i16: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; XOPAVX1-NEXT: vpslld $16, 
%xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq ; -; XOPAVX2-LABEL: var_shift_v2i32: +; XOPAVX2-LABEL: var_shift_v4i16: ; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; XOPAVX2-NEXT: retq ; -; AVX512-LABEL: var_shift_v2i32: +; AVX512-LABEL: var_shift_v4i16: ; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX512-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; -; AVX512VL-LABEL: var_shift_v2i32: +; AVX512VL-LABEL: var_shift_v4i16: ; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; -; X32-SSE-LABEL: var_shift_v2i32: +; X32-SSE-LABEL: var_shift_v4i16: ; X32-SSE: # %bb.0: +; X32-SSE-NEXT: pslld $16, %xmm0 +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] ; X32-SSE-NEXT: movdqa %xmm0, %xmm3 ; X32-SSE-NEXT: psrad %xmm2, %xmm3 @@ -119,194 +300,167 @@ define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind { ; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3] ; X32-SSE-NEXT: movaps %xmm2, %xmm0 ; X32-SSE-NEXT: retl - %shift = ashr <2 x i32> %a, %b - ret <2 x i32> %shift + %shift = ashr <4 x i16> %a, %b + ret <4 x i16> %shift } -define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { -; SSE2-LABEL: var_shift_v4i16: +define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { +; SSE2-LABEL: var_shift_v2i16: ; SSE2: # %bb.0: -; SSE2-NEXT: psllw $12, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psraw $15, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pandn %xmm0, %xmm3 -; SSE2-NEXT: psraw $8, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psraw $15, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pandn %xmm0, %xmm3 -; SSE2-NEXT: psraw $4, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: psllq $48, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] ; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pandn %xmm0, %xmm3 -; SSE2-NEXT: psraw $2, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: psraw $15, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pandn %xmm0, %xmm2 -; 
SSE2-NEXT: psraw $1, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE2-NEXT: xorpd %xmm2, %xmm0 +; SSE2-NEXT: psubq %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41-LABEL: var_shift_v4i16: +; SSE41-LABEL: var_shift_v2i16: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: movdqa %xmm2, %xmm0 -; SSE41-NEXT: psllw $12, %xmm0 -; SSE41-NEXT: psllw $4, %xmm2 -; SSE41-NEXT: por %xmm0, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddw %xmm2, %xmm3 -; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: psraw $8, %xmm4 -; SSE41-NEXT: movdqa %xmm2, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $4, %xmm2 -; SSE41-NEXT: movdqa %xmm3, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $2, %xmm2 -; SSE41-NEXT: paddw %xmm3, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $1, %xmm2 -; SSE41-NEXT: paddw %xmm3, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllq $48, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: psrlq %xmm2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psrlq %xmm2, %xmm4 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm3, %xmm0 +; SSE41-NEXT: psubq %xmm3, %xmm0 ; SSE41-NEXT: retq ; -; AVX1-LABEL: var_shift_v4i16: +; AVX1-LABEL: var_shift_v2i16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2 -; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 -; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 -; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = 
xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; -; AVX2-LABEL: var_shift_v4i16: +; AVX2-LABEL: var_shift_v2i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vzeroupper +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; -; XOP-LABEL: var_shift_v4i16: +; XOP-LABEL: var_shift_v2i16: ; XOP: # %bb.0: ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0 +; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; XOP-NEXT: vpsllq $48, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: var_shift_v4i16: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0 -; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: var_shift_v4i16: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: var_shift_v4i16: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 -; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: var_shift_v2i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; 
AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: var_shift_v4i16: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0 -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: var_shift_v2i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; -; X32-SSE-LABEL: var_shift_v4i16: +; X32-SSE-LABEL: var_shift_v2i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: psllw $12, %xmm1 -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: psraw $15, %xmm2 -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pandn %xmm0, %xmm3 -; X32-SSE-NEXT: psraw $8, %xmm0 -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm3, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: psraw $15, %xmm2 -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pandn %xmm0, %xmm3 -; X32-SSE-NEXT: psraw $4, %xmm0 -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm3, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: psraw $15, %xmm2 +; X32-SSE-NEXT: psllq $48, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad $31, %xmm2 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] ; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pandn %xmm0, %xmm3 -; X32-SSE-NEXT: psraw $2, %xmm0 -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm3, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: psraw $15, %xmm1 -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: pandn %xmm0, %xmm2 -; X32-SSE-NEXT: psraw $1, %xmm0 -; X32-SSE-NEXT: pand %xmm1, %xmm0 -; X32-SSE-NEXT: por %xmm2, %xmm0 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; X32-SSE-NEXT: psrlq %xmm4, %xmm2 +; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: psrlq %xmm4, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; X32-SSE-NEXT: xorpd %xmm2, %xmm0 +; X32-SSE-NEXT: psubq %xmm2, %xmm0 ; X32-SSE-NEXT: retl - %shift = ashr <4 x i16> %a, %b - ret <4 x i16> %shift + %shift = ashr <2 x i16> %a, %b + ret <2 x i16> %shift } -define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { -; SSE2-LABEL: var_shift_v2i16: +define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { +; SSE2-LABEL: var_shift_v8i8: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psllw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psraw $8, %xmm3 ; SSE2-NEXT: psllw $12, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psraw $15, %xmm0 ; SSE2-NEXT: psraw $15, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pandn %xmm0, %xmm3 -; SSE2-NEXT: psraw $8, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: pandn %xmm3, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: paddw %xmm1, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: psraw $15, %xmm2 @@ 
-332,57 +486,66 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41-LABEL: var_shift_v2i16: +; SSE41-LABEL: var_shift_v8i8: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psllw $8, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm1 +; SSE41-NEXT: psraw $8, %xmm1 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: psllw $12, %xmm0 ; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: por %xmm0, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddw %xmm2, %xmm3 -; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: psraw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: paddw %xmm2, %xmm4 +; SSE41-NEXT: psraw $15, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1 +; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: psraw $4, %xmm2 -; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: movdqa %xmm4, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: psraw $2, %xmm2 -; SSE41-NEXT: paddw %xmm3, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: paddw %xmm4, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: psraw $1, %xmm2 -; SSE41-NEXT: paddw %xmm3, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: paddw %xmm4, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX1-LABEL: var_shift_v2i16: +; AVX1-LABEL: var_shift_v8i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2 +; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm2 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 -; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 -; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm2 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; -; AVX2-LABEL: var_shift_v2i16: +; AVX2-LABEL: var_shift_v8i8: ; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX2-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 @@ -391,15 +554,21 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; XOP-LABEL: var_shift_v2i16: +; XOP-LABEL: var_shift_v8i8: ; XOP: # %bb.0: +; XOP-NEXT: vpsllw $8, %xmm0, %xmm0 +; XOP-NEXT: vpsraw $8, %xmm0, %xmm0 +; XOP-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; XOP-NEXT: 
vpxor %xmm2, %xmm2, %xmm2 ; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1 ; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: var_shift_v2i16: +; AVX512DQ-LABEL: var_shift_v8i8: ; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0 @@ -408,17 +577,21 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; -; AVX512BW-LABEL: var_shift_v2i16: +; AVX512BW-LABEL: var_shift_v8i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; -; AVX512DQVL-LABEL: var_shift_v2i16: +; AVX512DQVL-LABEL: var_shift_v8i8: ; AVX512DQVL: # %bb.0: +; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 @@ -426,21 +599,27 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX512DQVL-NEXT: vzeroupper ; AVX512DQVL-NEXT: retq ; -; AVX512BWVL-LABEL: var_shift_v2i16: +; AVX512BWVL-LABEL: var_shift_v8i8: ; AVX512BWVL: # %bb.0: +; AVX512BWVL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: retq ; -; X32-SSE-LABEL: var_shift_v2i16: +; X32-SSE-LABEL: var_shift_v8i8: ; X32-SSE: # %bb.0: +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psllw $8, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psraw $8, %xmm3 ; X32-SSE-NEXT: psllw $12, %xmm1 -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 +; X32-SSE-NEXT: movdqa %xmm1, %xmm0 +; X32-SSE-NEXT: psraw $15, %xmm0 ; X32-SSE-NEXT: psraw $15, %xmm2 -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pandn %xmm0, %xmm3 -; X32-SSE-NEXT: psraw $8, %xmm0 -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm3, %xmm0 +; X32-SSE-NEXT: pand %xmm0, %xmm2 +; X32-SSE-NEXT: pandn %xmm3, %xmm0 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: paddw %xmm1, %xmm1 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2 ; X32-SSE-NEXT: psraw $15, %xmm2 @@ -465,234 +644,6 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; X32-SSE-NEXT: pand %xmm1, %xmm0 ; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl - %shift = ashr <2 x i16> %a, %b - ret <2 x i16> %shift -} - -define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { -; SSE2-LABEL: var_shift_v8i8: -; SSE2: # %bb.0: -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE2-NEXT: psllw $5, %xmm1 -; SSE2-NEXT: 
punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $4, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $2, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm4 -; SSE2-NEXT: pandn %xmm2, %xmm4 -; SSE2-NEXT: psraw $1, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $4, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $2, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: psraw $1, %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: psrlw $8, %xmm0 -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: retq -; -; SSE41-LABEL: var_shift_v8i8: -; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $4, %xmm4 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $2, %xmm4 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $1, %xmm4 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $4, %xmm2 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $2, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $1, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: 
pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: psrlw $8, %xmm1 -; SSE41-NEXT: packuswb %xmm3, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: retq -; -; AVX-LABEL: var_shift_v8i8: -; AVX: # %bb.0: -; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 -; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: retq -; -; XOP-LABEL: var_shift_v8i8: -; XOP: # %bb.0: -; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0 -; XOP-NEXT: retq -; -; AVX512DQ-LABEL: var_shift_v8i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: var_shift_v8i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: var_shift_v8i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; 
AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq -; -; AVX512BWVL-LABEL: var_shift_v8i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq -; -; X32-SSE-LABEL: var_shift_v8i8: -; X32-SSE: # %bb.0: -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; X32-SSE-NEXT: psllw $5, %xmm1 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; X32-SSE-NEXT: pxor %xmm3, %xmm3 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $4, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $2, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm4 -; X32-SSE-NEXT: pandn %xmm2, %xmm4 -; X32-SSE-NEXT: psraw $1, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm4, %xmm2 -; X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $4, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $2, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa %xmm3, %xmm1 -; X32-SSE-NEXT: pandn %xmm0, %xmm1 -; X32-SSE-NEXT: psraw $1, %xmm0 -; X32-SSE-NEXT: pand %xmm3, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 -; X32-SSE-NEXT: retl %shift = ashr <8 x i8> %a, %b ret <8 x i8> %shift } @@ -700,226 +651,131 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; SSE2-LABEL: var_shift_v4i8: ; SSE2: # %bb.0: -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE2-NEXT: psllw $5, %xmm1 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = 
xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $4, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $2, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm4 -; SSE2-NEXT: pandn %xmm2, %xmm4 -; SSE2-NEXT: psraw $1, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $4, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $2, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: psraw $1, %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: psrlw $8, %xmm0 -; SSE2-NEXT: packuswb %xmm2, %xmm0 +; SSE2-NEXT: pslld $24, %xmm0 +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrad %xmm2, %xmm3 +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad %xmm4, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: psrad %xmm3, %xmm4 +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7] +; SSE2-NEXT: psrad %xmm1, %xmm0 +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3] +; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: var_shift_v4i8: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $4, %xmm4 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $2, %xmm4 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $1, %xmm4 -; SSE41-NEXT: paddw 
%xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $4, %xmm2 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $2, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $1, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: psrlw $8, %xmm1 -; SSE41-NEXT: packuswb %xmm3, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pslld $24, %xmm0 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad %xmm2, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm5 +; SSE41-NEXT: psrad %xmm4, %xmm5 +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7] +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad %xmm1, %xmm3 +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7] +; SSE41-NEXT: psrad %xmm1, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7] ; SSE41-NEXT: retq ; -; AVX-LABEL: var_shift_v4i8: -; AVX: # %bb.0: -; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 -; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: var_shift_v4i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2 +; 
AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3 +; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: retq ; -; XOP-LABEL: var_shift_v4i8: -; XOP: # %bb.0: -; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0 -; XOP-NEXT: retq +; AVX2-LABEL: var_shift_v4i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq ; -; AVX512DQ-LABEL: var_shift_v4i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq +; XOPAVX1-LABEL: var_shift_v4i8: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: retq ; -; AVX512BW-LABEL: var_shift_v4i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; XOPAVX2-LABEL: var_shift_v4i8: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: retq ; -; AVX512DQVL-LABEL: var_shift_v4i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: var_shift_v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpand 
{{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: var_shift_v4i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: var_shift_v4i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: var_shift_v4i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; X32-SSE-NEXT: psllw $5, %xmm1 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; X32-SSE-NEXT: pxor %xmm3, %xmm3 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $4, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $2, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm4 -; X32-SSE-NEXT: pandn %xmm2, %xmm4 -; X32-SSE-NEXT: psraw $1, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm4, %xmm2 -; X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $4, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $2, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa %xmm3, %xmm1 -; X32-SSE-NEXT: pandn %xmm0, %xmm1 -; X32-SSE-NEXT: psraw $1, %xmm0 -; X32-SSE-NEXT: pand %xmm3, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 +; X32-SSE-NEXT: pslld $24, %xmm0 +; X32-SSE-NEXT: psrad $24, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrad %xmm2, %xmm3 +; 
X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad %xmm4, %xmm2 +; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm4 +; X32-SSE-NEXT: psrad %xmm3, %xmm4 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: psrad %xmm1, %xmm0 +; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3] +; X32-SSE-NEXT: movaps %xmm2, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <4 x i8> %a, %b ret <4 x i8> %shift @@ -928,226 +784,136 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; SSE2-LABEL: var_shift_v2i8: ; SSE2: # %bb.0: -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE2-NEXT: psllw $5, %xmm1 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $4, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm2, %xmm6 -; SSE2-NEXT: psraw $2, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: paddw %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm4 -; SSE2-NEXT: pandn %xmm2, %xmm4 -; SSE2-NEXT: psraw $1, %xmm2 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $4, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: pandn %xmm0, %xmm5 -; SSE2-NEXT: psraw $2, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: paddw %xmm1, %xmm1 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: psraw $1, %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: psrlw $8, %xmm0 -; SSE2-NEXT: packuswb %xmm2, %xmm0 +; SSE2-NEXT: psllq $56, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: 
pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE2-NEXT: xorpd %xmm2, %xmm0 +; SSE2-NEXT: psubq %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: var_shift_v2i8: ; SSE41: # %bb.0: +; SSE41-NEXT: psllq $56, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $4, %xmm4 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $2, %xmm4 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psraw $1, %xmm4 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3 -; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $4, %xmm2 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $2, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psraw $1, %xmm2 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 -; SSE41-NEXT: psrlw $8, %xmm1 -; SSE41-NEXT: packuswb %xmm3, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE41-NEXT: psrlq %xmm4, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrlq %xmm4, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm2, %xmm0 +; SSE41-NEXT: psubq %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: var_shift_v2i8: -; AVX: # %bb.0: -; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: 
vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 -; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: var_shift_v2i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: var_shift_v2i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm2 +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; XOP-LABEL: var_shift_v2i8: ; XOP: # %bb.0: +; XOP-NEXT: vpsllq $56, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0 +; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: var_shift_v2i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: var_shift_v2i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = 
xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: var_shift_v2i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: var_shift_v2i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0 +; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: var_shift_v2i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: var_shift_v2i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: var_shift_v2i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; X32-SSE-NEXT: psllw $5, %xmm1 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] -; X32-SSE-NEXT: pxor %xmm3, %xmm3 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $4, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm6 -; X32-SSE-NEXT: pandn %xmm2, %xmm6 -; X32-SSE-NEXT: psraw $2, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm6, %xmm2 -; X32-SSE-NEXT: paddw %xmm4, %xmm4 -; X32-SSE-NEXT: pxor %xmm5, %xmm5 -; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5 -; X32-SSE-NEXT: movdqa %xmm5, %xmm4 -; X32-SSE-NEXT: pandn %xmm2, %xmm4 -; X32-SSE-NEXT: psraw $1, %xmm2 -; X32-SSE-NEXT: pand %xmm5, %xmm2 -; X32-SSE-NEXT: por %xmm4, %xmm2 -; 
X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $4, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pxor %xmm4, %xmm4 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4 -; X32-SSE-NEXT: movdqa %xmm4, %xmm5 -; X32-SSE-NEXT: pandn %xmm0, %xmm5 -; X32-SSE-NEXT: psraw $2, %xmm0 -; X32-SSE-NEXT: pand %xmm4, %xmm0 -; X32-SSE-NEXT: por %xmm5, %xmm0 -; X32-SSE-NEXT: paddw %xmm1, %xmm1 -; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa %xmm3, %xmm1 -; X32-SSE-NEXT: pandn %xmm0, %xmm1 -; X32-SSE-NEXT: psraw $1, %xmm0 -; X32-SSE-NEXT: pand %xmm3, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 +; X32-SSE-NEXT: psllq $56, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad $31, %xmm2 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; X32-SSE-NEXT: psrad $24, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; X32-SSE-NEXT: psrlq %xmm4, %xmm2 +; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: psrlq %xmm4, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; X32-SSE-NEXT: xorpd %xmm2, %xmm0 +; X32-SSE-NEXT: psubq %xmm2, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <2 x i8> %a, %b ret <2 x i8> %shift @@ -1160,46 +926,154 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { define <2 x i32> @splatvar_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: xorps %xmm2, %xmm2 -; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3] -; SSE2-NEXT: psrad %xmm2, %xmm0 +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808] +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm0, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm1 +; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrlq %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: xorpd %xmm1, %xmm2 +; SSE2-NEXT: psubq %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v2i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; SSE41-NEXT: psrad %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllq $32, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = 
xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1] +; SSE41-NEXT: pxor %xmm1, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlq %xmm0, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psrlq %xmm1, %xmm4 +; SSE41-NEXT: psrlq %xmm0, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: psubq %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: splatvar_shift_v2i32: -; AVX: # %bb.0: -; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: splatvar_shift_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq ; -; XOP-LABEL: splatvar_shift_v2i32: -; XOP: # %bb.0: -; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0 -; XOP-NEXT: retq +; AVX2-LABEL: splatvar_shift_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; XOPAVX1-LABEL: splatvar_shift_v2i32: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: retq +; +; XOPAVX2-LABEL: splatvar_shift_v2i32: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1 +; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: 
retq ; ; AVX512-LABEL: splatvar_shift_v2i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0 +; AVX512-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatvar_shift_v2i32: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX512VL-NEXT: vpsrad %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v2i32: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: xorps %xmm2, %xmm2 -; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3] -; X32-SSE-NEXT: psrad %xmm2, %xmm0 +; X32-SSE-NEXT: psllq $32, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; X32-SSE-NEXT: psrad $31, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [4294967295,0,4294967295,0] +; X32-SSE-NEXT: pand %xmm1, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648] +; X32-SSE-NEXT: movdqa %xmm3, %xmm4 +; X32-SSE-NEXT: psrlq %xmm0, %xmm4 +; X32-SSE-NEXT: xorps %xmm5, %xmm5 +; X32-SSE-NEXT: movss {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3] +; X32-SSE-NEXT: psrlq %xmm5, %xmm3 +; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1] +; X32-SSE-NEXT: movdqa %xmm2, %xmm1 +; X32-SSE-NEXT: psrlq %xmm5, %xmm1 +; X32-SSE-NEXT: psrlq %xmm0, %xmm2 +; X32-SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[0],xmm1[1] +; X32-SSE-NEXT: xorpd %xmm3, %xmm2 +; X32-SSE-NEXT: psubq %xmm3, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer %shift = ashr <2 x i32> %a, %splat @@ -1209,46 +1083,146 @@ define <2 x i32> @splatvar_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind { define <4 x i16> @splatvar_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v4i16: ; SSE2: # %bb.0: -; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] -; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; SSE2-NEXT: psraw %xmm1, %xmm0 +; SSE2-NEXT: pslld $16, %xmm0 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrad %xmm1, %xmm3 +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad %xmm4, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: psrad %xmm3, %xmm4 +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7] +; 
SSE2-NEXT: psrad %xmm2, %xmm0 +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3] +; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v4i16: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; SSE41-NEXT: psraw %xmm1, %xmm0 +; SSE41-NEXT: pslld $16, %xmm0 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7] +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm5 +; SSE41-NEXT: psrad %xmm4, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm6 +; SSE41-NEXT: psrad %xmm4, %xmm6 +; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad %xmm1, %xmm2 +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,1,1,1,4,5,6,7] +; SSE41-NEXT: psrad %xmm1, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7] ; SSE41-NEXT: retq ; -; AVX-LABEL: splatvar_shift_v4i16: -; AVX: # %bb.0: -; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: splatvar_shift_v4i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4 +; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2 +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] +; AVX1-NEXT: retq ; -; XOP-LABEL: splatvar_shift_v4i16: -; XOP: # %bb.0: -; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0 -; XOP-NEXT: retq +; AVX2-LABEL: splatvar_shift_v4i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; XOPAVX1-LABEL: splatvar_shift_v4i16: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] +; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = 
xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: retq +; +; XOPAVX2-LABEL: splatvar_shift_v4i16: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpbroadcastd %xmm1, %xmm1 +; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: retq ; ; AVX512-LABEL: splatvar_shift_v4i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX512-NEXT: vpbroadcastd %xmm1, %xmm1 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatvar_shift_v4i16: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpbroadcastd %xmm1, %xmm1 +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7] +; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v4i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] -; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; X32-SSE-NEXT: psraw %xmm1, %xmm0 +; X32-SSE-NEXT: pslld $16, %xmm0 +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrad %xmm1, %xmm3 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad %xmm4, %xmm1 +; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm4 +; X32-SSE-NEXT: psrad %xmm3, %xmm4 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: psrad %xmm2, %xmm0 +; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3] +; X32-SSE-NEXT: movaps %xmm1, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer %shift = ashr <4 x i16> %a, %splat @@ -1258,46 +1232,160 @@ define <4 x i16> @splatvar_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { define <2 x i16> @splatvar_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v2i16: ; SSE2: # %bb.0: -; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] -; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; SSE2-NEXT: 
psraw %xmm1, %xmm0 +; SSE2-NEXT: psllq $48, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE2-NEXT: xorpd %xmm2, %xmm0 +; SSE2-NEXT: psubq %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v2i16: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; SSE41-NEXT: psraw %xmm1, %xmm0 +; SSE41-NEXT: psllq $48, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: psrlq %xmm2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psrlq %xmm2, %xmm4 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm3, %xmm0 +; SSE41-NEXT: psubq %xmm3, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: splatvar_shift_v2i16: -; AVX: # %bb.0: -; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: splatvar_shift_v2i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq ; -; XOP-LABEL: splatvar_shift_v2i16: -; XOP: # %bb.0: -; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0 -; XOP-NEXT: retq +; AVX2-LABEL: splatvar_shift_v2i16: +; AVX2: # %bb.0: +; 
AVX2-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; XOPAVX1-LABEL: splatvar_shift_v2i16: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: retq +; +; XOPAVX2-LABEL: splatvar_shift_v2i16: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpsllq $48, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1 +; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: retq ; ; AVX512-LABEL: splatvar_shift_v2i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0 +; AVX512-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatvar_shift_v2i16: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero -; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1 +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v2i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] -; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; X32-SSE-NEXT: psraw %xmm1, %xmm0 +; X32-SSE-NEXT: psllq $48, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad $31, %xmm2 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; 
X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; X32-SSE-NEXT: psrlq %xmm4, %xmm2 +; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: psrlq %xmm4, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; X32-SSE-NEXT: xorpd %xmm2, %xmm0 +; X32-SSE-NEXT: psubq %xmm2, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer %shift = ashr <2 x i16> %a, %splat @@ -1307,139 +1395,203 @@ define <2 x i16> @splatvar_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { define <8 x i8> @splatvar_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v8i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; SSE2-NEXT: psrlw %xmm1, %xmm0 -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psllw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; SSE2-NEXT: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psraw $15, %xmm0 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: pandn %xmm3, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $2, %xmm0 ; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: psubb %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psraw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v8i8: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; SSE41-NEXT: psrlw %xmm1, %xmm0 -; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE41-NEXT: psrlw %xmm1, %xmm2 -; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; SSE41-NEXT: pand %xmm2, %xmm0 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE41-NEXT: psrlw %xmm1, %xmm2 -; SSE41-NEXT: pxor %xmm2, %xmm0 -; SSE41-NEXT: psubb %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psllw $8, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm1 +; SSE41-NEXT: psraw $8, %xmm1 +; SSE41-NEXT: pshufb {{.*#+}} xmm2 = 
xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm2 +; SSE41-NEXT: por %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: paddw %xmm2, %xmm4 +; SSE41-NEXT: psraw $15, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $4, %xmm2 +; SSE41-NEXT: movdqa %xmm4, %xmm0 +; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $2, %xmm2 +; SSE41-NEXT: paddw %xmm4, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm0 +; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $1, %xmm2 +; SSE41-NEXT: paddw %xmm4, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm0 +; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: splatvar_shift_v8i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm2 +; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 +; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 +; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: splatvar_shift_v8i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2 -; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX2-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; 
AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; XOPAVX1-LABEL: splatvar_shift_v8i8: -; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7] -; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0 -; XOPAVX1-NEXT: retq -; -; XOPAVX2-LABEL: splatvar_shift_v8i8: -; XOPAVX2: # %bb.0: -; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1 -; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0 -; XOPAVX2-NEXT: retq +; XOP-LABEL: splatvar_shift_v8i8: +; XOP: # %bb.0: +; XOP-NEXT: vpsllw $8, %xmm0, %xmm0 +; XOP-NEXT: vpsraw $8, %xmm0, %xmm0 +; XOP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1 +; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0 +; XOP-NEXT: retq ; ; AVX512DQ-LABEL: splatvar_shift_v8i8: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: splatvar_shift_v8i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: splatvar_shift_v8i8: ; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = 
xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 +; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0 ; AVX512DQVL-NEXT: vzeroupper ; AVX512DQVL-NEXT: retq ; ; AVX512BWVL-LABEL: splatvar_shift_v8i8: ; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper +; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero +; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v8i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; X32-SSE-NEXT: psrlw %xmm1, %xmm0 -; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psllw $8, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psraw $8, %xmm3 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; X32-SSE-NEXT: psllw $12, %xmm1 +; X32-SSE-NEXT: movdqa %xmm1, %xmm0 +; X32-SSE-NEXT: psraw $15, %xmm0 +; X32-SSE-NEXT: psraw $15, %xmm2 +; X32-SSE-NEXT: pand %xmm0, %xmm2 +; X32-SSE-NEXT: pandn %xmm3, %xmm0 +; X32-SSE-NEXT: por %xmm2, %xmm0 +; X32-SSE-NEXT: paddw %xmm1, %xmm1 +; X32-SSE-NEXT: movdqa %xmm1, %xmm2 +; X32-SSE-NEXT: psraw $15, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: pandn %xmm0, %xmm3 +; X32-SSE-NEXT: psraw $4, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: por %xmm3, %xmm0 +; X32-SSE-NEXT: paddw %xmm1, %xmm1 +; X32-SSE-NEXT: movdqa %xmm1, %xmm2 +; X32-SSE-NEXT: psraw $15, %xmm2 +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: pandn %xmm0, %xmm3 +; X32-SSE-NEXT: psraw $2, %xmm0 ; X32-SSE-NEXT: pand %xmm2, 
%xmm0 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: pxor %xmm2, %xmm0 -; X32-SSE-NEXT: psubb %xmm2, %xmm0 +; X32-SSE-NEXT: por %xmm3, %xmm0 +; X32-SSE-NEXT: paddw %xmm1, %xmm1 +; X32-SSE-NEXT: psraw $15, %xmm1 +; X32-SSE-NEXT: movdqa %xmm1, %xmm2 +; X32-SSE-NEXT: pandn %xmm0, %xmm2 +; X32-SSE-NEXT: psraw $1, %xmm0 +; X32-SSE-NEXT: pand %xmm1, %xmm0 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer %shift = ashr <8 x i8> %a, %splat @@ -1449,139 +1601,133 @@ define <8 x i8> @splatvar_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { define <4 x i8> @splatvar_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v4i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; SSE2-NEXT: psrlw %xmm1, %xmm0 -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: psubb %xmm2, %xmm0 +; SSE2-NEXT: pslld $24, %xmm0 +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrad %xmm1, %xmm3 +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad %xmm4, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: psrad %xmm3, %xmm4 +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7] +; SSE2-NEXT: psrad %xmm2, %xmm0 +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3] +; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v4i8: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; SSE41-NEXT: psrlw %xmm1, %xmm0 -; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE41-NEXT: psrlw %xmm1, %xmm2 -; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; SSE41-NEXT: pand %xmm2, %xmm0 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE41-NEXT: psrlw %xmm1, %xmm2 -; SSE41-NEXT: pxor %xmm2, %xmm0 -; SSE41-NEXT: psubb %xmm2, %xmm0 +; SSE41-NEXT: pslld $24, %xmm0 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad %xmm2, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7] +; SSE41-NEXT: 
movdqa %xmm0, %xmm5 +; SSE41-NEXT: psrad %xmm4, %xmm5 +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7] +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad %xmm1, %xmm3 +; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7] +; SSE41-NEXT: psrad %xmm1, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7] ; SSE41-NEXT: retq ; ; AVX1-LABEL: splatvar_shift_v4i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2 +; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3 +; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3 +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: retq ; ; AVX2-LABEL: splatvar_shift_v4i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2 -; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; XOPAVX1-LABEL: splatvar_shift_v4i8: ; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7] +; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0 +; XOPAVX1-NEXT: 
vpsubd %xmm1, %xmm2, %xmm1 +; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq ; ; XOPAVX2-LABEL: splatvar_shift_v4i8: ; XOPAVX2: # %bb.0: -; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1 -; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ; XOPAVX2-NEXT: retq ; -; AVX512DQ-LABEL: splatvar_shift_v4i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: splatvar_shift_v4i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: splatvar_shift_v4i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: splatvar_shift_v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: splatvar_shift_v4i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; 
AVX512VL-LABEL: splatvar_shift_v4i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero +; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v4i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; X32-SSE-NEXT: psrlw %xmm1, %xmm0 -; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: pxor %xmm2, %xmm0 -; X32-SSE-NEXT: psubb %xmm2, %xmm0 +; X32-SSE-NEXT: pslld $24, %xmm0 +; X32-SSE-NEXT: psrad $24, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrad %xmm1, %xmm3 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad %xmm4, %xmm1 +; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7] +; X32-SSE-NEXT: movdqa %xmm0, %xmm4 +; X32-SSE-NEXT: psrad %xmm3, %xmm4 +; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7] +; X32-SSE-NEXT: psrad %xmm2, %xmm0 +; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3] +; X32-SSE-NEXT: movaps %xmm1, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer %shift = ashr <4 x i8> %a, %splat @@ -1591,130 +1737,138 @@ define <4 x i8> @splatvar_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; SSE2-LABEL: splatvar_shift_v2i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; SSE2-NEXT: psrlw %xmm1, %xmm0 -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE2-NEXT: psrlw %xmm1, %xmm2 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: psubb %xmm2, %xmm0 +; SSE2-NEXT: psllq $56, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: 
punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: psrlq %xmm4, %xmm2 +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrlq %xmm1, %xmm3 +; SSE2-NEXT: psrlq %xmm4, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE2-NEXT: xorpd %xmm2, %xmm0 +; SSE2-NEXT: psubq %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: splatvar_shift_v2i8: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; SSE41-NEXT: psrlw %xmm1, %xmm0 -; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE41-NEXT: psrlw %xmm1, %xmm2 -; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; SSE41-NEXT: pand %xmm2, %xmm0 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; SSE41-NEXT: psrlw %xmm1, %xmm2 +; SSE41-NEXT: psllq $56, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE41-NEXT: psrlq %xmm4, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrlq %xmm4, %xmm3 +; SSE41-NEXT: psrlq %xmm1, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] ; SSE41-NEXT: pxor %xmm2, %xmm0 -; SSE41-NEXT: psubb %xmm2, %xmm0 +; SSE41-NEXT: psubq %xmm2, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: splatvar_shift_v2i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm3 +; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw 
{{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: splatvar_shift_v2i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero -; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2 -; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2 -; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1 -; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm2 +; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; XOP-LABEL: splatvar_shift_v2i8: ; XOP: # %bb.0: -; XOP-NEXT: insertq {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7,u,u,u,u,u,u,u,u] +; XOP-NEXT: vpsllq $56, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0 +; XOP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1 +; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: splatvar_shift_v2i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: splatvar_shift_v2i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: splatvar_shift_v2i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = 
xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: splatvar_shift_v2i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0 +; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: splatvar_shift_v2i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: splatvar_shift_v2i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero +; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatvar_shift_v2i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; X32-SSE-NEXT: psrlw %xmm1, %xmm0 -; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: psrlw $8, %xmm2 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7] -; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896] -; X32-SSE-NEXT: psrlw %xmm1, %xmm2 -; X32-SSE-NEXT: pxor %xmm2, %xmm0 -; X32-SSE-NEXT: psubb %xmm2, %xmm0 +; X32-SSE-NEXT: psllq $56, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad $31, %xmm2 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; X32-SSE-NEXT: psrad $24, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; X32-SSE-NEXT: psrlq %xmm4, %xmm2 +; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrlq %xmm1, %xmm3 +; X32-SSE-NEXT: psrlq %xmm4, %xmm0 +; X32-SSE-NEXT: 
movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; X32-SSE-NEXT: xorpd %xmm2, %xmm0 +; X32-SSE-NEXT: psubq %xmm2, %xmm0 ; X32-SSE-NEXT: retl %splat = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer %shift = ashr <2 x i8> %a, %splat @@ -1728,62 +1882,100 @@ define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { define <2 x i32> @constant_shift_v2i32(<2 x i32> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrad $4, %xmm1 -; SSE2-NEXT: psrad $5, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrlq $4, %xmm0 +; SSE2-NEXT: psrlq $5, %xmm1 +; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] +; SSE2-NEXT: movapd {{.*#+}} xmm0 = [576460752303423488,288230376151711744] +; SSE2-NEXT: xorpd %xmm0, %xmm1 +; SSE2-NEXT: psubq %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: constant_shift_v2i32: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: psrad $5, %xmm1 -; SSE41-NEXT: psrad $4, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7] +; SSE41-NEXT: psllq $32, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrlq $5, %xmm0 +; SSE41-NEXT: psrlq $4, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [576460752303423488,288230376151711744] +; SSE41-NEXT: pxor %xmm0, %xmm1 +; SSE41-NEXT: psubq %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_shift_v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1 -; AVX1-NEXT: vpsrad $4, %xmm0, %xmm0 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7] +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrlq $5, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $4, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [576460752303423488,288230376151711744] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_shift_v2i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [576460752303423488,288230376151711744] +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; -; XOPAVX1-LABEL: constant_shift_v2i32: -; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0 -; XOPAVX1-NEXT: retq -; -; XOPAVX2-LABEL: constant_shift_v2i32: -; XOPAVX2: # %bb.0: -; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 -; XOPAVX2-NEXT: retq +; XOP-LABEL: constant_shift_v2i32: +; XOP: # %bb.0: +; XOP-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: retq ; ; 
AVX512-LABEL: constant_shift_v2i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: constant_shift_v2i32: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v2i32: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: movdqa %xmm0, %xmm1 -; X32-SSE-NEXT: psrad $4, %xmm1 -; X32-SSE-NEXT: psrad $5, %xmm0 -; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; X32-SSE-NEXT: psllq $32, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; X32-SSE-NEXT: psrad $31, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; X32-SSE-NEXT: movdqa %xmm1, %xmm0 +; X32-SSE-NEXT: psrlq $4, %xmm0 +; X32-SSE-NEXT: psrlq $5, %xmm1 +; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] +; X32-SSE-NEXT: movapd {{.*#+}} xmm0 = [3.7857669957336791E-270,2.0522684006491881E-289] +; X32-SSE-NEXT: xorpd %xmm0, %xmm1 +; X32-SSE-NEXT: psubq %xmm0, %xmm1 +; X32-SSE-NEXT: movdqa %xmm1, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <2 x i32> %a, <i32 4, i32 5> ret <2 x i32> %shift @@ -1792,83 +1984,96 @@ define <2 x i32> @constant_shift_v2i32(<2 x i32> %a) nounwind { define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind { ; SSE2-LABEL: constant_shift_v4i16: ; SSE2: # %bb.0: +; SSE2-NEXT: pslld $16, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psraw $2, %xmm1 -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0] -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3] -; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,65535,65535,65535] +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $19, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrad $18, %xmm3 +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1] +; SSE2-NEXT: psrad $17, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; SSE2-NEXT: movaps %xmm1, %xmm0 -; SSE2-NEXT: andps %xmm2, %xmm0 -; SSE2-NEXT: psraw $1, %xmm1 -; SSE2-NEXT: andnps %xmm1, %xmm2 -; SSE2-NEXT: orps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: constant_shift_v4i16: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <u,32768,16384,8192,u,u,u,u> -; SSE41-NEXT: pmulhw %xmm0, %xmm1 -; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] -; SSE41-NEXT: psraw $1, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7] +; SSE41-NEXT: pslld $16, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $16, %xmm1 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $19, %xmm2 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad $17, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; SSE41-NEXT: psrad $18, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] ; SSE41-NEXT: retq ; -; AVX-LABEL: constant_shift_v4i16: -; AVX: # %bb.0: -; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 -; AVX-NEXT: 
vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] -; AVX-NEXT: vpsraw $1, %xmm0, %xmm0 -; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7] -; AVX-NEXT: retq +; AVX1-LABEL: constant_shift_v4i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $19, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $17, %xmm0, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrad $18, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: retq ; -; XOP-LABEL: constant_shift_v4i16: -; XOP: # %bb.0: -; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 -; XOP-NEXT: retq +; AVX2-LABEL: constant_shift_v4i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: retq ; -; AVX512DQ-LABEL: constant_shift_v4i16: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 -; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq +; XOPAVX1-LABEL: constant_shift_v4i16: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX1-NEXT: retq ; -; AVX512BW-LABEL: constant_shift_v4i16: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <0,1,2,3,u,u,u,u> -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; XOPAVX2-LABEL: constant_shift_v4i16: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX2-NEXT: retq ; -; AVX512DQVL-LABEL: constant_shift_v4i16: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 -; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: constant_shift_v4i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: constant_shift_v4i16: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: constant_shift_v4i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v4i16: ; X32-SSE: # %bb.0: +; X32-SSE-NEXT: pslld $16, %xmm0 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 -; X32-SSE-NEXT: psraw $2, %xmm1 -; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0] -; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3] -; X32-SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,65535,65535,65535] +; X32-SSE-NEXT: psrad $16, %xmm1 +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrad $19, %xmm2 +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrad $18, %xmm3 +; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1] +; X32-SSE-NEXT: psrad 
$17, %xmm0 +; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; X32-SSE-NEXT: movaps %xmm1, %xmm0 -; X32-SSE-NEXT: andps %xmm2, %xmm0 -; X32-SSE-NEXT: psraw $1, %xmm1 -; X32-SSE-NEXT: andnps %xmm1, %xmm2 -; X32-SSE-NEXT: orps %xmm2, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3> ret <4 x i16> %shift @@ -1877,336 +2082,425 @@ define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind { define <2 x i16> @constant_shift_v2i16(<2 x i16> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i16: ; SSE2: # %bb.0: +; SSE2-NEXT: psllq $48, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psraw $3, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535] -; SSE2-NEXT: psraw $2, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: pandn %xmm1, %xmm2 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlq $2, %xmm1 +; SSE2-NEXT: psrlq $3, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-NEXT: movapd {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; SSE2-NEXT: xorpd %xmm1, %xmm0 +; SSE2-NEXT: psubq %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: constant_shift_v2i16: ; SSE41: # %bb.0: +; SSE41-NEXT: psllq $48, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: psraw $3, %xmm1 -; SSE41-NEXT: psraw $2, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrlq $3, %xmm1 +; SSE41-NEXT: psrlq $2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; SSE41-NEXT: pxor %xmm1, %xmm0 +; SSE41-NEXT: psubq %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: constant_shift_v2i16: -; AVX: # %bb.0: -; AVX-NEXT: vpsraw $3, %xmm0, %xmm1 -; AVX-NEXT: vpsraw $2, %xmm0, %xmm0 -; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] -; AVX-NEXT: retq +; AVX1-LABEL: constant_shift_v2i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $2, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: constant_shift_v2i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; AVX2-NEXT: vpxor %xmm1, %xmm0, 
%xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq ; ; XOP-LABEL: constant_shift_v2i16: ; XOP: # %bb.0: -; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsllq $48, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: constant_shift_v2i16: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpsraw $3, %xmm0, %xmm1 -; AVX512DQ-NEXT: vpsraw $2, %xmm0, %xmm0 -; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: constant_shift_v2i16: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,u,u,u,u,u,u> -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: constant_shift_v2i16: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpsraw $3, %xmm0, %xmm1 -; AVX512DQVL-NEXT: vpsraw $2, %xmm0, %xmm0 -; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: constant_shift_v2i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: constant_shift_v2i16: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: constant_shift_v2i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v2i16: ; X32-SSE: # %bb.0: +; X32-SSE-NEXT: psllq $48, %xmm0 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 -; X32-SSE-NEXT: psraw $3, %xmm1 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535] -; X32-SSE-NEXT: psraw $2, %xmm0 -; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: pandn %xmm1, %xmm2 -; X32-SSE-NEXT: por %xmm2, %xmm0 +; X32-SSE-NEXT: psrad $31, %xmm1 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrlq $2, %xmm1 +; X32-SSE-NEXT: psrlq $3, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; X32-SSE-NEXT: movapd {{.*#+}} xmm1 = [1.4916681462400413E-154,1.2882297539194267E-231] +; X32-SSE-NEXT: xorpd %xmm1, %xmm0 +; X32-SSE-NEXT: psubq %xmm1, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <2 x i16> %a, <i16 2, i16 3> ret <2 x i16> %shift } define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind { -; SSE-LABEL: constant_shift_v8i8: -; SSE: # %bb.0: -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: psraw $8, %xmm0 -; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0 -; SSE-NEXT: psrlw $8, %xmm0 -; SSE-NEXT: packuswb %xmm2, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: constant_shift_v8i8: +; 
SSE2: # %bb.0: +; SSE2-NEXT: psllw $8, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psraw $8, %xmm1 +; SSE2-NEXT: psraw $12, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-NEXT: movapd %xmm0, %xmm1 +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3] +; SSE2-NEXT: psraw $2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0] +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: andps %xmm2, %xmm0 +; SSE2-NEXT: psraw $1, %xmm1 +; SSE2-NEXT: andnps %xmm1, %xmm2 +; SSE2-NEXT: orps %xmm2, %xmm0 +; SSE2-NEXT: retq ; -; AVX1-LABEL: constant_shift_v8i8: -; AVX1: # %bb.0: -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: retq +; SSE41-LABEL: constant_shift_v8i8: +; SSE41: # %bb.0: +; SSE41-NEXT: psllw $8, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psraw $8, %xmm1 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512> +; SSE41-NEXT: pmulhw %xmm1, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7] +; SSE41-NEXT: psraw $9, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7] +; SSE41-NEXT: retq ; -; AVX2-LABEL: constant_shift_v8i8: -; AVX2: # %bb.0: -; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq +; AVX-LABEL: constant_shift_v8i8: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm1, %xmm2 +; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7] +; AVX-NEXT: vpsraw $9, %xmm0, %xmm0 +; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7] +; AVX-NEXT: retq ; ; XOP-LABEL: constant_shift_v8i8: ; XOP: # %bb.0: -; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsllw $8, %xmm0, %xmm0 +; XOP-NEXT: vpsraw $8, %xmm0, %xmm0 +; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512DQ-LABEL: constant_shift_v8i8: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 +; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: constant_shift_v8i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,0,0,0,0,0,0,0] -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 +; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: 
# kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: constant_shift_v8i8: ; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 +; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0 ; AVX512DQVL-NEXT: vzeroupper ; AVX512DQVL-NEXT: retq ; ; AVX512BWVL-LABEL: constant_shift_v8i8: ; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper +; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 ; AVX512BWVL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v8i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pxor %xmm1, %xmm1 -; X32-SSE-NEXT: movdqa %xmm0, %xmm2 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: psraw $8, %xmm0 -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 +; X32-SSE-NEXT: psllw $8, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psraw $8, %xmm1 +; X32-SSE-NEXT: psraw $12, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; X32-SSE-NEXT: movapd %xmm0, %xmm1 +; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3] +; X32-SSE-NEXT: psraw $2, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; X32-SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0] +; X32-SSE-NEXT: movaps %xmm1, %xmm0 +; X32-SSE-NEXT: andps %xmm2, %xmm0 +; X32-SSE-NEXT: psraw $1, %xmm1 +; X32-SSE-NEXT: andnps %xmm1, %xmm2 +; X32-SSE-NEXT: orps %xmm2, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <8 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7> ret <8 x i8> %shift } define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind { -; SSE-LABEL: constant_shift_v4i8: -; SSE: # %bb.0: -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: psraw $8, %xmm0 -; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0 -; SSE-NEXT: psrlw $8, %xmm0 -; SSE-NEXT: packuswb %xmm2, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: constant_shift_v4i8: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $24, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $24, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $27, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: psrad $26, %xmm3 +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1] +; SSE2-NEXT: psrad $25, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: 
constant_shift_v4i8: +; SSE41: # %bb.0: +; SSE41-NEXT: pslld $24, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $24, %xmm1 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $27, %xmm2 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad $25, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; SSE41-NEXT: psrad $26, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] +; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_shift_v4i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $27, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $25, %xmm0, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpsrad $26, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_shift_v4i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vzeroupper +; AVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ; AVX2-NEXT: retq ; -; XOP-LABEL: constant_shift_v4i8: -; XOP: # %bb.0: -; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0 -; XOP-NEXT: retq -; -; AVX512DQ-LABEL: constant_shift_v4i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq +; XOPAVX1-LABEL: constant_shift_v4i8: +; XOPAVX1: # %bb.0: +; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX1-NEXT: retq ; -; AVX512BW-LABEL: constant_shift_v4i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,0,0,0,0,0,0,0,0,0,0,0] -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; XOPAVX2-LABEL: constant_shift_v4i8: +; XOPAVX2: # %bb.0: +; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; XOPAVX2-NEXT: retq ; -; AVX512DQVL-LABEL: constant_shift_v4i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: constant_shift_v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, 
%xmm0 +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: constant_shift_v4i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; AVX512VL-LABEL: constant_shift_v4i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v4i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pxor %xmm1, %xmm1 +; X32-SSE-NEXT: pslld $24, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad $24, %xmm1 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: psraw $8, %xmm0 -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 +; X32-SSE-NEXT: psrad $27, %xmm2 +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: psrad $26, %xmm3 +; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1] +; X32-SSE-NEXT: psrad $25, %xmm0 +; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] +; X32-SSE-NEXT: movaps %xmm1, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <4 x i8> %a, <i8 0, i8 1, i8 2, i8 3> ret <4 x i8> %shift } define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { -; SSE-LABEL: constant_shift_v2i8: -; SSE: # %bb.0: -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: psraw $8, %xmm0 -; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0 -; SSE-NEXT: psrlw $8, %xmm0 -; SSE-NEXT: packuswb %xmm2, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: constant_shift_v2i8: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $56, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlq $2, %xmm1 +; SSE2-NEXT: psrlq $3, %xmm0 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; SSE2-NEXT: movapd {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; SSE2-NEXT: xorpd %xmm1, %xmm0 +; SSE2-NEXT: psubq %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: constant_shift_v2i8: +; SSE41: # %bb.0: +; SSE41-NEXT: psllq $56, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrlq $3, %xmm1 +; SSE41-NEXT: psrlq $2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; SSE41-NEXT: pxor %xmm1, %xmm0 +; SSE41-NEXT: psubq %xmm1, %xmm0 +; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_shift_v2i8: ; 
AVX1: # %bb.0: -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $2, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_shift_v2i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vzeroupper +; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976] +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; XOP-LABEL: constant_shift_v2i8: ; XOP: # %bb.0: -; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsllq $56, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq ; -; AVX512DQ-LABEL: constant_shift_v2i8: -; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQ-NEXT: vzeroupper -; AVX512DQ-NEXT: retq -; -; AVX512BW-LABEL: constant_shift_v2i8: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0] -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512DQVL-LABEL: constant_shift_v2i8: -; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0 -; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0 -; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512DQVL-NEXT: vzeroupper -; AVX512DQVL-NEXT: retq +; AVX512-LABEL: constant_shift_v2i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3] +; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; -; AVX512BWVL-LABEL: constant_shift_v2i8: -; AVX512BWVL: # %bb.0: -; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0 -; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0 -; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0 -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: 
retq +; AVX512VL-LABEL: constant_shift_v2i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: constant_shift_v2i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pxor %xmm1, %xmm1 -; X32-SSE-NEXT: movdqa %xmm0, %xmm2 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: psraw $8, %xmm0 -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: psrlw $8, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 +; X32-SSE-NEXT: psllq $56, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad $31, %xmm1 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; X32-SSE-NEXT: psrad $24, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrlq $2, %xmm1 +; X32-SSE-NEXT: psrlq $3, %xmm0 +; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; X32-SSE-NEXT: movapd {{.*#+}} xmm1 = [1.4916681462400413E-154,1.2882297539194267E-231] +; X32-SSE-NEXT: xorpd %xmm1, %xmm0 +; X32-SSE-NEXT: psubq %xmm1, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <2 x i8> %a, <i8 2, i8 3> ret <2 x i8> %shift @@ -2217,34 +2511,88 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { ; define <2 x i32> @splatconstant_shift_v2i32(<2 x i32> %a) nounwind { -; SSE-LABEL: splatconstant_shift_v2i32: -; SSE: # %bb.0: -; SSE-NEXT: psrad $5, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: splatconstant_shift_v2i32: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrad $5, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: psrlq $5, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: retq ; -; AVX-LABEL: splatconstant_shift_v2i32: -; AVX: # %bb.0: -; AVX-NEXT: vpsrad $5, %xmm0, %xmm0 -; AVX-NEXT: retq +; SSE41-LABEL: splatconstant_shift_v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psllq $32, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrad $5, %xmm0 +; SSE41-NEXT: psrlq $5, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: splatconstant_shift_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $5, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: retq +; +; AVX2-LABEL: splatconstant_shift_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: vpsrad 
$5, %xmm0, %xmm1 +; AVX2-NEXT: vpsrlq $5, %xmm0, %xmm0 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: retq ; ; XOP-LABEL: splatconstant_shift_v2i32: ; XOP: # %bb.0: -; XOP-NEXT: vpsrad $5, %xmm0, %xmm0 +; XOP-NEXT: vpsllq $32, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512-LABEL: splatconstant_shift_v2i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsrad $5, %xmm0, %xmm0 +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $37, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v2i32: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsrad $5, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $37, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatconstant_shift_v2i32: ; X32-SSE: # %bb.0: +; X32-SSE-NEXT: psllq $32, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; X32-SSE-NEXT: psrad $31, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; X32-SSE-NEXT: movdqa %xmm1, %xmm0 ; X32-SSE-NEXT: psrad $5, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; X32-SSE-NEXT: psrlq $5, %xmm1 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X32-SSE-NEXT: retl %shift = ashr <2 x i32> %a, <i32 5, i32 5> ret <2 x i32> %shift @@ -2253,66 +2601,135 @@ define <2 x i32> @splatconstant_shift_v2i32(<2 x i32> %a) nounwind { define <4 x i16> @splatconstant_shift_v4i16(<4 x i16> %a) nounwind { ; SSE-LABEL: splatconstant_shift_v4i16: ; SSE: # %bb.0: -; SSE-NEXT: psraw $3, %xmm0 +; SSE-NEXT: pslld $16, %xmm0 +; SSE-NEXT: psrad $19, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_shift_v4i16: ; AVX: # %bb.0: -; AVX-NEXT: vpsraw $3, %xmm0, %xmm0 +; AVX-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX-NEXT: vpsrad $19, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; XOP-LABEL: splatconstant_shift_v4i16: ; XOP: # %bb.0: -; XOP-NEXT: vpsraw $3, %xmm0, %xmm0 +; XOP-NEXT: vpslld $16, %xmm0, %xmm0 +; XOP-NEXT: vpsrad $19, %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512-LABEL: splatconstant_shift_v4i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0 +; AVX512-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $19, %xmm0, %xmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v4i16: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0 +; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $19, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatconstant_shift_v4i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: psraw $3, %xmm0 +; X32-SSE-NEXT: pslld $16, %xmm0 +; X32-SSE-NEXT: psrad $19, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3> ret <4 x i16> %shift } define <2 x i16> @splatconstant_shift_v2i16(<2 x i16> %a) nounwind { -; SSE-LABEL: splatconstant_shift_v2i16: -; SSE: # %bb.0: -; SSE-NEXT: psraw $3, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: splatconstant_shift_v2i16: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $48, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; 
SSE2-NEXT: psrad $3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrlq $3, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: retq ; -; AVX-LABEL: splatconstant_shift_v2i16: -; AVX: # %bb.0: -; AVX-NEXT: vpsraw $3, %xmm0, %xmm0 -; AVX-NEXT: retq +; SSE41-LABEL: splatconstant_shift_v2i16: +; SSE41: # %bb.0: +; SSE41-NEXT: psllq $48, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: psrad $16, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $3, %xmm1 +; SSE41-NEXT: psrlq $3, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: retq +; +; AVX1-LABEL: splatconstant_shift_v2i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrad $3, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: retq +; +; AVX2-LABEL: splatconstant_shift_v2i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: vpsrad $3, %xmm0, %xmm1 +; AVX2-NEXT: vpsrlq $3, %xmm0, %xmm0 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; AVX2-NEXT: retq ; ; XOP-LABEL: splatconstant_shift_v2i16: ; XOP: # %bb.0: -; XOP-NEXT: vpsraw $3, %xmm0, %xmm0 +; XOP-NEXT: vpsllq $48, %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512-LABEL: splatconstant_shift_v2i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0 +; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $51, %zmm0, %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v2i16: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraq $51, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatconstant_shift_v2i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: psraw $3, %xmm0 +; X32-SSE-NEXT: psllq $48, %xmm0 +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad $31, %xmm1 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; X32-SSE-NEXT: psrad $16, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-SSE-NEXT: movdqa %xmm0, %xmm1 +; X32-SSE-NEXT: psrad $3, %xmm1 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; X32-SSE-NEXT: psrlq $3, %xmm0 +; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X32-SSE-NEXT: retl %shift = ashr <2 x i16> %a, <i16 3, i16 3> ret <2 x i16> %shift @@ -2321,52 +2738,38 @@ define <2 x i16> @splatconstant_shift_v2i16(<2 x i16> %a) nounwind { define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind { ; 
SSE-LABEL: splatconstant_shift_v8i8: ; SSE: # %bb.0: -; SSE-NEXT: psrlw $3, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; SSE-NEXT: pxor %xmm1, %xmm0 -; SSE-NEXT: psubb %xmm1, %xmm0 +; SSE-NEXT: psllw $8, %xmm0 +; SSE-NEXT: psraw $11, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_shift_v8i8: ; AVX: # %bb.0: -; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $11, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; XOP-LABEL: splatconstant_shift_v8i8: ; XOP: # %bb.0: -; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsllw $8, %xmm0, %xmm0 +; XOP-NEXT: vpsraw $11, %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512-LABEL: splatconstant_shift_v8i8: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512-NEXT: vpsraw $11, %xmm0, %xmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v8i8: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsraw $11, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatconstant_shift_v8i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: psrlw $3, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; X32-SSE-NEXT: pxor %xmm1, %xmm0 -; X32-SSE-NEXT: psubb %xmm1, %xmm0 +; X32-SSE-NEXT: psllw $8, %xmm0 +; X32-SSE-NEXT: psraw $11, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3> ret <8 x i8> %shift @@ -2375,106 +2778,135 @@ define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind { define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind { ; SSE-LABEL: splatconstant_shift_v4i8: ; SSE: # %bb.0: -; SSE-NEXT: psrlw $3, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; SSE-NEXT: pxor %xmm1, %xmm0 -; SSE-NEXT: psubb %xmm1, %xmm0 +; SSE-NEXT: pslld $24, %xmm0 +; SSE-NEXT: psrad $27, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_shift_v4i8: ; AVX: # %bb.0: -; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX-NEXT: vpsrad $27, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; XOP-LABEL: splatconstant_shift_v4i8: ; XOP: # %bb.0: -; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpslld $24, %xmm0, %xmm0 +; XOP-NEXT: vpsrad $27, %xmm0, %xmm0 ; XOP-NEXT: retq ; ; AVX512-LABEL: splatconstant_shift_v4i8: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $27, %xmm0, %xmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v4i8: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512VL-NEXT: vpsrad $27, %xmm0, %xmm0 ; AVX512VL-NEXT: retq ; ; X32-SSE-LABEL: splatconstant_shift_v4i8: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: psrlw $3, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; X32-SSE-NEXT: pxor %xmm1, %xmm0 -; X32-SSE-NEXT: psubb %xmm1, %xmm0 +; X32-SSE-NEXT: pslld $24, %xmm0 +; X32-SSE-NEXT: psrad $27, %xmm0 ; X32-SSE-NEXT: retl %shift = ashr <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3> ret <4 x i8> %shift } define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind { -; SSE-LABEL: splatconstant_shift_v2i8: -; SSE: # %bb.0: -; SSE-NEXT: psrlw $3, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; SSE-NEXT: pxor %xmm1, %xmm0 -; SSE-NEXT: psubb %xmm1, %xmm0 -; SSE-NEXT: retq +; SSE2-LABEL: splatconstant_shift_v2i8: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $56, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: psrlq $3, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: retq ; -; AVX-LABEL: splatconstant_shift_v2i8: -; AVX: # %bb.0: -; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 -; AVX-NEXT: retq +; SSE41-LABEL: splatconstant_shift_v2i8: +; SSE41: # %bb.0: +; SSE41-NEXT: psllq $56, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: psrad $24, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $3, %xmm1 +; SSE41-NEXT: psrlq $3, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: retq +; +; AVX1-LABEL: splatconstant_shift_v2i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: vpsrad $3, %xmm0, %xmm1 +; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; AVX1-NEXT: retq +; +; AVX2-LABEL: 
splatconstant_shift_v2i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpsrad $3, %xmm0, %xmm1
+; AVX2-NEXT: vpsrlq $3, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: retq
 ;
 ; XOP-LABEL: splatconstant_shift_v2i8:
 ; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpsllq $56, %xmm0, %xmm0
+; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT: retq
 ;
 ; AVX512-LABEL: splatconstant_shift_v2i8:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $59, %zmm0, %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 ;
 ; AVX512VL-LABEL: splatconstant_shift_v2i8:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsraq $59, %xmm0, %xmm0
 ; AVX512VL-NEXT: retq
 ;
 ; X32-SSE-LABEL: splatconstant_shift_v2i8:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: pxor %xmm1, %xmm0
-; X32-SSE-NEXT: psubb %xmm1, %xmm0
+; X32-SSE-NEXT: psllq $56, %xmm0
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrad $31, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X32-SSE-NEXT: psrad $24, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrad $3, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X32-SSE-NEXT: psrlq $3, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X32-SSE-NEXT: retl
 %shift = ashr <2 x i8> %a, <i8 3, i8 3>
 ret <2 x i8> %shift