author     Simon Pilgrim <llvm-dev@redking.me.uk>   2019-04-25 09:34:36 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2019-04-25 09:34:36 +0000
commit     0a7d1b3ce1d2ff51e49c260d022bb99bfb4e1dc7 (patch)
tree       aa9537a5b579d72484539e8680a1011b4c37c1ce /llvm/test/CodeGen/X86/bitcast-vector-bool.ll
parent     d144572dac23c3af6a1ea5efbdffd8813138e216 (diff)
[X86][SSE] combineBitcastvxi1 - add support for bitcasting to non-scalar integers
As before, truncate the movmsk scalar integer result to the equivalent scalar integer width, but then bitcast to the requested type.
We still have the issue identified in PR41594, but D61114 should handle this.
llvm-svn: 359176
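For context, the pattern these tests exercise looks roughly like the IR below. This is a minimal sketch reconstructed from the test names and the define signatures visible in the diff; the value names and exact body are illustrative, not the verbatim test source. A sign-bit compare produces a <4 x i1> mask, and that mask is bitcast to the non-scalar integer type <2 x i2>:

define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
  ; Each negative i32 lane contributes one set bit, which is what movmskps gathers.
  %cmp = icmp slt <4 x i32> %a0, zeroinitializer
  ; Bitcast the <4 x i1> mask to a non-scalar integer type (4 bits either way).
  %bc = bitcast <4 x i1> %cmp to <2 x i2>
  %lo = extractelement <2 x i2> %bc, i32 0
  %hi = extractelement <2 x i2> %bc, i32 1
  %sum = add i2 %lo, %hi
  ret i2 %sum
}

Previously combineBitcastvxi1 only fired when the destination was a scalar integer (i4 here), so the <2 x i2> case fell back to extracting and or-ing the mask lanes one at a time. With this change the compare lowers to a single movmskps, the result is truncated to the 4-bit mask width as before, and the final bitcast to <2 x i2> is free, as the updated CHECK lines below show.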
Diffstat (limited to 'llvm/test/CodeGen/X86/bitcast-vector-bool.ll')
-rw-r--r--  llvm/test/CodeGen/X86/bitcast-vector-bool.ll  | 1898
1 file changed, 177 insertions(+), 1721 deletions(-)
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll index 5304a7d4a09..50f40757e0c 100644 --- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll +++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll @@ -58,54 +58,21 @@ define i1 @bitcast_v2i64_to_v2i1(<2 x i64> %a0) nounwind { define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v4i32_to_v2i2: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm1 -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm0, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: addb %cl, %cl -; SSE2-SSSE3-NEXT: subb %al, %cl -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: shlb $2, %al -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm0, %edx -; SSE2-SSSE3-NEXT: shlb $3, %dl -; SSE2-SSSE3-NEXT: orb %al, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: andb $15, %dl -; SSE2-SSSE3-NEXT: movzbl %dl, %eax +; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax ; SSE2-SSSE3-NEXT: movl %eax, %ecx -; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $3, %ecx ; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 -; SSE2-SSSE3-NEXT: andl $3, %eax +; SSE2-SSSE3-NEXT: shrl $2, %eax ; SSE2-SSSE3-NEXT: movq %rax, %xmm1 -; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: bitcast_v4i32_to_v2i2: ; AVX12: # %bb.0: -; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX12-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 -; AVX12-NEXT: vmovd %xmm0, %eax -; AVX12-NEXT: vpextrd $1, %xmm0, %ecx -; AVX12-NEXT: andb $1, %cl -; AVX12-NEXT: addb %cl, %cl -; AVX12-NEXT: subb %al, %cl -; AVX12-NEXT: vpextrd $2, %xmm0, %eax -; AVX12-NEXT: andb $1, %al -; AVX12-NEXT: shlb $2, %al -; AVX12-NEXT: vpextrd $3, %xmm0, %edx -; AVX12-NEXT: shlb $3, %dl -; AVX12-NEXT: orb %al, %dl -; AVX12-NEXT: orb %cl, %dl -; AVX12-NEXT: andb $15, %dl -; AVX12-NEXT: movzbl %dl, %eax +; AVX12-NEXT: vmovmskps %xmm0, %eax ; AVX12-NEXT: movl %eax, %ecx ; AVX12-NEXT: shrl $2, %ecx ; AVX12-NEXT: vmovd %ecx, %xmm0 @@ -144,39 +111,10 @@ define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind { define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v8i16_to_v2i4: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1 -; SSE2-SSSE3-NEXT: pcmpgtw %xmm0, %xmm1 -; SSE2-SSSE3-NEXT: movd %xmm1, %ecx -; SSE2-SSSE3-NEXT: pextrw $1, %xmm1, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: addb %al, %al -; SSE2-SSSE3-NEXT: subb %cl, %al -; SSE2-SSSE3-NEXT: pextrw $2, %xmm1, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $2, %cl -; SSE2-SSSE3-NEXT: pextrw $3, %xmm1, %edx -; SSE2-SSSE3-NEXT: andb $1, %dl -; SSE2-SSSE3-NEXT: shlb $3, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: pextrw $4, %xmm1, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $4, %cl -; SSE2-SSSE3-NEXT: orb %dl, %cl -; SSE2-SSSE3-NEXT: pextrw $5, %xmm1, %edx -; SSE2-SSSE3-NEXT: andb $1, %dl -; SSE2-SSSE3-NEXT: shlb $5, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: pextrw $6, 
%xmm1, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $6, %cl -; SSE2-SSSE3-NEXT: orb %dl, %cl -; SSE2-SSSE3-NEXT: pextrw $7, %xmm1, %edx -; SSE2-SSSE3-NEXT: shlb $7, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: orb %al, %dl -; SSE2-SSSE3-NEXT: movzbl %dl, %eax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shrq $4, %rcx +; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSE2-SSSE3-NEXT: movzbl %al, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx ; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 ; SSE2-SSSE3-NEXT: andl $15, %eax ; SSE2-SSSE3-NEXT: movq %rax, %xmm1 @@ -188,38 +126,9 @@ define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind { ; ; AVX12-LABEL: bitcast_v8i16_to_v2i4: ; AVX12: # %bb.0: -; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX12-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 -; AVX12-NEXT: vmovd %xmm0, %ecx -; AVX12-NEXT: vpextrw $1, %xmm0, %eax -; AVX12-NEXT: andb $1, %al -; AVX12-NEXT: addb %al, %al -; AVX12-NEXT: subb %cl, %al -; AVX12-NEXT: vpextrw $2, %xmm0, %ecx -; AVX12-NEXT: andb $1, %cl -; AVX12-NEXT: shlb $2, %cl -; AVX12-NEXT: vpextrw $3, %xmm0, %edx -; AVX12-NEXT: andb $1, %dl -; AVX12-NEXT: shlb $3, %dl -; AVX12-NEXT: orb %cl, %dl -; AVX12-NEXT: vpextrw $4, %xmm0, %ecx -; AVX12-NEXT: andb $1, %cl -; AVX12-NEXT: shlb $4, %cl -; AVX12-NEXT: orb %dl, %cl -; AVX12-NEXT: vpextrw $5, %xmm0, %edx -; AVX12-NEXT: andb $1, %dl -; AVX12-NEXT: shlb $5, %dl -; AVX12-NEXT: orb %cl, %dl -; AVX12-NEXT: vpextrw $6, %xmm0, %ecx -; AVX12-NEXT: andb $1, %cl -; AVX12-NEXT: shlb $6, %cl -; AVX12-NEXT: orb %dl, %cl -; AVX12-NEXT: vpextrw $7, %xmm0, %edx -; AVX12-NEXT: shlb $7, %dl -; AVX12-NEXT: orb %cl, %dl -; AVX12-NEXT: orb %al, %dl -; AVX12-NEXT: movzbl %dl, %eax -; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 +; AVX12-NEXT: vpmovmskb %xmm0, %eax +; AVX12-NEXT: movzbl %al, %ecx ; AVX12-NEXT: shrl $4, %ecx ; AVX12-NEXT: vmovd %ecx, %xmm0 ; AVX12-NEXT: andl $15, %eax @@ -255,68 +164,8 @@ define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind { define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind { ; SSE2-LABEL: bitcast_v16i8_to_v2i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: pcmpgtb %xmm0, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rcx,%rax,2), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,4), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,8), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $4, %ecx -; SSE2-NEXT: orl %eax, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: shll $5, %eax -; SSE2-NEXT: orl %ecx, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $6, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $7, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $8, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $9, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $10, %ecx -; 
SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $11, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $12, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $13, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $14, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: shll $15, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: orl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: pmovmskb %xmm0, %eax +; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] @@ -327,68 +176,8 @@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind { ; ; SSSE3-LABEL: bitcast_v16i8_to_v2i8: ; SSSE3: # %bb.0: -; SSSE3-NEXT: pxor %xmm1, %xmm1 -; SSSE3-NEXT: pcmpgtb %xmm0, %xmm1 -; SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rcx,%rax,2), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rax,%rcx,4), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rax,%rcx,8), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $4, %ecx -; SSSE3-NEXT: orl %eax, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: shll $5, %eax -; SSSE3-NEXT: orl %ecx, %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $6, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $7, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $8, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $9, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $10, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $11, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $12, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $13, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $14, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: shll $15, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: orl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSSE3-NEXT: movd %eax, %xmm0 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero ; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al @@ -397,67 +186,8 
@@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind { ; ; AVX12-LABEL: bitcast_v16i8_to_v2i8: ; AVX12: # %bb.0: -; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX12-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0 -; AVX12-NEXT: vpextrb $1, %xmm0, %eax -; AVX12-NEXT: andl $1, %eax -; AVX12-NEXT: vpextrb $0, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: leal (%rcx,%rax,2), %eax -; AVX12-NEXT: vpextrb $2, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: leal (%rax,%rcx,4), %eax -; AVX12-NEXT: vpextrb $3, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: leal (%rax,%rcx,8), %eax -; AVX12-NEXT: vpextrb $4, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $4, %ecx -; AVX12-NEXT: orl %eax, %ecx -; AVX12-NEXT: vpextrb $5, %xmm0, %eax -; AVX12-NEXT: andl $1, %eax -; AVX12-NEXT: shll $5, %eax -; AVX12-NEXT: orl %ecx, %eax -; AVX12-NEXT: vpextrb $6, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $6, %ecx -; AVX12-NEXT: vpextrb $7, %xmm0, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: shll $7, %edx -; AVX12-NEXT: orl %ecx, %edx -; AVX12-NEXT: vpextrb $8, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $8, %ecx -; AVX12-NEXT: orl %edx, %ecx -; AVX12-NEXT: vpextrb $9, %xmm0, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: shll $9, %edx -; AVX12-NEXT: orl %ecx, %edx -; AVX12-NEXT: vpextrb $10, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $10, %ecx -; AVX12-NEXT: orl %edx, %ecx -; AVX12-NEXT: vpextrb $11, %xmm0, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: shll $11, %edx -; AVX12-NEXT: orl %ecx, %edx -; AVX12-NEXT: vpextrb $12, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $12, %ecx -; AVX12-NEXT: orl %edx, %ecx -; AVX12-NEXT: vpextrb $13, %xmm0, %edx -; AVX12-NEXT: andl $1, %edx -; AVX12-NEXT: shll $13, %edx -; AVX12-NEXT: orl %ecx, %edx -; AVX12-NEXT: vpextrb $14, %xmm0, %ecx -; AVX12-NEXT: andl $1, %ecx -; AVX12-NEXT: shll $14, %ecx -; AVX12-NEXT: orl %edx, %ecx -; AVX12-NEXT: vpextrb $15, %xmm0, %edx -; AVX12-NEXT: shll $15, %edx -; AVX12-NEXT: orl %ecx, %edx -; AVX12-NEXT: orl %eax, %edx -; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpmovmskb %xmm0, %eax +; AVX12-NEXT: vmovd %eax, %xmm0 ; AVX12-NEXT: vpextrb $0, %xmm0, %ecx ; AVX12-NEXT: vpextrb $1, %xmm0, %eax ; AVX12-NEXT: addb %cl, %al @@ -489,44 +219,8 @@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind { define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v4i64_to_v2i2: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648] -; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1 -; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm3 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2] -; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm5 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm5, %xmm1 -; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm3 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2] -; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm0, %xmm2 -; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,2],xmm1[0,2] -; SSE2-SSSE3-NEXT: movd %xmm0, %eax -; SSE2-SSSE3-NEXT: 
andb $1, %al -; SSE2-SSSE3-NEXT: addb %al, %al -; SSE2-SSSE3-NEXT: movd %xmm2, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: orb %al, %cl -; SSE2-SSSE3-NEXT: movd %xmm1, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: shlb $2, %al -; SSE2-SSSE3-NEXT: orb %cl, %al -; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] -; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3] -; SSE2-SSSE3-NEXT: movd %xmm2, %ecx -; SSE2-SSSE3-NEXT: shlb $3, %cl -; SSE2-SSSE3-NEXT: orb %al, %cl -; SSE2-SSSE3-NEXT: andb $15, %cl -; SSE2-SSSE3-NEXT: movzbl %cl, %eax +; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax ; SSE2-SSSE3-NEXT: movl %eax, %ecx ; SSE2-SSSE3-NEXT: shrl $2, %ecx ; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 @@ -538,72 +232,20 @@ define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind { ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq ; -; AVX1-LABEL: bitcast_v4i64_to_v2i2: -; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: vpextrd $1, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: addb %cl, %cl -; AVX1-NEXT: orb %al, %cl -; AVX1-NEXT: vpextrd $2, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: shlb $2, %al -; AVX1-NEXT: orb %cl, %al -; AVX1-NEXT: vpextrd $3, %xmm0, %ecx -; AVX1-NEXT: shlb $3, %cl -; AVX1-NEXT: orb %al, %cl -; AVX1-NEXT: andb $15, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: andl $3, %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vpextrb $0, %xmm1, %ecx -; AVX1-NEXT: vpextrb $0, %xmm0, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: # kill: def $al killed $al killed $eax -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: bitcast_v4i64_to_v2i2: -; AVX2: # %bb.0: -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: vpextrd $1, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: addb %cl, %cl -; AVX2-NEXT: orb %al, %cl -; AVX2-NEXT: vpextrd $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: shlb $2, %al -; AVX2-NEXT: orb %cl, %al -; AVX2-NEXT: vpextrd $3, %xmm0, %ecx -; AVX2-NEXT: shlb $3, %cl -; AVX2-NEXT: orb %al, %cl -; AVX2-NEXT: andb $15, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $2, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm0 -; AVX2-NEXT: andl $3, %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vpextrb $0, %xmm1, %ecx -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: # kill: def $al killed $al killed $eax -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq +; AVX12-LABEL: bitcast_v4i64_to_v2i2: +; AVX12: # %bb.0: +; AVX12-NEXT: vmovmskpd %ymm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $2, %ecx +; AVX12-NEXT: vmovd %ecx, %xmm0 +; AVX12-NEXT: andl $3, %eax +; AVX12-NEXT: vmovd %eax, %xmm1 +; AVX12-NEXT: vpextrb $0, %xmm1, %ecx +; AVX12-NEXT: vpextrb $0, %xmm0, %eax +; AVX12-NEXT: addb %cl, %al +; AVX12-NEXT: # kill: def $al killed $al killed $eax +; AVX12-NEXT: vzeroupper +; AVX12-NEXT: retq ; ; AVX512-LABEL: bitcast_v4i64_to_v2i2: ; AVX512: # %bb.0: @@ -633,41 
+275,11 @@ define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind { define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v8i32_to_v2i4: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2 -; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm3 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm3, %ecx -; SSE2-SSSE3-NEXT: pextrw $2, %xmm3, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: addb %al, %al -; SSE2-SSSE3-NEXT: subb %cl, %al -; SSE2-SSSE3-NEXT: pextrw $4, %xmm3, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $2, %cl -; SSE2-SSSE3-NEXT: pextrw $6, %xmm3, %edx -; SSE2-SSSE3-NEXT: andb $1, %dl -; SSE2-SSSE3-NEXT: shlb $3, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm2, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $4, %cl -; SSE2-SSSE3-NEXT: orb %dl, %cl -; SSE2-SSSE3-NEXT: pextrw $2, %xmm2, %edx -; SSE2-SSSE3-NEXT: andb $1, %dl -; SSE2-SSSE3-NEXT: shlb $5, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: pextrw $4, %xmm2, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $6, %cl -; SSE2-SSSE3-NEXT: orb %dl, %cl -; SSE2-SSSE3-NEXT: pextrw $6, %xmm2, %edx -; SSE2-SSSE3-NEXT: shlb $7, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: orb %al, %dl -; SSE2-SSSE3-NEXT: movzbl %dl, %eax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shrq $4, %rcx +; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 +; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSE2-SSSE3-NEXT: movzbl %al, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx ; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 ; SSE2-SSSE3-NEXT: andl $15, %eax ; SSE2-SSSE3-NEXT: movq %rax, %xmm1 @@ -677,98 +289,20 @@ define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind { ; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al ; SSE2-SSSE3-NEXT: retq ; -; AVX1-LABEL: bitcast_v8i32_to_v2i4: -; AVX1: # %bb.0: -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2 -; AVX1-NEXT: vpextrw $0, %xmm2, %ecx -; AVX1-NEXT: vpextrw $2, %xmm2, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: addb %al, %al -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: vpextrw $4, %xmm2, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: shlb $2, %cl -; AVX1-NEXT: vpextrw $6, %xmm2, %edx -; AVX1-NEXT: andb $1, %dl -; AVX1-NEXT: shlb $3, %dl -; AVX1-NEXT: orb %cl, %dl -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vpextrw $0, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: shlb $4, %cl -; AVX1-NEXT: orb %dl, %cl -; AVX1-NEXT: vpextrw $2, %xmm0, %edx -; AVX1-NEXT: andb $1, %dl -; AVX1-NEXT: shlb $5, %dl -; AVX1-NEXT: orb %cl, %dl -; AVX1-NEXT: vpextrw $4, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: shlb $6, %cl -; AVX1-NEXT: orb %dl, %cl -; AVX1-NEXT: vpextrw $6, %xmm0, %edx -; AVX1-NEXT: shlb $7, %dl -; AVX1-NEXT: orb %cl, %dl -; AVX1-NEXT: orb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $4, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: andl $15, %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vpextrb $0, %xmm1, %ecx -; AVX1-NEXT: vpextrb $0, %xmm0, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: # kill: def $al killed $al killed $eax -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: bitcast_v8i32_to_v2i4: -; AVX2: # %bb.0: -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpextrw $0, %xmm0, 
%ecx -; AVX2-NEXT: vpextrw $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: addb %al, %al -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: vpextrw $4, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: shlb $2, %cl -; AVX2-NEXT: vpextrw $6, %xmm0, %edx -; AVX2-NEXT: andb $1, %dl -; AVX2-NEXT: shlb $3, %dl -; AVX2-NEXT: orb %cl, %dl -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpextrw $0, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: shlb $4, %cl -; AVX2-NEXT: orb %dl, %cl -; AVX2-NEXT: vpextrw $2, %xmm0, %edx -; AVX2-NEXT: andb $1, %dl -; AVX2-NEXT: shlb $5, %dl -; AVX2-NEXT: orb %cl, %dl -; AVX2-NEXT: vpextrw $4, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: shlb $6, %cl -; AVX2-NEXT: orb %dl, %cl -; AVX2-NEXT: vpextrw $6, %xmm0, %edx -; AVX2-NEXT: shlb $7, %dl -; AVX2-NEXT: orb %cl, %dl -; AVX2-NEXT: orb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: movl %eax, %ecx -; AVX2-NEXT: shrl $4, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm0 -; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vpextrb $0, %xmm1, %ecx -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: # kill: def $al killed $al killed $eax -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq +; AVX12-LABEL: bitcast_v8i32_to_v2i4: +; AVX12: # %bb.0: +; AVX12-NEXT: vmovmskps %ymm0, %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $4, %ecx +; AVX12-NEXT: vmovd %ecx, %xmm0 +; AVX12-NEXT: andl $15, %eax +; AVX12-NEXT: vmovd %eax, %xmm1 +; AVX12-NEXT: vpextrb $0, %xmm1, %ecx +; AVX12-NEXT: vpextrb $0, %xmm0, %eax +; AVX12-NEXT: addb %cl, %al +; AVX12-NEXT: # kill: def $al killed $al killed $eax +; AVX12-NEXT: vzeroupper +; AVX12-NEXT: retq ; ; AVX512-LABEL: bitcast_v8i32_to_v2i4: ; AVX512: # %bb.0: @@ -797,71 +331,9 @@ define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind { define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind { ; SSE2-LABEL: bitcast_v16i16_to_v2i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm2, %xmm2 -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 -; SSE2-NEXT: pcmpgtw %xmm0, %xmm2 -; SSE2-NEXT: packsswb %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rcx,%rax,2), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,4), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,8), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $4, %ecx -; SSE2-NEXT: orl %eax, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: shll $5, %eax -; SSE2-NEXT: orl %ecx, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $6, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $7, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $8, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $9, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $10, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll 
$11, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $12, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $13, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $14, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: shll $15, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: orl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: packsswb %xmm1, %xmm0 +; SSE2-NEXT: pmovmskb %xmm0, %eax +; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] @@ -872,71 +344,9 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind { ; ; SSSE3-LABEL: bitcast_v16i16_to_v2i8: ; SSSE3: # %bb.0: -; SSSE3-NEXT: pxor %xmm2, %xmm2 -; SSSE3-NEXT: pxor %xmm3, %xmm3 -; SSSE3-NEXT: pcmpgtw %xmm1, %xmm3 -; SSSE3-NEXT: pcmpgtw %xmm0, %xmm2 -; SSSE3-NEXT: packsswb %xmm3, %xmm2 -; SSSE3-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rcx,%rax,2), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rax,%rcx,4), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: leal (%rax,%rcx,8), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $4, %ecx -; SSSE3-NEXT: orl %eax, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: andl $1, %eax -; SSSE3-NEXT: shll $5, %eax -; SSSE3-NEXT: orl %ecx, %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $6, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $7, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $8, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $9, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $10, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $11, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $12, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: andl $1, %edx -; SSSE3-NEXT: shll $13, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: andl $1, %ecx -; SSSE3-NEXT: shll $14, %ecx -; SSSE3-NEXT: orl %edx, %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: shll $15, %edx -; SSSE3-NEXT: orl %ecx, %edx -; SSSE3-NEXT: orl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: packsswb %xmm1, %xmm0 +; SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSSE3-NEXT: movd %eax, %xmm0 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero ; SSSE3-NEXT: movdqa %xmm0, 
-{{[0-9]+}}(%rsp) ; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al @@ -945,69 +355,10 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind { ; ; AVX1-LABEL: bitcast_v16i16_to_v2i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm2, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpextrb $0, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rcx,%rax,2), %eax -; AVX1-NEXT: vpextrb $4, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rax,%rcx,4), %eax -; AVX1-NEXT: vpextrb $6, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rax,%rcx,8), %eax -; AVX1-NEXT: vpextrb $8, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $4, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpextrb $10, %xmm2, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: shll $5, %eax -; AVX1-NEXT: orl %ecx, %eax -; AVX1-NEXT: vpextrb $12, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $6, %ecx -; AVX1-NEXT: vpextrb $14, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $7, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vpextrb $0, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $8, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $2, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $9, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $4, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $10, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $6, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $11, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $8, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $12, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $10, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $13, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $12, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $14, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $14, %xmm0, %edx -; AVX1-NEXT: shll $15, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovmskb %xmm0, %eax +; AVX1-NEXT: vmovd %eax, %xmm0 ; AVX1-NEXT: vpextrb $0, %xmm0, %ecx ; AVX1-NEXT: vpextrb $1, %xmm0, %eax ; AVX1-NEXT: addb %cl, %al @@ -1019,66 +370,10 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind { ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: leal (%rcx,%rax,2), %eax -; AVX2-NEXT: vpextrb $4, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: leal (%rax,%rcx,4), %eax -; AVX2-NEXT: vpextrb $6, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: leal (%rax,%rcx,8), %eax -; AVX2-NEXT: vpextrb $8, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $4, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: shll $5, %eax -; AVX2-NEXT: orl %ecx, %eax -; AVX2-NEXT: vpextrb $12, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $6, %ecx -; AVX2-NEXT: vpextrb $14, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $7, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: 
vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $8, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $2, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $9, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $4, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $10, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $6, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $11, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $8, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $12, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $10, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $13, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $12, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $14, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $14, %xmm0, %edx -; AVX2-NEXT: shll $15, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: orl %eax, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpmovmskb %xmm0, %eax +; AVX2-NEXT: vmovd %eax, %xmm0 ; AVX2-NEXT: vpextrb $0, %xmm0, %ecx ; AVX2-NEXT: vpextrb $1, %xmm0, %eax ; AVX2-NEXT: addb %cl, %al @@ -1108,288 +403,40 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind { define i16 @bitcast_v32i8_to_v2i16(<32 x i8> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v32i8_to_v2i16: ; SSE2-SSSE3: # %bb.0: +; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax +; SSE2-SSSE3-NEXT: pmovmskb %xmm1, %ecx +; SSE2-SSSE3-NEXT: shll $16, %ecx +; SSE2-SSSE3-NEXT: orl %eax, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx +; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax +; SSE2-SSSE3-NEXT: addl %ecx, %eax +; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: bitcast_v32i8_to_v2i16: ; AVX1: # %bb.0: -; AVX1-NEXT: pushq %rbp -; AVX1-NEXT: movq %rsp, %rbp -; AVX1-NEXT: andq $-32, %rsp -; AVX1-NEXT: subq $32, %rsp -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2 -; AVX1-NEXT: vpextrb $1, %xmm2, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: vpextrb $0, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rcx,%rax,2), %eax -; AVX1-NEXT: vpextrb $2, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rax,%rcx,4), %eax -; AVX1-NEXT: vpextrb $3, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: leal (%rax,%rcx,8), %eax -; AVX1-NEXT: vpextrb $4, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $4, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpextrb $5, %xmm2, %eax -; AVX1-NEXT: andl $1, %eax -; AVX1-NEXT: shll $5, %eax -; AVX1-NEXT: orl %ecx, %eax -; AVX1-NEXT: vpextrb $6, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $6, %ecx -; AVX1-NEXT: vpextrb $7, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $7, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $8, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $8, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $9, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $9, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $10, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $10, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $11, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $11, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $12, %xmm2, %ecx 
-; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $12, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $13, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $13, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $14, %xmm2, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $14, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $15, %xmm2, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $15, %edx -; AVX1-NEXT: orl %ecx, %edx +; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vpextrb $0, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpmovmskb %xmm0, %ecx ; AVX1-NEXT: shll $16, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $1, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $17, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $2, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $18, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $3, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $19, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $4, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $20, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $5, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $21, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $6, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $22, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $7, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $23, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $8, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $24, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $9, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $25, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $10, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $26, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $11, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $27, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $12, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $28, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $13, %xmm0, %edx -; AVX1-NEXT: andl $1, %edx -; AVX1-NEXT: shll $29, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpextrb $14, %xmm0, %ecx -; AVX1-NEXT: andl $1, %ecx -; AVX1-NEXT: shll $30, %ecx -; AVX1-NEXT: orl %edx, %ecx -; AVX1-NEXT: vpextrb $15, %xmm0, %edx -; AVX1-NEXT: shll $31, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: orl %eax, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm0 ; AVX1-NEXT: vpextrw $0, %xmm0, %ecx ; AVX1-NEXT: vpextrw $1, %xmm0, %eax ; AVX1-NEXT: addl %ecx, %eax ; AVX1-NEXT: # kill: def $ax killed $ax killed $eax -; AVX1-NEXT: movq %rbp, %rsp -; AVX1-NEXT: popq %rbp ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: bitcast_v32i8_to_v2i16: ; AVX2: # %bb.0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $32, %rsp -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: leal (%rcx,%rax,2), %eax -; AVX2-NEXT: vpextrb $2, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: leal (%rax,%rcx,4), %eax -; AVX2-NEXT: vpextrb $3, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; 
AVX2-NEXT: leal (%rax,%rcx,8), %eax -; AVX2-NEXT: vpextrb $4, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $4, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: shll $5, %eax -; AVX2-NEXT: orl %ecx, %eax -; AVX2-NEXT: vpextrb $6, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $6, %ecx -; AVX2-NEXT: vpextrb $7, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $7, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $8, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $8, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $9, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $9, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $10, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $10, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $11, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $11, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $12, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $12, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $13, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $13, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $14, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $14, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $15, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $15, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $16, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $1, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $17, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $2, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $18, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $3, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $19, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $4, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $20, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $5, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $21, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $6, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $22, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $7, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $23, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $8, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $24, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $9, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $25, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $10, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $26, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $11, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $27, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $12, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $28, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $13, %xmm0, %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: shll $29, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpextrb $14, %xmm0, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: shll $30, %ecx -; AVX2-NEXT: orl %edx, %ecx -; AVX2-NEXT: vpextrb $15, %xmm0, %edx -; AVX2-NEXT: shll $31, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: orl %eax, %edx -; AVX2-NEXT: vmovd 
%edx, %xmm0 +; AVX2-NEXT: vpmovmskb %ymm0, %eax +; AVX2-NEXT: vmovd %eax, %xmm0 ; AVX2-NEXT: vpextrw $0, %xmm0, %ecx ; AVX2-NEXT: vpextrw $1, %xmm0, %eax ; AVX2-NEXT: addl %ecx, %eax ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax -; AVX2-NEXT: movq %rbp, %rsp -; AVX2-NEXT: popq %rbp ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1426,74 +473,49 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind { ; SSE2-SSSE3-LABEL: bitcast_v8i64_to_v2i4: ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648] -; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm3 ; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm5 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm5 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm5 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm6, %xmm0 +; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm3 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm0, %xmm5 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm5, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: pextrw $4, %xmm5, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: addb %cl, %cl -; SSE2-SSSE3-NEXT: orb %al, %cl +; SSE2-SSSE3-NEXT: por %xmm3, %xmm5 +; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm6, %xmm7 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm7, %xmm2 +; SSE2-SSSE3-NEXT: packssdw %xmm5, %xmm2 ; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1 -; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2] +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm3 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2] ; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm1, %xmm3 +; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm1, %xmm0 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: shlb $2, %al -; SSE2-SSSE3-NEXT: orb %cl, %al -; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $3, %cl -; SSE2-SSSE3-NEXT: orb %al, %cl -; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm2 -; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2] -; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %edx -; SSE2-SSSE3-NEXT: andb $1, %dl -; SSE2-SSSE3-NEXT: shlb $4, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; 
SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax -; SSE2-SSSE3-NEXT: andb $1, %al -; SSE2-SSSE3-NEXT: shlb $5, %al -; SSE2-SSSE3-NEXT: orb %dl, %al -; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm3 -; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0 -; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm0 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2] -; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] -; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2 -; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx -; SSE2-SSSE3-NEXT: andb $1, %cl -; SSE2-SSSE3-NEXT: shlb $6, %cl -; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %edx -; SSE2-SSSE3-NEXT: shlb $7, %dl -; SSE2-SSSE3-NEXT: orb %cl, %dl -; SSE2-SSSE3-NEXT: orb %al, %dl -; SSE2-SSSE3-NEXT: movzbl %dl, %eax -; SSE2-SSSE3-NEXT: movq %rax, %rcx -; SSE2-SSSE3-NEXT: shrq $4, %rcx +; SSE2-SSSE3-NEXT: pand %xmm5, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: packssdw %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: pmovmskb %xmm1, %eax +; SSE2-SSSE3-NEXT: movzbl %al, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx ; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 ; SSE2-SSSE3-NEXT: andl $15, %eax ; SSE2-SSSE3-NEXT: movq %rax, %xmm1 @@ -1505,46 +527,15 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind { ; ; AVX1-LABEL: bitcast_v8i64_to_v2i4: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: vpextrw $1, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: addb %cl, %cl -; AVX1-NEXT: orb %al, %cl -; AVX1-NEXT: vpextrw $2, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: shlb $2, %al -; AVX1-NEXT: orb %cl, %al -; AVX1-NEXT: vpextrw $3, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: shlb $3, %cl -; AVX1-NEXT: orb %al, %cl -; AVX1-NEXT: vpextrw $4, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: shlb $4, %al -; AVX1-NEXT: orb %cl, %al -; AVX1-NEXT: vpextrw $5, %xmm0, %ecx -; AVX1-NEXT: andb $1, %cl -; AVX1-NEXT: shlb $5, %cl -; AVX1-NEXT: orb %al, %cl -; AVX1-NEXT: vpextrw $6, %xmm0, %eax -; AVX1-NEXT: andb $1, %al -; AVX1-NEXT: shlb $6, %al -; AVX1-NEXT: vpextrw $7, %xmm0, %edx -; AVX1-NEXT: shlb $7, %dl -; AVX1-NEXT: orb %al, %dl -; AVX1-NEXT: orb %cl, %dl -; AVX1-NEXT: movzbl %dl, %eax +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovmskps %ymm0, %eax ; AVX1-NEXT: movl %eax, %ecx ; AVX1-NEXT: shrl $4, %ecx ; AVX1-NEXT: vmovd %ecx, %xmm0 @@ -1561,42 +552,10 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind { ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1 ; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: 
vpackssdw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: vpextrw $1, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: addb %cl, %cl -; AVX2-NEXT: orb %al, %cl -; AVX2-NEXT: vpextrw $2, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: shlb $2, %al -; AVX2-NEXT: orb %cl, %al -; AVX2-NEXT: vpextrw $3, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: shlb $3, %cl -; AVX2-NEXT: orb %al, %cl -; AVX2-NEXT: vpextrw $4, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: shlb $4, %al -; AVX2-NEXT: orb %cl, %al -; AVX2-NEXT: vpextrw $5, %xmm0, %ecx -; AVX2-NEXT: andb $1, %cl -; AVX2-NEXT: shlb $5, %cl -; AVX2-NEXT: orb %al, %cl -; AVX2-NEXT: vpextrw $6, %xmm0, %eax -; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: shlb $6, %al -; AVX2-NEXT: vpextrw $7, %xmm0, %edx -; AVX2-NEXT: shlb $7, %dl -; AVX2-NEXT: orb %al, %dl -; AVX2-NEXT: orb %cl, %dl -; AVX2-NEXT: movzbl %dl, %eax +; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: movl %eax, %ecx ; AVX2-NEXT: shrl $4, %ecx ; AVX2-NEXT: vmovd %ecx, %xmm0 @@ -1636,82 +595,11 @@ define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind { define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind { ; SSE2-LABEL: bitcast_v16i32_to_v2i8: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtd %xmm3, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255] -; SSE2-NEXT: pand %xmm3, %xmm5 -; SSE2-NEXT: pxor %xmm6, %xmm6 -; SSE2-NEXT: pcmpgtd %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm3, %xmm6 -; SSE2-NEXT: packuswb %xmm5, %xmm6 -; SSE2-NEXT: pxor %xmm2, %xmm2 -; SSE2-NEXT: pcmpgtd %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm3, %xmm2 -; SSE2-NEXT: pcmpgtd %xmm0, %xmm4 -; SSE2-NEXT: pand %xmm3, %xmm4 -; SSE2-NEXT: packuswb %xmm2, %xmm4 -; SSE2-NEXT: packuswb %xmm6, %xmm4 -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rcx,%rax,2), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,4), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: leal (%rax,%rcx,8), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $4, %ecx -; SSE2-NEXT: orl %eax, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: andl $1, %eax -; SSE2-NEXT: shll $5, %eax -; SSE2-NEXT: orl %ecx, %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $6, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $7, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $8, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $9, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $10, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: andl $1, %edx -; SSE2-NEXT: shll $11, %edx -; SSE2-NEXT: orl %ecx, %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: andl $1, %ecx -; SSE2-NEXT: shll $12, %ecx -; SSE2-NEXT: orl %edx, %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; 
SSE2-NEXT: andl $1, %edx
-; SSE2-NEXT: shll $13, %edx
-; SSE2-NEXT: orl %ecx, %edx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: shll $14, %ecx
-; SSE2-NEXT: orl %edx, %ecx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT: shll $15, %edx
-; SSE2-NEXT: orl %ecx, %edx
-; SSE2-NEXT: orl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: packssdw %xmm3, %xmm2
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: packsswb %xmm2, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -1722,83 +610,11 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
;
; SSSE3-LABEL: bitcast_v16i32_to_v2i8:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm5, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm5
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm1, %xmm5
-; SSSE3-NEXT: pxor %xmm6, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6
-; SSSE3-NEXT: pshufb %xmm1, %xmm6
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; SSSE3-NEXT: pxor %xmm0, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm0
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
-; SSSE3-NEXT: pshufb %xmm1, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
-; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm6[0],xmm4[1]
-; SSSE3-NEXT: movapd %xmm4, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT: andl $1, %eax
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: leal (%rcx,%rax,2), %eax
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: leal (%rax,%rcx,4), %eax
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: leal (%rax,%rcx,8), %eax
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $4, %ecx
-; SSSE3-NEXT: orl %eax, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT: andl $1, %eax
-; SSSE3-NEXT: shll $5, %eax
-; SSSE3-NEXT: orl %ecx, %eax
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $6, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT: andl $1, %edx
-; SSSE3-NEXT: shll $7, %edx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $8, %ecx
-; SSSE3-NEXT: orl %edx, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT: andl $1, %edx
-; SSSE3-NEXT: shll $9, %edx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $10, %ecx
-; SSSE3-NEXT: orl %edx, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT: andl $1, %edx
-; SSSE3-NEXT: shll $11, %edx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $12, %ecx
-; SSSE3-NEXT: orl %edx, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT: andl $1, %edx
-; SSSE3-NEXT: shll $13, %edx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSSE3-NEXT: andl $1, %ecx
-; SSSE3-NEXT: shll $14, %ecx
-; SSSE3-NEXT: orl %edx, %ecx
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSSE3-NEXT: shll $15, %edx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: orl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: packssdw %xmm3, %xmm2
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: packsswb %xmm2, %xmm0
+; SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
@@ -1807,72 +623,13 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
;
; AVX1-LABEL: bitcast_v16i32_to_v2i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm3
-; AVX1-NEXT: vpextrb $4, %xmm3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpextrb $0, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rcx,%rax,2), %eax
-; AVX1-NEXT: vpextrb $8, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rax,%rcx,4), %eax
-; AVX1-NEXT: vpextrb $12, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rax,%rcx,8), %eax
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $4, %ecx
-; AVX1-NEXT: orl %eax, %ecx
-; AVX1-NEXT: vpextrb $4, %xmm0, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: shll $5, %eax
-; AVX1-NEXT: orl %ecx, %eax
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $6, %ecx
-; AVX1-NEXT: vpextrb $12, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $7, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $8, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $4, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $9, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $10, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $12, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $11, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $12, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $4, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $13, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $14, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $12, %xmm0, %edx
-; AVX1-NEXT: shll $15, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: orl %eax, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
; AVX1-NEXT: addb %cl, %al
@@ -1883,69 +640,14 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; AVX2-LABEL: bitcast_v16i32_to_v2i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rcx,%rax,2), %eax
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rax,%rcx,4), %eax
-; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rax,%rcx,8), %eax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $4, %ecx
-; AVX2-NEXT: orl %eax, %ecx
-; AVX2-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: shll $5, %eax
-; AVX2-NEXT: orl %ecx, %eax
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $6, %ecx
-; AVX2-NEXT: vpextrb $12, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $7, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpcmpgtd %ymm1, %ymm2, %ymm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $8, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $4, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $9, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $10, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $12, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $11, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $12, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $4, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $13, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $14, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $12, %xmm0, %edx
-; AVX2-NEXT: shll $15, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: orl %eax, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
; AVX2-NEXT: addb %cl, %al
@@ -1976,293 +678,47 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
define i16 @bitcast_v32i16_to_v2i16(<32 x i16> %a0) nounwind {
; SSE2-SSSE3-LABEL: bitcast_v32i16_to_v2i16:
; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
+; SSE2-SSSE3-NEXT: packsswb %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %ecx
+; SSE2-SSSE3-NEXT: shll $16, %ecx
+; SSE2-SSSE3-NEXT: orl %eax, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx
+; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-SSSE3-NEXT: addl %ecx, %eax
+; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_v32i16_to_v2i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm3
-; AVX1-NEXT: vpextrb $2, %xmm3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpextrb $0, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rcx,%rax,2), %eax
-; AVX1-NEXT: vpextrb $4, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rax,%rcx,4), %eax
-; AVX1-NEXT: vpextrb $6, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: leal (%rax,%rcx,8), %eax
-; AVX1-NEXT: vpextrb $8, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $4, %ecx
-; AVX1-NEXT: orl %eax, %ecx
-; AVX1-NEXT: vpextrb $10, %xmm3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: shll $5, %eax
-; AVX1-NEXT: orl %ecx, %eax
-; AVX1-NEXT: vpextrb $12, %xmm3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $6, %ecx
-; AVX1-NEXT: vpextrb $14, %xmm3, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $7, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $8, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $2, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $9, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $10, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $6, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $11, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $12, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $10, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $13, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $14, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $14, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $15, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $16, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $2, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $17, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $18, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $6, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $19, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $20, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $10, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $21, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $22, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $14, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $23, %edx
-; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $24, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $2, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $25, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $26, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $6, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $27, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $28, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $10, %xmm0, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: shll $29, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: shll $30, %ecx
-; AVX1-NEXT: orl %edx, %ecx
-; AVX1-NEXT: vpextrb $14, %xmm0, %edx
-; AVX1-NEXT: shll $31, %edx
-; AVX1-NEXT: orl %ecx, %edx
-; AVX1-NEXT: orl %eax, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpextrw $0, %xmm0, %ecx
; AVX1-NEXT: vpextrw $1, %xmm0, %eax
; AVX1-NEXT: addl %ecx, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_v32i16_to_v2i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpextrb $2, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rcx,%rax,2), %eax
-; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rax,%rcx,4), %eax
-; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: leal (%rax,%rcx,8), %eax
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $4, %ecx
-; AVX2-NEXT: orl %eax, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: shll $5, %eax
-; AVX2-NEXT: orl %ecx, %eax
-; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $6, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $7, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $8, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $2, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $9, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $10, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $6, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $11, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $12, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $13, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $14, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $15, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpcmpgtw %ymm1, %ymm2, %ymm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $16, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $2, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $17, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $18, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $6, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $19, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $20, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $21, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $22, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $23, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $24, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $2, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $25, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $26, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $6, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $27, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $28, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: shll $29, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: shll $30, %ecx
-; AVX2-NEXT: orl %edx, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, %edx
-; AVX2-NEXT: shll $31, %edx
-; AVX2-NEXT: orl %ecx, %edx
-; AVX2-NEXT: orl %eax, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpextrw $0, %xmm0, %ecx
; AVX2-NEXT: vpextrw $1, %xmm0, %eax
; AVX2-NEXT: addl %ecx, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2291,16 +747,16 @@ define i16 @bitcast_v32i16_to_v2i16(<32 x i16> %a0) nounwind {
ret i16 %5
}

-define i32 @bitcast_v64i8_to_v2i8(<64 x i8> %a0) nounwind {
-; SSE2-SSSE3-LABEL: bitcast_v64i8_to_v2i8:
+define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v64i8_to_v2i32:
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: bitcast_v64i8_to_v2i8:
+; AVX12-LABEL: bitcast_v64i8_to_v2i32:
; AVX12: # %bb.0:
; AVX12-NEXT: retq
;
-; AVX512-LABEL: bitcast_v64i8_to_v2i8:
+; AVX512-LABEL: bitcast_v64i8_to_v2i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovb2m %zmm0, %k0
; AVX512-NEXT: kmovq %k0, -{{[0-9]+}}(%rsp)