| author | Zvi Rackover <zvi.rackover@intel.com> | 2018-01-24 15:02:16 +0000 |
|---|---|---|
| committer | Zvi Rackover <zvi.rackover@intel.com> | 2018-01-24 15:02:16 +0000 |
| commit | 22bfa7e5743cb5c8f44b796a5b3162aa1637e564 (patch) | |
| tree | 440d655c1f79dc0cbafe9c965787e17ebb036802 /llvm/test/CodeGen | |
| parent | f15886eb30776345ff81c37c7a7449c32330af29 (diff) | |
X86 Tests: Add more sdiv combine cases. NFC
Add cases with vector non-splat pow2 constant divisors.
llvm-svn: 323329
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-sdiv.ll | 3169 |
1 file changed, 3160 insertions, 9 deletions
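These non-splat power-of-two divisors are not yet handled by the vector sdiv combine, so llc currently scalarizes every lane: each element goes through the usual shift-based signed-division fixup (sign mask, bias by divisor-1 for negative inputs, then an arithmetic shift), which is the `sarw $15` / `shrl` / `addl` / `sarw $k` sequence repeated throughout the checks below, while divisor-1 lanes are passed through unchanged. A minimal C sketch of that per-lane computation (the helper name is mine, and it assumes `>>` on signed values compiles to an arithmetic shift, as gcc and clang provide):

```c
#include <stdint.h>

/* Per-lane expansion of x sdiv (1 << k), 1 <= k <= 15, rounding toward zero:
 * bias negative inputs by 2^k - 1 so the arithmetic shift truncates toward
 * zero rather than toward negative infinity. Lanes with divisor 1 are simply
 * passed through, which is why the checks blend element 0 in unchanged. */
static int16_t sdiv_by_pow2(int16_t x, unsigned k) {
    uint16_t sign = (uint16_t)(x >> 15);   /* 0xFFFF if x < 0 -- `sarw $15` */
    uint16_t bias = sign >> (16u - k);     /* 2^k - 1 if x < 0 -- `shrl`    */
    int16_t  sum  = (int16_t)(x + bias);   /* `addl %eax, %ecx`             */
    return (int16_t)(sum >> k);            /* final arithmetic `sarw $k`    */
}
```

For instance, a lane dividing by 4 (k = 2) is this same bias-then-shift computation, which at byte width shows up in the v16i8 checks as `sarb $7; shrb $6; addb; sarb $2`.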
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index b32a58c0ec6..9662958a9a9 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -1,7 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512BW ; fold (sdiv undef, x) -> 0 define i32 @combine_sdiv_undef0(i32 %x) { @@ -195,11 +197,11 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) { ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; AVX1-NEXT: retq ; -; AVX2-LABEL: combine_vec_sdiv_by_pos1: -; AVX2: # %bb.0: -; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: retq +; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pos1: +; AVX2ORLATER: # %bb.0: +; AVX2ORLATER-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 +; AVX2ORLATER-NEXT: retq %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255> %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16> ret <4 x i32> %2 @@ -228,8 +230,2224 @@ define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) { ret <4 x i32> %1 } -define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) { -; SSE-LABEL: combine_vec_sdiv_by_pow2b: +define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i8: +; SSE: # %bb.0: +; SSE-NEXT: pextrb $1, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $6, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $2, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pextrb $0, %xmm0, %ecx +; SSE-NEXT: movd %ecx, %xmm1 +; SSE-NEXT: pinsrb $1, %eax, %xmm1 +; SSE-NEXT: pextrb $2, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $2, %eax, %xmm1 +; SSE-NEXT: pextrb $3, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $4, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $4, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $3, %eax, %xmm1 +; SSE-NEXT: pextrb $4, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $5, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $3, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $4, %eax, %xmm1 +; SSE-NEXT: pextrb $5, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $3, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $5, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $5, %eax, %xmm1 +; SSE-NEXT: pextrb $6, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $2, %cl +; SSE-NEXT: 
addb %al, %cl +; SSE-NEXT: sarb $6, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $6, %eax, %xmm1 +; SSE-NEXT: pextrb $7, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $7, %eax, %xmm1 +; SSE-NEXT: pextrb $8, %xmm0, %eax +; SSE-NEXT: pinsrb $8, %eax, %xmm1 +; SSE-NEXT: pextrb $9, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $6, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $2, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $9, %eax, %xmm1 +; SSE-NEXT: pextrb $10, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $10, %eax, %xmm1 +; SSE-NEXT: pextrb $11, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $4, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $4, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $11, %eax, %xmm1 +; SSE-NEXT: pextrb $12, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $5, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $3, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $12, %eax, %xmm1 +; SSE-NEXT: pextrb $13, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $3, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $5, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $13, %eax, %xmm1 +; SSE-NEXT: pextrb $14, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarb $7, %cl +; SSE-NEXT: shrb $2, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb $6, %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $14, %eax, %xmm1 +; SSE-NEXT: pextrb $15, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: addb %al, %cl +; SSE-NEXT: sarb %cl +; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: pinsrb $15, %eax, %xmm1 +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_vec_sdiv_by_pow2b_v16i8: +; AVX: # %bb.0: +; AVX-NEXT: vpextrb $1, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $6, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $2, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpextrb $0, %xmm0, %ecx +; AVX-NEXT: vmovd %ecx, %xmm1 +; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $2, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: shrb $7, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $3, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $4, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $4, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $4, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $5, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $3, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $5, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $3, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $5, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $6, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $2, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $6, %cl +; AVX-NEXT: 
movzbl %cl, %eax +; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $7, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: shrb $7, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $8, %xmm0, %eax +; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $9, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $6, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $2, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $10, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: shrb $7, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $11, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $4, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $4, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $12, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $5, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $3, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $13, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $3, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $5, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $14, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarb $7, %cl +; AVX-NEXT: shrb $2, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb $6, %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $15, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: shrb $7, %cl +; AVX-NEXT: addb %al, %cl +; AVX-NEXT: sarb %cl +; AVX-NEXT: movzbl %cl, %eax +; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = sdiv <16 x i8> %x, <i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2, i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2> + ret <16 x i8> %1 +} + +define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i16: +; SSE: # %bb.0: +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm0 +; SSE-NEXT: pextrw $2, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm0 +; SSE-NEXT: pextrw $3, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm0 +; SSE-NEXT: pextrw $4, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE-NEXT: pextrw $5, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw 
$5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm0 +; SSE-NEXT: pextrw $6, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm0 +; SSE-NEXT: pextrw $7, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm0 +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $2, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $3, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $4, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $5, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $6, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: vpextrw $7, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v8i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: 
movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v8i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX512F-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $2, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $3, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $4, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $5, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $6, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX512F-NEXT: vpextrw $7, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v8i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $2, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $3, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; 
AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $5, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $6, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $7, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX512BW-NEXT: retq + %1 = sdiv <8 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2> + ret <8 x i16> %1 +} + +define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i16: +; SSE: # %bb.0: +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm0 +; SSE-NEXT: pextrw $2, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm0 +; SSE-NEXT: pextrw $3, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm0 +; SSE-NEXT: pextrw $4, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE-NEXT: pextrw $5, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm0 +; SSE-NEXT: pextrw $6, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm0 +; SSE-NEXT: pextrw $7, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm0 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, 
%ecx, %xmm1 +; SSE-NEXT: pextrw $2, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm1 +; SSE-NEXT: pextrw $3, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm1 +; SSE-NEXT: pextrw $4, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm1 +; SSE-NEXT: pextrw $5, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm1 +; SSE-NEXT: pextrw $6, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm1 +; SSE-NEXT: pextrw $7, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $2, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $3, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $4, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $5, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $6, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrw $7, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: 
vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $3, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $2, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $3, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $4, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $5, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $6, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrw $7, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm1 +; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: movl %eax, 
%ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v16i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX512F-NEXT: vmovss {{.*#+}} xmm3 = xmm1[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: vpextrw $2, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: vpextrw $3, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: vpextrw $4, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: vpextrw $5, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: vpextrw $6, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3 +; AVX512F-NEXT: 
vpextrw $7, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm1 +; AVX512F-NEXT: vmovss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $2, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $3, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $4, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $5, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $6, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $7, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm3 = xmm1[0],xmm2[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $2, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $3, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $4, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $5, %xmm1, %eax +; AVX512BW-NEXT: movl 
%eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $6, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $7, %xmm1, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm1 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $2, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $3, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $5, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $6, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX512BW-NEXT: vpextrw $7, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq + %1 = sdiv <16 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2> + ret <16 x i16> %1 +} + +define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v32i16: +; SSE: # %bb.0: +; SSE-NEXT: movdqa %xmm3, %xmm4 +; SSE-NEXT: movdqa %xmm1, %xmm5 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pxor %xmm3, %xmm3 +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm0 +; SSE-NEXT: pextrw 
$2, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm0 +; SSE-NEXT: pextrw $3, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm0 +; SSE-NEXT: pextrw $4, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE-NEXT: pextrw $5, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm0 +; SSE-NEXT: pextrw $6, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm0 +; SSE-NEXT: pextrw $7, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm0 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm1 +; SSE-NEXT: pextrw $2, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm1 +; SSE-NEXT: pextrw $3, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm1 +; SSE-NEXT: pextrw $4, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm1 +; SSE-NEXT: pextrw $5, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm1 +; SSE-NEXT: pextrw $6, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm1 +; SSE-NEXT: pextrw $7, %xmm5, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm1 +; SSE-NEXT: pxor %xmm5, %xmm5 +; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm5 +; SSE-NEXT: pextrw $2, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm5 +; SSE-NEXT: pextrw $3, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; 
SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm5 +; SSE-NEXT: pextrw $4, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm5 +; SSE-NEXT: pextrw $5, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm5 +; SSE-NEXT: pextrw $6, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm5 +; SSE-NEXT: pextrw $7, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm5 +; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3,4,5,6,7] +; SSE-NEXT: pextrw $1, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $14, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $2, %cx +; SSE-NEXT: pinsrw $1, %ecx, %xmm3 +; SSE-NEXT: pextrw $2, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $2, %ecx, %xmm3 +; SSE-NEXT: pextrw $3, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $12, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $4, %cx +; SSE-NEXT: pinsrw $3, %ecx, %xmm3 +; SSE-NEXT: pextrw $4, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $13, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $3, %cx +; SSE-NEXT: pinsrw $4, %ecx, %xmm3 +; SSE-NEXT: pextrw $5, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $11, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $5, %cx +; SSE-NEXT: pinsrw $5, %ecx, %xmm3 +; SSE-NEXT: pextrw $6, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarw $15, %cx +; SSE-NEXT: movzwl %cx, %ecx +; SSE-NEXT: shrl $10, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw $6, %cx +; SSE-NEXT: pinsrw $6, %ecx, %xmm3 +; SSE-NEXT: pextrw $7, %xmm4, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: shrl $15, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarw %cx +; SSE-NEXT: pinsrw $7, %ecx, %xmm3 +; SSE-NEXT: movdqa %xmm5, %xmm2 +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v32i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $2, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $3, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, 
%ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $4, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $5, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $6, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $7, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $2, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $3, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $4, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $5, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $6, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $7, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $2, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $3, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw 
$15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $4, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $5, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $6, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX1-NEXT: vpextrw $7, %xmm3, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7] +; AVX1-NEXT: vpextrw $1, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $2, %cx +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $3, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $4, %cx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $3, %cx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $5, %cx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarw $15, %cx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw $6, %cx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarw %cx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v32i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $2, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl 
%eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $3, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $4, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $5, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $6, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $7, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $2, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: 
shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $3, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $4, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $5, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $6, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $7, %xmm3, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7] +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $2, %cx +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $4, %cx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $3, %cx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $5, %cx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarw $15, %cx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw $6, %cx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarw %cx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v32i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX512F-NEXT: vmovss {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx 
+; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $2, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $3, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $4, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $5, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $6, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $7, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX512F-NEXT: vmovss {{.*#+}} xmm4 = xmm0[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $2, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $3, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $4, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $5, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $6, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $7, %xmm0, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm4, 
%xmm0 +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; AVX512F-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX512F-NEXT: vmovss {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $2, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $3, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $4, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $5, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $6, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX512F-NEXT: vpextrw $7, %xmm3, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX512F-NEXT: vmovss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3] +; AVX512F-NEXT: vpextrw $1, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $14, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $2, %cx +; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $2, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $3, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $12, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $4, %cx +; AVX512F-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $4, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $13, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $3, %cx +; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $5, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; AVX512F-NEXT: shrl $11, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $5, %cx +; AVX512F-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $6, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: sarw $15, %cx +; AVX512F-NEXT: movzwl %cx, %ecx +; 
AVX512F-NEXT: shrl $10, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw $6, %cx +; AVX512F-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX512F-NEXT: vpextrw $7, %xmm1, %eax +; AVX512F-NEXT: movl %eax, %ecx +; AVX512F-NEXT: shrl $15, %ecx +; AVX512F-NEXT: addl %eax, %ecx +; AVX512F-NEXT: sarw %cx +; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v32i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm2 +; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm3 = xmm2[0],xmm1[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $2, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $3, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $5, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $6, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm3, %xmm3 +; AVX512BW-NEXT: vpextrw $7, %xmm2, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm2 +; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm3 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm4 = xmm3[0],xmm1[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $2, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $3, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $4, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: 
sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $5, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $6, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $7, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 +; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm4 = xmm3[0],xmm1[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $2, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $3, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $4, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $5, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $6, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm4, %xmm4 +; AVX512BW-NEXT: vpextrw $7, %xmm3, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm4, %xmm3 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] +; AVX512BW-NEXT: vpextrw $1, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $14, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $2, %cx +; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $2, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; 
AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $3, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $12, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $4, %cx +; AVX512BW-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $13, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $3, %cx +; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $5, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $11, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $5, %cx +; AVX512BW-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $6, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: sarw $15, %cx +; AVX512BW-NEXT: movzwl %cx, %ecx +; AVX512BW-NEXT: shrl $10, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw $6, %cx +; AVX512BW-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX512BW-NEXT: vpextrw $7, %xmm0, %eax +; AVX512BW-NEXT: movl %eax, %ecx +; AVX512BW-NEXT: shrl $15, %ecx +; AVX512BW-NEXT: addl %eax, %ecx +; AVX512BW-NEXT: sarw %cx +; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX512BW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq + %1 = sdiv <32 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2> + ret <32 x i16> %1 +} + +define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i32: ; SSE: # %bb.0: ; SSE-NEXT: pextrd $1, %xmm0, %eax ; SSE-NEXT: movl %eax, %ecx @@ -254,7 +2472,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) { ; SSE-NEXT: pinsrd $3, %eax, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: combine_vec_sdiv_by_pow2b: +; AVX-LABEL: combine_vec_sdiv_by_pow2b_v4i32: ; AVX: # %bb.0: ; AVX-NEXT: vpextrd $1, %xmm0, %eax ; AVX-NEXT: movl %eax, %ecx @@ -281,3 +2499,936 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) { %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16> ret <4 x i32> %1 } + +define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i32: +; SSE: # %bb.0: +; SSE-NEXT: pextrd $1, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm0, %eax +; SSE-NEXT: pextrd $3, %xmm0, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm0 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm0 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm0 +; SSE-NEXT: pextrd $1, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm1, %eax +; SSE-NEXT: pextrd $3, %xmm1, %edx +; 
SSE-NEXT: pinsrd $1, %ecx, %xmm1 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm1 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_v8i32: +; AVX2ORLATER: # %bb.0: +; AVX2ORLATER-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2ORLATER-NEXT: vpextrd $1, %xmm1, %eax +; AVX2ORLATER-NEXT: movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $30, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $2, %ecx +; AVX2ORLATER-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm2 +; AVX2ORLATER-NEXT: vpextrd $2, %xmm1, %eax +; AVX2ORLATER-NEXT: movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $29, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $3, %ecx +; AVX2ORLATER-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX2ORLATER-NEXT: vpextrd $3, %xmm1, %eax +; AVX2ORLATER-NEXT: movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $28, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $4, %ecx +; AVX2ORLATER-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX2ORLATER-NEXT: vpextrd $1, %xmm0, %eax +; AVX2ORLATER-NEXT: movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $30, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $2, %ecx +; AVX2ORLATER-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm2 +; AVX2ORLATER-NEXT: vpextrd $2, %xmm0, %eax +; AVX2ORLATER-NEXT: movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $29, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $3, %ecx +; AVX2ORLATER-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX2ORLATER-NEXT: vpextrd $3, %xmm0, %eax +; AVX2ORLATER-NEXT: 
movl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $31, %ecx +; AVX2ORLATER-NEXT: shrl $28, %ecx +; AVX2ORLATER-NEXT: addl %eax, %ecx +; AVX2ORLATER-NEXT: sarl $4, %ecx +; AVX2ORLATER-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX2ORLATER-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2ORLATER-NEXT: retq + %1 = sdiv <8 x i32> %x, <i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16> + ret <8 x i32> %1 +} + +define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i32: +; SSE: # %bb.0: +; SSE-NEXT: pextrd $1, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm0, %eax +; SSE-NEXT: pextrd $3, %xmm0, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm0 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm0 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm0 +; SSE-NEXT: pextrd $1, %xmm1, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm1, %eax +; SSE-NEXT: pextrd $3, %xmm1, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm1 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm1 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm1 +; SSE-NEXT: pextrd $1, %xmm2, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm2, %eax +; SSE-NEXT: pextrd $3, %xmm2, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm2 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm2 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm2 +; SSE-NEXT: pextrd $1, %xmm3, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: pextrd $2, %xmm3, %eax +; SSE-NEXT: pextrd $3, %xmm3, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm3 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm3 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX1-NEXT: vpextrd $2, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx 
+; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrd $3, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm3 +; AVX1-NEXT: vpextrd $2, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrd $3, %xmm0, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX1-NEXT: vpextrd $2, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrd $3, %xmm2, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX1-NEXT: vpextrd $1, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $30, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $2, %ecx +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm3 +; AVX1-NEXT: vpextrd $2, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $29, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $3, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX1-NEXT: vpextrd $3, %xmm1, %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: sarl $31, %ecx +; AVX1-NEXT: shrl $28, %ecx +; AVX1-NEXT: addl %eax, %ecx +; AVX1-NEXT: sarl $4, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $30, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $2, %ecx +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX2-NEXT: vpextrd $2, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $29, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $3, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $3, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $28, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $4, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $30, %ecx +; AVX2-NEXT: addl %eax, %ecx +; 
AVX2-NEXT: sarl $2, %ecx +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm3 +; AVX2-NEXT: vpextrd $2, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $29, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $3, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $3, %xmm0, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $28, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $4, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $30, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $2, %ecx +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX2-NEXT: vpextrd $2, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $29, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $3, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $3, %xmm2, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $28, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $4, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $30, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $2, %ecx +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm3 +; AVX2-NEXT: vpextrd $2, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $29, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $3, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: sarl $31, %ecx +; AVX2-NEXT: shrl $28, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: sarl $4, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: combine_vec_sdiv_by_pow2b_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm1 +; AVX512-NEXT: vpextrd $1, %xmm1, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $30, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $2, %ecx +; AVX512-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm2 +; AVX512-NEXT: vpextrd $2, %xmm1, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $29, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $3, %ecx +; AVX512-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX512-NEXT: vpextrd $3, %xmm1, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $28, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $4, %ecx +; AVX512-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm2 +; AVX512-NEXT: vpextrd $1, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $30, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $2, %ecx +; AVX512-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX512-NEXT: vpextrd $2, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $29, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $3, %ecx +; AVX512-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX512-NEXT: vpextrd $3, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; 
AVX512-NEXT: shrl $28, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $4, %ecx +; AVX512-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512-NEXT: vpextrd $1, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $30, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $2, %ecx +; AVX512-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm3 +; AVX512-NEXT: vpextrd $2, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $29, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $3, %ecx +; AVX512-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX512-NEXT: vpextrd $3, %xmm2, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $28, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $4, %ecx +; AVX512-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm2 +; AVX512-NEXT: vpextrd $1, %xmm0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $30, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $2, %ecx +; AVX512-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm3 +; AVX512-NEXT: vpextrd $2, %xmm0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $29, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $3, %ecx +; AVX512-NEXT: vpinsrd $2, %ecx, %xmm3, %xmm3 +; AVX512-NEXT: vpextrd $3, %xmm0, %eax +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: sarl $31, %ecx +; AVX512-NEXT: shrl $28, %ecx +; AVX512-NEXT: addl %eax, %ecx +; AVX512-NEXT: sarl $4, %ecx +; AVX512-NEXT: vpinsrd $3, %ecx, %xmm3, %xmm0 +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %1 = sdiv <16 x i32> %x, <i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16> + ret <16 x i32> %1 +} + +define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v2i64: +; SSE: # %bb.0: +; SSE-NEXT: pextrq $1, %xmm0, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $62, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $2, %rcx +; SSE-NEXT: movq %rcx, %xmm1 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: retq +; +; AVX-LABEL: combine_vec_sdiv_by_pow2b_v2i64: +; AVX: # %bb.0: +; AVX-NEXT: vpextrq $1, %xmm0, %rax +; AVX-NEXT: movq %rax, %rcx +; AVX-NEXT: sarq $63, %rcx +; AVX-NEXT: shrq $62, %rcx +; AVX-NEXT: addq %rax, %rcx +; AVX-NEXT: sarq $2, %rcx +; AVX-NEXT: vmovq %rcx, %xmm1 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: retq + %1 = sdiv <2 x i64> %x, <i64 1, i64 4> + ret <2 x i64> %1 +} + +define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i64: +; SSE: # %bb.0: +; SSE-NEXT: pextrq $1, %xmm1, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $60, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $4, %rcx +; SSE-NEXT: movq %rcx, %xmm2 +; SSE-NEXT: movq %xmm1, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $61, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $3, %rcx +; SSE-NEXT: movq %rcx, %xmm1 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: pextrq $1, %xmm0, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $62, %rcx +; SSE-NEXT: addq 
%rax, %rcx +; SSE-NEXT: sarq $2, %rcx +; SSE-NEXT: movq %rcx, %xmm2 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $60, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $4, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $61, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $3, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $62, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $2, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_v4i64: +; AVX2ORLATER: # %bb.0: +; AVX2ORLATER-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2ORLATER-NEXT: vpextrq $1, %xmm1, %rax +; AVX2ORLATER-NEXT: movq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $63, %rcx +; AVX2ORLATER-NEXT: shrq $60, %rcx +; AVX2ORLATER-NEXT: addq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $4, %rcx +; AVX2ORLATER-NEXT: vmovq %rcx, %xmm2 +; AVX2ORLATER-NEXT: vmovq %xmm1, %rax +; AVX2ORLATER-NEXT: movq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $63, %rcx +; AVX2ORLATER-NEXT: shrq $61, %rcx +; AVX2ORLATER-NEXT: addq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $3, %rcx +; AVX2ORLATER-NEXT: vmovq %rcx, %xmm1 +; AVX2ORLATER-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2ORLATER-NEXT: vpextrq $1, %xmm0, %rax +; AVX2ORLATER-NEXT: movq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $63, %rcx +; AVX2ORLATER-NEXT: shrq $62, %rcx +; AVX2ORLATER-NEXT: addq %rax, %rcx +; AVX2ORLATER-NEXT: sarq $2, %rcx +; AVX2ORLATER-NEXT: vmovq %rcx, %xmm2 +; AVX2ORLATER-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2ORLATER-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2ORLATER-NEXT: retq + %1 = sdiv <4 x i64> %x, <i64 1, i64 4, i64 8, i64 16> + ret <4 x i64> %1 +} + +define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i64: +; SSE: # %bb.0: +; SSE-NEXT: pextrq $1, %xmm1, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $60, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $4, %rcx +; SSE-NEXT: movq %rcx, %xmm4 +; SSE-NEXT: movq %xmm1, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $61, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $3, %rcx +; SSE-NEXT: movq %rcx, %xmm1 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE-NEXT: pextrq $1, %xmm3, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $60, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $4, %rcx +; SSE-NEXT: movq %rcx, %xmm4 +; SSE-NEXT: movq %xmm3, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $61, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $3, %rcx +; SSE-NEXT: movq %rcx, %xmm3 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0] +; SSE-NEXT: pextrq $1, %xmm0, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $62, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $2, %rcx +; SSE-NEXT: movq 
%rcx, %xmm4 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] +; SSE-NEXT: pextrq $1, %xmm2, %rax +; SSE-NEXT: movq %rax, %rcx +; SSE-NEXT: sarq $63, %rcx +; SSE-NEXT: shrq $62, %rcx +; SSE-NEXT: addq %rax, %rcx +; SSE-NEXT: sarq $2, %rcx +; SSE-NEXT: movq %rcx, %xmm4 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; SSE-NEXT: retq +; +; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpextrq $1, %xmm2, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $60, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $4, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm3 +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $61, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $3, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $62, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $2, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm3 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpextrq $1, %xmm2, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $60, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $4, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm3 +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $61, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $3, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: shrq $62, %rcx +; AVX1-NEXT: addq %rax, %rcx +; AVX1-NEXT: sarq $2, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm3 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v8i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $60, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $4, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm3 +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $61, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $3, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $62, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $2, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm3 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $60, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $4, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm3 +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $61, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $3, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm2 +; AVX2-NEXT: 
vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: shrq $62, %rcx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: sarq $2, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm3 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: combine_vec_sdiv_by_pow2b_v8i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm1 +; AVX512-NEXT: vpextrq $1, %xmm1, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $60, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $4, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vmovq %xmm1, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $61, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $3, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $62, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $2, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm3 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $60, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $4, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm3 +; AVX512-NEXT: vmovq %xmm2, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $61, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $3, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX512-NEXT: vpextrq $1, %xmm0, %rax +; AVX512-NEXT: movq %rax, %rcx +; AVX512-NEXT: sarq $63, %rcx +; AVX512-NEXT: shrq $62, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: sarq $2, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm3 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %1 = sdiv <8 x i64> %x, <i64 1, i64 4, i64 8, i64 16, i64 1, i64 4, i64 8, i64 16> + ret <8 x i64> %1 +} + +define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) { +; SSE-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg: +; SSE: # %bb.0: +; SSE-NEXT: pextrd $1, %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $30, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $2, %ecx +; SSE-NEXT: negl %ecx +; SSE-NEXT: pextrd $2, %xmm0, %eax +; SSE-NEXT: pextrd $3, %xmm0, %edx +; SSE-NEXT: pinsrd $1, %ecx, %xmm0 +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: shrl $29, %ecx +; SSE-NEXT: addl %eax, %ecx +; SSE-NEXT: sarl $3, %ecx +; SSE-NEXT: pinsrd $2, %ecx, %xmm0 +; SSE-NEXT: movl %edx, %eax +; SSE-NEXT: sarl $31, %eax +; SSE-NEXT: shrl $28, %eax +; SSE-NEXT: addl %edx, %eax +; SSE-NEXT: sarl $4, %eax +; SSE-NEXT: negl %eax +; SSE-NEXT: pinsrd $3, %eax, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg: +; AVX: # %bb.0: +; AVX-NEXT: vpextrd $1, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: shrl $30, %ecx +; AVX-NEXT: addl %eax, 
%ecx +; AVX-NEXT: sarl $2, %ecx +; AVX-NEXT: negl %ecx +; AVX-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm1 +; AVX-NEXT: vpextrd $2, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: shrl $29, %ecx +; AVX-NEXT: addl %eax, %ecx +; AVX-NEXT: sarl $3, %ecx +; AVX-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $3, %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: shrl $28, %ecx +; AVX-NEXT: addl %eax, %ecx +; AVX-NEXT: sarl $4, %ecx +; AVX-NEXT: negl %ecx +; AVX-NEXT: vpinsrd $3, %ecx, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = sdiv <4 x i32> %x, <i32 1, i32 -4, i32 8, i32 -16> + ret <4 x i32> %1 +} + +define <4 x i32> @combine_vec_sdiv_by_pow2b_undef1(<4 x i32> %x) { +; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef1: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %1 = sdiv <4 x i32> %x, <i32 undef, i32 -4, i32 undef, i32 -16> + ret <4 x i32> %1 +} + +define <4 x i32> @combine_vec_sdiv_by_pow2b_undef2(<4 x i32> %x) { +; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef2: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %1 = sdiv <4 x i32> %x, <i32 undef, i32 4, i32 undef, i32 16> + ret <4 x i32> %1 +} + +define <4 x i32> @combine_vec_sdiv_by_pow2b_undef3(<4 x i32> %x) { +; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef3: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %1 = sdiv <4 x i32> %x, <i32 undef, i32 -4, i32 undef, i32 16> + ret <4 x i32> %1 +} |
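For readers skimming the autogenerated checks above: with per-lane power-of-two divisors (e.g. `<i32 1, i32 4, i32 8, i32 16>`) the sdiv is scalarized, and every lane uses the usual signed divide-by-power-of-two fix-up — take the sign, shift the needed bias into the low bits, add it, then arithmetic-shift by log2 of the divisor. Below is a minimal C sketch of the divisor-8 case, matching the `sarl $31` / `shrl $29` / `addl` / `sarl $3` lane sequence in the checks; the function name is illustrative only, not part of the test.

```c
#include <stdint.h>

/* Rounding fix-up performed per lane for a divisor of 8, as in the checks above. */
static int32_t sdiv_by_8(int32_t x) {
    int32_t sign = x >> 31;              /* 0 for x >= 0, -1 for x < 0 (arithmetic shift) */
    int32_t bias = (uint32_t)sign >> 29; /* keep divisor-1 = 7 only when x is negative */
    return (x + bias) >> 3;              /* arithmetic shift now truncates toward zero */
}
```

The same pattern shows up with the 62/2, 61/3 and 60/4 shift pairs for the i64 lanes, and with the `sarw $15` / `movzwl` / `shrl` variants for the i16 lanes.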