-rw-r--r-- | llvm/test/CodeGen/X86/combine-udiv.ll | 670
1 file changed, 515 insertions, 155 deletions
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 17dd7a07870..5b8824e7595 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,XOP
 
 ; fold (udiv x, 1) -> x
 define i32 @combine_udiv_by_one(i32 %x) {
@@ -47,6 +49,13 @@ define <4 x i32> @combine_vec_udiv_by_negone(<4 x i32> %x) {
 ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_negone:
+; XOP: # %bb.0:
+; XOP-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpsrld $31, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
 ret <4 x i32> %1
 }
@@ -72,6 +81,11 @@ define <4 x i32> @combine_vec_udiv_by_minsigned(<4 x i32> %x) {
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_minsigned:
+; XOP: # %bb.0:
+; XOP-NEXT: vpsrld $31, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
 ret <4 x i32> %1
 }
@@ -89,31 +103,62 @@ define i32 @combine_udiv_zero(i32 %x) {
 }
 
 define <4 x i32> @combine_vec_udiv_zero(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_udiv_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %esi
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_zero:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pextrd $1, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %ecx
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: movd %xmm0, %esi
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %esi
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %ecx
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %ecx
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_udiv_zero:
 ; AVX: # %bb.0:
@@ -139,6 +184,31 @@ define <4 x i32> @combine_vec_udiv_zero(<4 x i32> %x) {
 ; AVX-NEXT: divl %ecx
 ; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_zero:
+; XOP: # %bb.0:
+; XOP-NEXT: vpextrd $1, %xmm0, %ecx
+; XOP-NEXT: xorl %eax, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %ecx
+; XOP-NEXT: movl %eax, %ecx
+; XOP-NEXT: vmovd %xmm0, %esi
+; XOP-NEXT: xorl %eax, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %esi
+; XOP-NEXT: vmovd %eax, %xmm1
+; XOP-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; XOP-NEXT: vpextrd $2, %xmm0, %ecx
+; XOP-NEXT: xorl %eax, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %ecx
+; XOP-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; XOP-NEXT: vpextrd $3, %xmm0, %ecx
+; XOP-NEXT: xorl %eax, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %ecx
+; XOP-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> zeroinitializer, %x
 ret <4 x i32> %1
 }
@@ -156,27 +226,54 @@ define i32 @combine_udiv_dupe(i32 %x) {
 }
 
 define <4 x i32> @combine_vec_udiv_dupe(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_udiv_dupe:
-; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_dupe:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_dupe:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pextrd $1, %xmm0, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %eax
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %eax
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: divl %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_udiv_dupe:
 ; AVX: # %bb.0:
@@ -198,6 +295,27 @@ define <4 x i32> @combine_vec_udiv_dupe(<4 x i32> %x) {
 ; AVX-NEXT: divl %eax
 ; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_dupe:
+; XOP: # %bb.0:
+; XOP-NEXT: vpextrd $1, %xmm0, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %eax
+; XOP-NEXT: movl %eax, %ecx
+; XOP-NEXT: vmovd %xmm0, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %eax
+; XOP-NEXT: vmovd %eax, %xmm1
+; XOP-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
+; XOP-NEXT: vpextrd $2, %xmm0, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %eax
+; XOP-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; XOP-NEXT: vpextrd $3, %xmm0, %eax
+; XOP-NEXT: xorl %edx, %edx
+; XOP-NEXT: divl %eax
+; XOP-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> %x, %x
 ret <4 x i32> %1
 }
@@ -213,23 +331,41 @@ define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_pow2a:
+; XOP: # %bb.0:
+; XOP-NEXT: vpsrld $2, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
 ret <4 x i32> %1
 }
 
 define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_udiv_by_pow2b:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrld $4, %xmm0
-; SSE-NEXT: psrld $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_by_pow2b:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $4, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $3, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $2, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_by_pow2b:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrld $4, %xmm0
+; SSE41-NEXT: psrld $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_pow2b:
 ; AVX1: # %bb.0:
@@ -245,29 +381,54 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_pow2b:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
 ret <4 x i32> %1
 }
 
 define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
-; SSE-LABEL: combine_vec_udiv_by_pow2c:
-; SSE: # %bb.0:
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrld %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE-NEXT: psrld %xmm1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_by_pow2c:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm2, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_by_pow2c:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrld %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_pow2c:
 ; AVX1: # %bb.0:
@@ -289,6 +450,13 @@ define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_pow2c:
+; XOP: # %bb.0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
 %2 = udiv <4 x i32> %x, %1
 ret <4 x i32> %2
 }
@@ -296,25 +464,46 @@ define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 
 ; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
 define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
-; SSE-LABEL: combine_vec_udiv_by_shl_pow2a:
-; SSE: # %bb.0:
-; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrld %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE-NEXT: psrld %xmm1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_by_shl_pow2a:
+; SSE2: # %bb.0:
+; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm2, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_by_shl_pow2a:
+; SSE41: # %bb.0:
+; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrld %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_shl_pow2a:
 ; AVX1: # %bb.0:
@@ -339,31 +528,59 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_shl_pow2a:
+; XOP: # %bb.0:
+; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [4294967294,4294967294,4294967294,4294967294]
+; XOP-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
 %2 = udiv <4 x i32> %x, %1
 ret <4 x i32> %2
 }
 
 define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
-; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
-; SSE: # %bb.0:
-; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrld %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE-NEXT: psrld %xmm1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_by_shl_pow2b:
+; SSE2: # %bb.0:
+; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm2, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_by_shl_pow2b:
+; SSE41: # %bb.0:
+; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrld %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
 ; AVX1: # %bb.0:
@@ -387,6 +604,13 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_by_shl_pow2b:
+; XOP: # %bb.0:
+; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,4294967294,4294967293,4294967292]
+; XOP-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
 %2 = udiv <4 x i32> %x, %1
 ret <4 x i32> %2
 }
@@ -425,26 +649,76 @@ define <8 x i16> @combine_vec_udiv_uniform(<8 x i16> %x) {
 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_uniform:
+; XOP: # %bb.0:
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
+; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpsrlw $4, %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <8 x i16> %x, <i16 23, i16 23, i16 23, i16 23, i16 23, i16 23, i16 23, i16 23>
 ret <8 x i16> %1
 }
 
 define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_udiv_nonuniform:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
-; SSE-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE-NEXT: psubw %xmm1, %xmm0
-; SSE-NEXT: movl $32768, %eax # imm = 0x8000
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: pmulhuw %xmm0, %xmm2
-; SSE-NEXT: paddw %xmm1, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = <4096,2048,8,u,u,2,2,u>
-; SSE-NEXT: pmulhuw %xmm2, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6],xmm2[7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_nonuniform:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrlw $3, %xmm3
+; SSE2-NEXT: pandn %xmm3, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: movl $32768, %eax # imm = 0x8000
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pmulhuw %xmm0, %xmm2
+; SSE2-NEXT: paddw %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,65535,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,0]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,0,65535,65535,0]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_nonuniform:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
+; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: psubw %xmm1, %xmm0
+; SSE41-NEXT: movl $32768, %eax # imm = 0x8000
+; SSE41-NEXT: movd %eax, %xmm2
+; SSE41-NEXT: pmulhuw %xmm0, %xmm2
+; SSE41-NEXT: paddw %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = <4096,2048,8,u,u,2,2,u>
+; SSE41-NEXT: pmulhuw %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6],xmm2[7]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_udiv_nonuniform:
 ; AVX: # %bb.0:
@@ -459,20 +733,45 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
 ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6],xmm0[7]
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_nonuniform:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: movl $32768, %eax # imm = 0x8000
+; XOP-NEXT: vmovd %eax, %xmm2
+; XOP-NEXT: vpmulhuw %xmm2, %xmm0, %xmm0
+; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <8 x i16> %x, <i16 23, i16 34, i16 -23, i16 56, i16 128, i16 -1, i16 -256, i16 -32768>
 ret <8 x i16> %1
 }
 
 define <8 x i16> @combine_vec_udiv_nonuniform2(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_udiv_nonuniform2:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; SSE-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_udiv_nonuniform2:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_udiv_nonuniform2:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_udiv_nonuniform2:
 ; AVX: # %bb.0:
@@ -481,6 +780,15 @@ define <8 x i16> @combine_vec_udiv_nonuniform2(<8 x i16> %x) {
 ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_nonuniform2:
+; XOP: # %bb.0:
+; XOP-NEXT: movl $65535, %eax # imm = 0xFFFF
+; XOP-NEXT: vmovd %eax, %xmm1
+; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <8 x i16> %x, <i16 -34, i16 35, i16 36, i16 -37, i16 38, i16 -39, i16 40, i16 -41>
 ret <8 x i16> %1
 }
@@ -504,25 +812,67 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) {
 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: combine_vec_udiv_nonuniform3:
+; XOP: # %bb.0:
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
+; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
 %1 = udiv <8 x i16> %x, <i16 7, i16 23, i16 25, i16 27, i16 31, i16 47, i16 63, i16 127>
 ret <8 x i16> %1
 }
 
 define <8 x i16> @pr38477(<8 x i16> %a0) {
-; SSE-LABEL: pr38477:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,4957,57457,4103,16385,35545,2048,2115]
-; SSE-NEXT: pmulhuw %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psubw %xmm2, %xmm1
-; SSE-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = <u,1024,1024,16,4,1024,u,4096>
-; SSE-NEXT: pmulhuw %xmm1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: pr38477:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4957,57457,4103,16385,35545,2048,2115]
+; SSE2-NEXT: pmulhuw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psubw %xmm1, %xmm3
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm3
+; SSE2-NEXT: paddw %xmm1, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: pandn %xmm3, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,0,0]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: psrlw $2, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: pr38477:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,4957,57457,4103,16385,35545,2048,2115]
+; SSE41-NEXT: pmulhuw %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psubw %xmm2, %xmm1
+; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <u,1024,1024,16,4,1024,u,4096>
+; SSE41-NEXT: pmulhuw %xmm1, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: pr38477:
 ; AVX: # %bb.0:
@@ -534,6 +884,16 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7]
 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX-NEXT: retq
+;
+; XOP-LABEL: pr38477:
+; XOP: # %bb.0:
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpsubw %xmm1, %xmm0, %xmm2
+; XOP-NEXT: vpmulhuw {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; XOP-NEXT: retq
 %1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
 ret <8 x i16> %1
 }
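
The CHECK lines in this test are autogenerated, so after adding RUN lines they are refreshed with the script named in the NOTE line rather than written by hand. A minimal sketch of that workflow, assuming an in-tree build with llc and FileCheck on PATH (the exact invocation may differ in your setup):

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/combine-udiv.ll

To reproduce a single configuration by hand, substitute the test path for %s in its RUN line, e.g. for the new XOP prefixes:

    llc < llvm/test/CodeGen/X86/combine-udiv.ll -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck llvm/test/CodeGen/X86/combine-udiv.ll --check-prefixes=CHECK,XOP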