Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/X86/combine-sdiv.ll      |  47
-rw-r--r-- | llvm/test/CodeGen/X86/combine-srem.ll      |  28
-rw-r--r-- | llvm/test/CodeGen/X86/combine-udiv.ll      | 147
-rw-r--r-- | llvm/test/CodeGen/X86/combine-urem.ll      | 103
-rw-r--r-- | llvm/test/CodeGen/X86/urem-power-of-two.ll |  13
5 files changed, 100 insertions, 238 deletions
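
Editorial note, not part of the original commit: every diff below exercises the same pair of identities. Unsigned division by a power of two is a logical right shift, and the remainder is a bit mask. That is why the old scalarized per-lane sequences (pextrd/shrl/pinsrd, or pextrd/divl in the variable-shift cases) collapse into vector shifts and ands. A minimal LLVM IR sketch of the folded forms (function names are illustrative):

; Folded form of 'udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>':
; shift each lane right by the log2 of its divisor.
define <4 x i32> @udiv_pow2_sketch(<4 x i32> %x) {
  %d = lshr <4 x i32> %x, <i32 0, i32 2, i32 3, i32 4>
  ret <4 x i32> %d
}

; Folded form of 'urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>':
; keep only the low log2(divisor) bits of each lane.
define <4 x i32> @urem_pow2_sketch(<4 x i32> %x) {
  %r = and <4 x i32> %x, <i32 0, i32 3, i32 7, i32 15>
  ret <4 x i32> %r
}
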
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 3d59fb0b5be..ddb1786e37d 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -82,30 +82,33 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_pos1:
 ; SSE: # BB#0:
 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: shrl $2, %eax
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: pextrd $3, %xmm0, %edx
-; SSE-NEXT: pinsrd $1, %eax, %xmm0
-; SSE-NEXT: shrl $3, %ecx
-; SSE-NEXT: pinsrd $2, %ecx, %xmm0
-; SSE-NEXT: shrl $4, %edx
-; SSE-NEXT: pinsrd $3, %edx, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $3, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: psrld $4, %xmm0
+; SSE-NEXT: psrld $2, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_sdiv_by_pos1:
-; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: shrl $2, %eax
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: shrl $3, %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: shrl $4, %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_sdiv_by_pos1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_sdiv_by_pos1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
 %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
 ret <4 x i32> %2
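
The sdiv fold above leans on the preceding pand with 255: once every lane is known non-negative, signed division by a positive power of two is the same logical shift as the unsigned case, which is what lets AVX2 finish with a single variable shift (vpsrlvd). A sketch of the equivalence, with an illustrative function name:

; With each lane masked to [0, 255], 'sdiv %1, <i32 1, i32 4, i32 8, i32 16>'
; is equivalent to this logical shift (no sign bits to worry about).
define <4 x i32> @sdiv_pos_pow2_sketch(<4 x i32> %x) {
  %nonneg = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %d = lshr <4 x i32> %nonneg, <i32 0, i32 2, i32 3, i32 4>
  ret <4 x i32> %d
}
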
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index a2de1e021c6..6c1956ac77c 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -54,36 +54,12 @@ define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
 define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_by_pos1:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: andl $15, %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: andl $7, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: andl $3, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_srem_by_pos1:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: andl $15, %eax
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: andl $7, %eax
-; AVX-NEXT: vmovd %eax, %xmm2
-; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: andl $3, %eax
-; AVX-NEXT: vmovd %eax, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
 %2 = srem <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
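
The srem case goes one step further: the remainder masks <0,3,7,15> are strictly narrower than the 255 mask, so the two ands fold into one constant and a single andps/vandps remains. Sketch (illustrative name):

; '(and %x, 255) srem <1,4,8,16>' keeps the low <0,2,3,4> bits of each lane,
; and (x & 255) & <0,3,7,15> constant-folds to one mask.
define <4 x i32> @srem_pos_pow2_sketch(<4 x i32> %x) {
  %r = and <4 x i32> %x, <i32 0, i32 3, i32 7, i32 15>
  ret <4 x i32> %r
}
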
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 4a96a3fc914..71f6c3e6333 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -47,29 +47,31 @@ define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
 define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_udiv_by_pow2b:
 ; SSE: # BB#0:
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: shrl $2, %eax
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: pextrd $3, %xmm0, %edx
-; SSE-NEXT: pinsrd $1, %eax, %xmm0
-; SSE-NEXT: shrl $3, %ecx
-; SSE-NEXT: pinsrd $2, %ecx, %xmm0
-; SSE-NEXT: shrl $4, %edx
-; SSE-NEXT: pinsrd $3, %edx, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $3, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: psrld $4, %xmm0
+; SSE-NEXT: psrld $2, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_udiv_by_pow2b:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: shrl $2, %eax
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: shrl $3, %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: shrl $4, %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_udiv_by_pow2b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_udiv_by_pow2b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
 ret <4 x i32> %1
 }
@@ -129,88 +131,47 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
 ; SSE: # BB#0:
-; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: cvttps2dq %xmm1, %xmm2
-; SSE-NEXT: pmulld {{.*}}(%rip), %xmm2
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: pextrd $1, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: movd %xmm2, %esi
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: pextrd $2, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: pextrd $3, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: psrld %xmm2, %xmm4
+; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld %xmm1, %xmm2
+; SSE-NEXT: psrld %xmm3, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vmovd %xmm1, %esi
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %esi
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_shl_pow2b:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
-; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $1, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: vmovd %xmm1, %esi
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %esi
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
 %2 = udiv <4 x i32> %x, %1
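
In the variable case, the divisor '<1,4,8,16> shl %y' is still a power of two per lane, so the whole division folds to one shift by '<0,2,3,4> + %y'. That is the vpaddd of a constant followed by vpsrlvd in the AVX2 output; SSE and AVX1, which lack per-lane variable shifts, emulate the same shift with four psrld-by-scalar operations plus blends. A sketch of the folded IR (illustrative name; a lane whose shifted divisor overflows to zero would have been a division by zero, i.e. UB, so the fold may ignore it):

define <4 x i32> @udiv_shl_pow2_sketch(<4 x i32> %x, <4 x i32> %y) {
  %amt = add <4 x i32> %y, <i32 0, i32 2, i32 3, i32 4> ; log2 of <1,4,8,16>
  %d = lshr <4 x i32> %x, %amt
  ret <4 x i32> %d
}
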
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index 7e58c737e0b..f412e9ca631 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -53,34 +53,12 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_by_pow2b:
 ; SSE: # BB#0:
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: andl $15, %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: andl $7, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: andl $3, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_urem_by_pow2b:
 ; AVX: # BB#0:
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: andl $15, %eax
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: andl $7, %eax
-; AVX-NEXT: vmovd %eax, %xmm2
-; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: andl $3, %eax
-; AVX-NEXT: vmovd %eax, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
 ret <4 x i32> %1
@@ -128,30 +106,11 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE: # BB#0:
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: cvttps2dq %xmm1, %xmm2
-; SSE-NEXT: pmulld {{.*}}(%rip), %xmm2
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: pextrd $1, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: movl %edx, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: movd %xmm2, %esi
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %esi
-; SSE-NEXT: movd %edx, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: pextrd $2, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $2, %edx, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: pextrd $3, %xmm2, %ecx
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %ecx
-; SSE-NEXT: pinsrd $3, %edx, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE-NEXT: pmulld {{.*}}(%rip), %xmm1
+; SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: pand %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
@@ -160,54 +119,18 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
 ; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: movl %edx, %ecx
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vmovd %xmm1, %esi
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %esi
-; AVX1-NEXT: vmovd %edx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: xorl %edx, %edx
-; AVX1-NEXT: divl %ecx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $1, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: movl %edx, %ecx
-; AVX2-NEXT: vmovd %xmm1, %esi
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %esi
-; AVX2-NEXT: vmovd %edx, %xmm2
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
-; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm0, %eax
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: divl %ecx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
 %2 = urem <4 x i32> %x, %1
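
The urem twin of that fold keeps the computed divisor and turns it into a mask, using 'x urem p == x & (p - 1)' for a power of two p. In the new output, pcmpeqd (or vpcmpeqd) materializes all-ones, i.e. -1, paddd computes p - 1, and pand applies the mask. Sketch (illustrative name):

define <4 x i32> @urem_shl_pow2_sketch(<4 x i32> %x, <4 x i32> %y) {
  %p = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y    ; per-lane power of two
  %m = add <4 x i32> %p, <i32 -1, i32 -1, i32 -1, i32 -1> ; p - 1, the low-bit mask
  %r = and <4 x i32> %x, %m
  ret <4 x i32> %r
}
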
diff --git a/llvm/test/CodeGen/X86/urem-power-of-two.ll b/llvm/test/CodeGen/X86/urem-power-of-two.ll
index 9e27809c297..469c573443e 100644
--- a/llvm/test/CodeGen/X86/urem-power-of-two.ll
+++ b/llvm/test/CodeGen/X86/urem-power-of-two.ll
@@ -31,18 +31,17 @@ define i25 @shift_left_pow_2(i25 %x, i25 %y) {
 ret i25 %urem
 }
 
-; FIXME: A logically right-shifted sign bit is a power-of-2 or UB.
+; A logically right-shifted sign bit is a power-of-2 or UB.
 define i16 @shift_right_pow_2(i16 %x, i16 %y) {
 ; CHECK-LABEL: shift_right_pow_2:
 ; CHECK: # BB#0:
-; CHECK-NEXT: movl $32768, %r8d # imm = 0x8000
+; CHECK-NEXT: movl $32768, %eax # imm = 0x8000
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: shrl %cl, %r8d
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: divw %r8w
-; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: shrl %cl, %eax
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: andl %edi, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
 ; CHECK-NEXT: retq
 ;
 %shr = lshr i16 -32768, %y
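
The scalar i16 change is the same mask trick: -32768 is the sign bit 0x8000, so 'lshr i16 -32768, %y' yields a single power of two (or the shift is UB, as the updated comment says), and the urem becomes shrl; decl; andl instead of a divw. A sketch of the folded form (illustrative name):

define i16 @shift_right_pow_2_sketch(i16 %x, i16 %y) {
  %p = lshr i16 -32768, %y ; 0x8000 >> y, a power of two for valid %y
  %m = sub i16 %p, 1       ; the 'decl' in the new CHECK lines
  %r = and i16 %x, %m      ; the 'andl'
  ret i16 %r
}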