Diffstat (limited to 'llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll')
 -rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll  241
 1 file changed, 53 insertions(+), 188 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index 3f81d2e7b8c..cfd2fc625a6 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -87,88 +87,30 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
 define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_div7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_div7_8i32:
@@ -406,112 +348,35 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
 define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_rem7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    movl %edx, %esi
-; AVX1-NEXT:    shrl $31, %esi
-; AVX1-NEXT:    sarl $2, %edx
-; AVX1-NEXT:    addl %esi, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    subl %esi, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    movl %edx, %esi
-; AVX1-NEXT:    shrl $31, %esi
-; AVX1-NEXT:    sarl $2, %edx
-; AVX1-NEXT:    addl %esi, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    subl %esi, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
+; AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_rem7_8i32:
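For reference, both the old scalar sequence and the new vector sequence implement the same magic-number strength reduction of signed division by 7: the constant 0x92492493 (printed above as 2454267027 unsigned and -1840700269 signed) is the 32-bit signed magic for d = 7, and because it is negative the dividend is added back after taking the high half of the 64-bit product. A minimal C sketch of the per-element arithmetic, written for illustration and not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Divide-by-7 recurrence that both the scalar and vector lowerings
 * implement per signed 32-bit element.                              */
static int32_t div7(int32_t x) {
    /* High 32 bits of the signed 64-bit product: imulq+shrq / vpmuldq. */
    int32_t hi = (int32_t)(((int64_t)x * (int32_t)0x92492493) >> 32);
    int32_t t = hi + x;                 /* addl / vpaddd: magic < 0     */
    /* Arithmetic shift by 2, then add the sign bit of t:
     * sarl $2 + shrl $31 + addl, or vpsrad $2 + vpsrld $31 + vpaddd.  */
    return (t >> 2) + (int32_t)((uint32_t)t >> 31);
}

static int32_t rem7(int32_t x) {
    /* x - 7*q: leal (,q,8) + two subl in the old code,
     * vpmulld by [7,7,7,7] + vpsubd in the new code.                  */
    return x - 7 * div7(x);
}

int main(void) {
    for (int32_t x = -1000; x <= 1000; ++x) {
        assert(div7(x) == x / 7);
        assert(rem7(x) == x % 7);
    }
    return 0;
}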

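The new AVX1 body maps that recurrence onto SSE4.1 instructions per 128-bit half: vpmuldq only multiplies the even lanes (0 and 2), so the odd lanes are shuffled into even position, multiplied separately, and the two sets of high halves are recombined with vpblendw before the add/shift steps. A hypothetical intrinsics sketch of one 128-bit half (the helper name div7_epi32 is illustrative; compile with -msse4.1):

#include <smmintrin.h>  /* SSE4.1: pmuldq, pmulld, pblendw */
#include <stdint.h>
#include <stdio.h>

/* Mirrors one 128-bit half of the new AVX1 lowering of sdiv-by-7. */
static __m128i div7_epi32(__m128i x) {
    const __m128i magic = _mm_set1_epi32((int32_t)0x92492493); /* 2454267027 */
    /* Shuffle odd lanes (1,3) down so pmuldq can reach them.            */
    __m128i x13 = _mm_shuffle_epi32(x, _MM_SHUFFLE(3, 3, 1, 1));     /* vpshufd  */
    __m128i m13 = _mm_shuffle_epi32(magic, _MM_SHUFFLE(3, 3, 1, 1)); /* vpshufd  */
    __m128i p02 = _mm_mul_epi32(x, magic);                           /* vpmuldq  */
    __m128i p13 = _mm_mul_epi32(x13, m13);                           /* vpmuldq  */
    /* Move the even products' high halves into dwords 0,2 and blend in
     * the odd products' high halves at dwords 1,3.                      */
    p02 = _mm_shuffle_epi32(p02, _MM_SHUFFLE(3, 3, 1, 1));           /* vpshufd  */
    __m128i hi = _mm_blend_epi16(p02, p13, 0xCC);                    /* vpblendw */
    __m128i t    = _mm_add_epi32(hi, x);                             /* vpaddd   */
    __m128i sign = _mm_srli_epi32(t, 31);                            /* vpsrld   */
    __m128i q    = _mm_srai_epi32(t, 2);                             /* vpsrad   */
    return _mm_add_epi32(q, sign);                                   /* vpaddd   */
}

static __m128i rem7_epi32(__m128i x) {
    __m128i q = div7_epi32(x);
    /* vpmulld by 7, then vpsubd. */
    return _mm_sub_epi32(x, _mm_mullo_epi32(q, _mm_set1_epi32(7)));
}

int main(void) {
    int32_t out[4];
    _mm_storeu_si128((__m128i *)out, div7_epi32(_mm_setr_epi32(-100, -7, 6, 100)));
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* expect: -14 -1 0 14 */
    _mm_storeu_si128((__m128i *)out, rem7_epi32(_mm_setr_epi32(-100, -7, 6, 100)));
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* expect: -2 0 6 2    */
    return 0;
}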
