Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/combine-sdiv.ll |   3
-rw-r--r--  llvm/test/CodeGen/X86/combine-srem.ll |  18
-rw-r--r--  llvm/test/CodeGen/X86/combine-udiv.ll | 113
-rw-r--r--  llvm/test/CodeGen/X86/combine-urem.ll | 126
4 files changed, 180 insertions, 80 deletions
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 2968addd316..3d59fb0b5be 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; fold (sdiv undef, x) -> 0
 define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index f400781c420..a2de1e021c6 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; fold (srem undef, x) -> 0
 define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
@@ -35,11 +36,16 @@ define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_srem_by_pos0:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_srem_by_pos0:
+; AVX1: # BB#0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_srem_by_pos0:
+; AVX2: # BB#0:
+; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
 %2 = srem <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
 ret <4 x i32> %2
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 10ef6ce9bf9..4a96a3fc914 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; fold (udiv undef, x) -> 0
 define <4 x i32> @combine_vec_udiv_undef0(<4 x i32> %x) {
@@ -97,12 +98,29 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_udiv_by_shl_pow2a:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_udiv_by_shl_pow2a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_udiv_by_shl_pow2a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
 %2 = udiv <4 x i32> %x, %1
 ret <4 x i32> %2
@@ -139,32 +157,61 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_udiv_by_shl_pow2b:
-; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
-; AVX-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: movl %eax, %ecx
-; AVX-NEXT: vmovd %xmm1, %esi
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %esi
-; AVX-NEXT: vmovd %eax, %xmm2
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vmovd %xmm1, %esi
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %esi
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_udiv_by_shl_pow2b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
+; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: vmovd %xmm1, %esi
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %esi
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
 %2 = udiv <4 x i32> %x, %1
 ret <4 x i32> %2
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index 0c39bb280e8..7e58c737e0b 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; fold (urem undef, x) -> 0
 define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
@@ -35,11 +36,16 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_urem_by_pow2a:
-; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_urem_by_pow2a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_urem_by_pow2a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
 ret <4 x i32> %1
 }
@@ -93,14 +99,25 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: pand %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpslld $2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
 %2 = urem <4 x i32> %x, %1
 ret <4 x i32> %2
@@ -137,32 +154,61 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
-; AVX-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: movl %edx, %ecx
-; AVX-NEXT: vmovd %xmm1, %esi
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %esi
-; AVX-NEXT: vmovd %edx, %xmm2
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %ecx
-; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: movl %edx, %ecx
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vmovd %xmm1, %esi
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %esi
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT: xorl %edx, %edx
+; AVX1-NEXT: divl %ecx
+; AVX1-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
+; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: movl %edx, %ecx
+; AVX2-NEXT: vmovd %xmm1, %esi
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %esi
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: divl %ecx
+; AVX2-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX2-NEXT: retq
 %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
 %2 = urem <4 x i32> %x, %1
 ret <4 x i32> %2
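The new RUN lines rely on FileCheck accepting multiple --check-prefix options per invocation: lines tagged with the shared AVX prefix are verified for both the -mattr=+avx and -mattr=+avx2 runs, while AVX1- and AVX2-prefixed lines apply only to the run that declares that prefix. A minimal hand-written sketch of that pattern (a hypothetical test, not part of this diff; the check lines mirror the combine_vec_urem_by_pow2a output shown above):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

define <4 x i32> @urem_by_4(<4 x i32> %x) {
; Shared check, applied to both runs:
; AVX-LABEL: urem_by_4:
; AVX1 folds the mask constant into the memory operand:
; AVX1: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX2 broadcasts the constant first, then masks:
; AVX2: vbroadcastss {{.*}}(%rip), %xmm1
; AVX2: vandps %xmm1, %xmm0, %xmm0
; AVX: retq
  %1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

In the tests above, the checks are instead regenerated with utils/update_llc_test_checks.py, which emits fully separate AVX1-/AVX2-prefixed blocks rather than sharing the AVX prefix line by line.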