| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-03 16:56:57 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-03 16:56:57 +0000 |
| commit | 46a804cfd937211b6a8e07e1daa48f104e0c948f (patch) | |
| tree | 8be17b0f8ca604e521567a00a2bb6a424dee2179 | |
| parent | 491b6b5490711a91728dede600236936d2164702 (diff) | |
| download | bcm5719-llvm-46a804cfd937211b6a8e07e1daa48f104e0c948f.tar.gz bcm5719-llvm-46a804cfd937211b6a8e07e1daa48f104e0c948f.zip | |
[X86][SSE] Add bool vector extraction test cases from PR15215
llvm-svn: 314813
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/bool-vector.ll | 134 |
1 file changed, 134 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/bool-vector.ll b/llvm/test/CodeGen/X86/bool-vector.ll
new file mode 100644
index 00000000000..0ee5c066be8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bool-vector.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+
+define i32 @PR15215_bad(<4 x i32> %input) {
+; X32-SSE2-LABEL: PR15215_bad:
+; X32-SSE2: # BB#0: # %entry
+; X32-SSE2-NEXT: pslld $31, %xmm0
+; X32-SSE2-NEXT: psrad $31, %xmm0
+; X32-SSE2-NEXT: movmskps %xmm0, %eax
+; X32-SSE2-NEXT: retl
+;
+; X32-AVX2-LABEL: PR15215_bad:
+; X32-AVX2: # BB#0: # %entry
+; X32-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; X32-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; X32-AVX2-NEXT: vmovmskps %xmm0, %eax
+; X32-AVX2-NEXT: retl
+;
+; X64-SSE2-LABEL: PR15215_bad:
+; X64-SSE2: # BB#0: # %entry
+; X64-SSE2-NEXT: pslld $31, %xmm0
+; X64-SSE2-NEXT: psrad $31, %xmm0
+; X64-SSE2-NEXT: movmskps %xmm0, %eax
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: PR15215_bad:
+; X64-AVX2: # BB#0: # %entry
+; X64-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; X64-AVX2-NEXT: vmovmskps %xmm0, %eax
+; X64-AVX2-NEXT: retq
+entry:
+  %0 = trunc <4 x i32> %input to <4 x i1>
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = zext i4 %1 to i32
+  ret i32 %2
+}
+
+define i32 @PR15215_good(<4 x i32> %input) {
+; X32-SSE2-LABEL: PR15215_good:
+; X32-SSE2: # BB#0: # %entry
+; X32-SSE2-NEXT: pushl %esi
+; X32-SSE2-NEXT: .Lcfi0:
+; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
+; X32-SSE2-NEXT: .Lcfi1:
+; X32-SSE2-NEXT: .cfi_offset %esi, -8
+; X32-SSE2-NEXT: movd %xmm0, %eax
+; X32-SSE2-NEXT: andl $1, %eax
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X32-SSE2-NEXT: movd %xmm1, %ecx
+; X32-SSE2-NEXT: andl $1, %ecx
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X32-SSE2-NEXT: movd %xmm1, %edx
+; X32-SSE2-NEXT: andl $1, %edx
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; X32-SSE2-NEXT: movd %xmm0, %esi
+; X32-SSE2-NEXT: andl $1, %esi
+; X32-SSE2-NEXT: leal (%eax,%ecx,2), %eax
+; X32-SSE2-NEXT: leal (%eax,%edx,4), %eax
+; X32-SSE2-NEXT: leal (%eax,%esi,8), %eax
+; X32-SSE2-NEXT: popl %esi
+; X32-SSE2-NEXT: retl
+;
+; X32-AVX2-LABEL: PR15215_good:
+; X32-AVX2: # BB#0: # %entry
+; X32-AVX2-NEXT: pushl %esi
+; X32-AVX2-NEXT: .Lcfi0:
+; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
+; X32-AVX2-NEXT: .Lcfi1:
+; X32-AVX2-NEXT: .cfi_offset %esi, -8
+; X32-AVX2-NEXT: vmovd %xmm0, %eax
+; X32-AVX2-NEXT: andl $1, %eax
+; X32-AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; X32-AVX2-NEXT: andl $1, %ecx
+; X32-AVX2-NEXT: vpextrd $2, %xmm0, %edx
+; X32-AVX2-NEXT: andl $1, %edx
+; X32-AVX2-NEXT: vpextrd $3, %xmm0, %esi
+; X32-AVX2-NEXT: andl $1, %esi
+; X32-AVX2-NEXT: leal (%eax,%ecx,2), %eax
+; X32-AVX2-NEXT: leal (%eax,%edx,4), %eax
+; X32-AVX2-NEXT: leal (%eax,%esi,8), %eax
+; X32-AVX2-NEXT: popl %esi
+; X32-AVX2-NEXT: retl
+;
+; X64-SSE2-LABEL: PR15215_good:
+; X64-SSE2: # BB#0: # %entry
+; X64-SSE2-NEXT: movd %xmm0, %eax
+; X64-SSE2-NEXT: andl $1, %eax
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-SSE2-NEXT: movd %xmm1, %ecx
+; X64-SSE2-NEXT: andl $1, %ecx
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-SSE2-NEXT: movd %xmm1, %edx
+; X64-SSE2-NEXT: andl $1, %edx
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; X64-SSE2-NEXT: movd %xmm0, %esi
+; X64-SSE2-NEXT: andl $1, %esi
+; X64-SSE2-NEXT: leal (%rax,%rcx,2), %eax
+; X64-SSE2-NEXT: leal (%rax,%rdx,4), %eax
+; X64-SSE2-NEXT: leal (%rax,%rsi,8), %eax
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: PR15215_good:
+; X64-AVX2: # BB#0: # %entry
+; X64-AVX2-NEXT: vmovd %xmm0, %eax
+; X64-AVX2-NEXT: andl $1, %eax
+; X64-AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; X64-AVX2-NEXT: andl $1, %ecx
+; X64-AVX2-NEXT: vpextrd $2, %xmm0, %edx
+; X64-AVX2-NEXT: andl $1, %edx
+; X64-AVX2-NEXT: vpextrd $3, %xmm0, %esi
+; X64-AVX2-NEXT: andl $1, %esi
+; X64-AVX2-NEXT: leal (%rax,%rcx,2), %eax
+; X64-AVX2-NEXT: leal (%rax,%rdx,4), %eax
+; X64-AVX2-NEXT: leal (%rax,%rsi,8), %eax
+; X64-AVX2-NEXT: retq
+entry:
+  %0 = trunc <4 x i32> %input to <4 x i1>
+  %1 = extractelement <4 x i1> %0, i32 0
+  %e1 = select i1 %1, i32 1, i32 0
+  %2 = extractelement <4 x i1> %0, i32 1
+  %e2 = select i1 %2, i32 2, i32 0
+  %3 = extractelement <4 x i1> %0, i32 2
+  %e3 = select i1 %3, i32 4, i32 0
+  %4 = extractelement <4 x i1> %0, i32 3
+  %e4 = select i1 %4, i32 8, i32 0
+  %5 = or i32 %e1, %e2
+  %6 = or i32 %5, %e3
+  %7 = or i32 %6, %e4
+  ret i32 %7
+}
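As a reading aid (not part of the commit), here is a minimal scalar sketch of the value both test functions compute: the low bit of each 32-bit lane packed into a 4-bit mask, with lane 0 in the least significant bit, then zero-extended to i32. PR15215_good spells this out with its extractelement/select/or chain; the CHECK lines for PR15215_bad show the same mask being produced by shifting the low bit into the sign position (pslld/psrad) and reading it with movmskps. The helper name below is invented for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar reference for the pattern the test exercises:
 * pack the low bit of 32-bit lane i into bit i of the result. */
static uint32_t bool_vector_mask(const int32_t lane[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i)
    mask |= (uint32_t)(lane[i] & 1) << i; /* trunc to i1, place at bit i */
  return mask;                            /* zext i4 -> i32 */
}

int main(void) {
  const int32_t v[4] = {1, 0, 3, 2}; /* low bits 1,0,1,0 -> mask 0b0101 = 5 */
  printf("%u\n", bool_vector_mask(v));
  return 0;
}
```

The codegen difference the test captures is that the trunc/bitcast form collapses to a single movmskps per target, while the per-element form goes through scalar extracts (movd/pshufd or vpextrd) and a chain of leal adds.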

