diff options
| author | Craig Topper <craig.topper@intel.com> | 2018-02-09 05:54:31 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-02-09 05:54:31 +0000 |
| commit | 090e41d0cc657ac03c638ceb1610b628a9ef65d1 (patch) | |
| tree | 45e9fc966a726730e45181256414bac4f1f9f8ba /llvm/test/CodeGen | |
| parent | 7ac1c780622f847cdadcb5ed82c5ef0f9a6b445e (diff) | |
| download | bcm5719-llvm-090e41d0cc657ac03c638ceb1610b628a9ef65d1.tar.gz bcm5719-llvm-090e41d0cc657ac03c638ceb1610b628a9ef65d1.zip | |
[X86] Add 512-bit shuffle test cases for concatenating 128/256-bits with zeros in the upper portion.
We should recognize this and just use a mov that will zero the upper bits.
llvm-svn: 324708
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll | 34 |
1 files changed, 34 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index eed07bc9aa0..ca72ea0eea6 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2761,3 +2761,37 @@ define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
   %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
   ret <2 x i64> %res
 }
+
+define <8 x i64> @test_v8i64_insert_zero_128(<8 x i64> %a) {
+; AVX512F-LABEL: test_v8i64_insert_zero_128:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: movb $3, %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_insert_zero_128:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movb $3, %al
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+  %res = shufflevector <8 x i64> %a, <8 x i64> <i64 0, i64 0, i64 0, i64 0, i64 undef, i64 undef, i64 undef, i64 undef>, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9>
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @test_v8i64_insert_zero_256(<8 x i64> %a) {
+; AVX512F-LABEL: test_v8i64_insert_zero_256:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_insert_zero_256:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
+; AVX512F-32-NEXT: retl
+  %res = shufflevector <8 x i64> %a, <8 x i64> <i64 0, i64 0, i64 0, i64 0, i64 undef, i64 undef, i64 undef, i64 undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+  ret <8 x i64> %res
+}

