diff options
| author | Craig Topper <craig.topper@gmail.com> | 2016-11-24 05:36:50 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-11-24 05:36:50 +0000 |
| commit | f23b995f78d69fe00480cb148fc0b1b2680c0cdf (patch) | |
| tree | a5440a37a96b1555493fa7f9626e99f281d9b09f /llvm | |
| parent | 993c7416d3fcb2877e05d266dbc1d996b3ef8c2b (diff) | |
| download | bcm5719-llvm-f23b995f78d69fe00480cb148fc0b1b2680c0cdf.tar.gz bcm5719-llvm-f23b995f78d69fe00480cb148fc0b1b2680c0cdf.zip | |
[AVX-512] Fix some mask shuffle tests to actually test the case they were supposed to test.
llvm-svn: 287854
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll | 20 |
1 files changed, 12 insertions, 8 deletions
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll index 06ae4d5513a..625681dc294 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll @@ -2292,43 +2292,47 @@ define <8 x i64> @shuffle_v8i64_12345670(<8 x i64> %a) { ret <8 x i64> %shuffle } -define <8 x i64> @mask_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, <16 x i32> %passthru, i8 %mask) { +define <8 x i64> @mask_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passthru, i8 %mask) { ; ; AVX512F-LABEL: mask_shuffle_v8i64_12345678: ; AVX512F: # BB#0: ; AVX512F-NEXT: kmovw %edi, %k1 -; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0] +; AVX512F-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm0[1,2,3,4,5,6,7],zmm1[0] +; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512F-32-LABEL: mask_shuffle_v8i64_12345678: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: kmovw %eax, %k1 -; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0] +; AVX512F-32-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm0[1,2,3,4,5,6,7],zmm1[0] +; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512F-32-NEXT: retl %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> %mask.cast = bitcast i8 %mask to <8 x i1> - %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer + %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> %passthru ret <8 x i64> %res } -define <8 x i64> @mask_shuffle_v8i64_12345670(<8 x i64> %a, <16 x i32> %passthru, i8 %mask) { +define <8 x i64> @mask_shuffle_v8i64_12345670(<8 x i64> %a, <8 x i64> %passthru, i8 %mask) { ; ; AVX512F-LABEL: mask_shuffle_v8i64_12345670: ; AVX512F: # BB#0: ; AVX512F-NEXT: kmovw %edi, %k1 -; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0] +; AVX512F-NEXT: valignq {{.*#+}} zmm1 {%k1} = zmm0[1,2,3,4,5,6,7,0] +; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512F-32-LABEL: mask_shuffle_v8i64_12345670: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: kmovw %eax, %k1 -; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0] +; AVX512F-32-NEXT: valignq {{.*#+}} zmm1 {%k1} = zmm0[1,2,3,4,5,6,7,0] +; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0 ; AVX512F-32-NEXT: retl %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0> %mask.cast = bitcast i8 %mask to <8 x i1> - %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer + %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> %passthru ret <8 x i64> %res } |

