| field | value |
|---|---|
| author | Craig Topper <craig.topper@gmail.com>, 2016-11-20 19:50:32 +0000 |
| committer | Craig Topper <craig.topper@gmail.com>, 2016-11-20 19:50:32 +0000 |
| commit | 85a1f5c20ccb6941047091d3480860f87c72c5a0 (patch) |
| tree | 6844a503191009e21101df0b2f93022335c4f20f /llvm |
| parent | 99a66390473f9ce57141fce34cf1446e1ca78a18 (diff) |
| download | bcm5719-llvm-85a1f5c20ccb6941047091d3480860f87c72c5a0.tar.gz, bcm5719-llvm-85a1f5c20ccb6941047091d3480860f87c72c5a0.zip |
[AVX-512] Add tests for masked palignr/valignd/valignq shuffles, many of which show failures to fold the masking into the operation.
Many of these failures occur because shuffle lowering widens the element size and reduces the element count when possible, which leaves a bitcast between the shuffle and the select. Future patches will improve these cases by rewriting the shuffle back to a narrower element type when that is expected to allow the mask to fold.
llvm-svn: 287503
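
To make the failure concrete, here is the shape of the IR these tests exercise, distilled from the v16i32 cases below (the function name @rotate_v16i32_masked is illustrative, not from the commit). Lowering implements this 32-bit-element rotation with the wider 64-bit valignq, so a bitcast back to <16 x i32> ends up between the shuffle and the select, and the mask is applied with a separate vpblendmd instead of being folded into a masked valign:

; A minimal sketch of the problem pattern; the function name is hypothetical.
define <16 x i32> @rotate_v16i32_masked(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
  ; Rotating by two i32 elements can be widened to a one-element v8i64 rotate,
  ; which is what separates the shuffle from the select below.
  %shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
  %mask.cast = bitcast i16 %mask to <16 x i1>
  ; This select is the masking that should fold into the shuffle instruction.
  %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
  ret <16 x i32> %res
}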
Diffstat (limited to 'llvm')
| mode | file | lines added |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll | 39 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll | 80 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-masked.ll | 242 |
3 files changed, 361 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index d13c867b087..02c49744f5b 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -369,3 +369,42 @@ define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15
   %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
   ret <16 x i32> %res
 }
+
+define <16 x i32> @mask_shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
+; ALL-LABEL: mask_shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
+; ALL: # BB#0:
+; ALL-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; ALL-NEXT: kmovw %edi, %k1
+; ALL-NEXT: vpblendmd %zmm0, %zmm2, %zmm0 {%k1}
+; ALL-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+  %mask.cast = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
+  ret <16 x i32> %res
+}
+
+define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, i16 %mask) {
+; ALL-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+; ALL: # BB#0:
+; ALL-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,0]
+; ALL-NEXT: kmovw %edi, %k1
+; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; ALL-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
+  %mask.cast = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> zeroinitializer
+  ret <16 x i32> %res
+}
+
+define <16 x i32> @maskz_shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+; ALL-LABEL: maskz_shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
+; ALL: # BB#0:
+; ALL-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; ALL-NEXT: kmovw %edi, %k1
+; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; ALL-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+  %mask.cast = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> zeroinitializer
+  ret <16 x i32> %res
+}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 0f163ba2188..2cb35827561 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2305,3 +2305,83 @@ define <8 x i64> @shuffle_v8i64_12345670(<8 x i64> %a) {
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
   ret <8 x i64> %shuffle
 }
+
+define <8 x i64> @mask_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, <16 x i32> %passthru, i8 %mask) {
+;
+; AVX512F-LABEL: mask_shuffle_v8i64_12345678:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: mask_shuffle_v8i64_12345678:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; AVX512F-32-NEXT: retl
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @mask_shuffle_v8i64_12345670(<8 x i64> %a, <16 x i32> %passthru, i8 %mask) {
+;
+; AVX512F-LABEL: mask_shuffle_v8i64_12345670:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: mask_shuffle_v8i64_12345670:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
+; AVX512F-32-NEXT: retl
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @maskz_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+;
+; AVX512F-LABEL: maskz_shuffle_v8i64_12345678:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: maskz_shuffle_v8i64_12345678:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
+; AVX512F-32-NEXT: retl
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @maskz_shuffle_v8i64_12345670(<8 x i64> %a, i8 %mask) {
+;
+; AVX512F-LABEL: maskz_shuffle_v8i64_12345670:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: maskz_shuffle_v8i64_12345670:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
+; AVX512F-32-NEXT: retl
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i64> %shuffle, <8 x i64> zeroinitializer
+  ret <8 x i64> %res
+}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-masked.ll b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
new file mode 100644
index 00000000000..24cf6f9a966
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -0,0 +1,242 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=CHECK
+
+define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v4i32_1234:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v4i32_1234:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v4i32_2345:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v4i32_2345:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v2i64_12:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v2i64_12:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> zeroinitializer
+  ret <2 x i64> %res
+}
+
+define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v4i64_1234:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignq {{.*#+}} ymm2 {%k1} = ymm0[1,2,3],ymm1[0]
+; CHECK-NEXT: vmovdqa64 %ymm2, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v4i64_1234:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3],ymm1[0]
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> zeroinitializer
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v4i64_1230:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,0]
+; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v4i64_1230:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,0]
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> zeroinitializer
+  ret <4 x i64> %res
+}
+
+define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v8i32_12345678:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignd {{.*#+}} ymm2 {%k1} = ymm0[1,2,3,4,5,6,7],ymm1[0]
+; CHECK-NEXT: vmovdqa64 %ymm2, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v8i32_12345678:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7],ymm1[0]
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v8i32_23456789:
+; CHECK: # BB#0:
+; CHECK-NEXT: valignq {{.*#+}} ymm0 = ymm0[1,2,3],ymm1[0]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v8i32_23456789:
+; CHECK: # BB#0:
+; CHECK-NEXT: valignq {{.*#+}} ymm0 = ymm0[1,2,3],ymm1[0]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v8i32_12345670:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignd {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,4,5,6,7,0]
+; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @maskz_shuffle_v8i32_12345670(<8 x i32> %a, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v8i32_12345670:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7,0]
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_shuffle_v8i32_23456701:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
+; CHECK-LABEL: maskz_shuffle_v8i32_23456701:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
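
The intended end state can be seen by comparing checks within this commit. Where the element width already matches the instruction, the mask folds into a single masked valignq, as in maskz_shuffle_v8i64_12345678 above:

; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]

whereas the widened v16i32 cases currently pay an extra masked blend or move, as in maskz_shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:

; ALL-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7],zmm1[0]
; ALL-NEXT: kmovw %edi, %k1
; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}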

