Diffstat (limited to 'llvm/test')
 llvm/test/CodeGen/X86/avx512-intrinsics.ll   |   4 +-
 llvm/test/CodeGen/X86/avx512vl-intrinsics.ll |  73 +++++++
 llvm/test/MC/X86/avx512vl-encoding.s         | 226 ++++++++++++++++++
 3 files changed, 301 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index 8e5165e0a06..7cd01683fa9 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -340,7 +340,7 @@ define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
- ; CHECK: vblendmps
+ ; CHECK: vblendmps %zmm1, %zmm0
%res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
@@ -348,7 +348,7 @@ define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x
declare <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float>, <16 x float>, i16) nounwind readonly
define <8 x double> @test_x86_mask_blend_pd_512(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
- ; CHECK: vblendmpd
+ ; CHECK: vblendmpd %zmm1, %zmm0
%res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a1, <8 x double> %a2, i8 %a0) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
index 23b05e38ed4..d349f4f5378 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -788,4 +788,77 @@ define <4 x i32> @expand10(<4 x i32> %data, i8 %mask) {
declare <4 x i32> @llvm.x86.avx512.mask.expand.d.128(<4 x i32> %data, <4 x i32> %src0, i8 %mask)
+define <8 x float> @test_x86_mask_blend_ps_256(i8 %a0, <8 x float> %a1, <8 x float> %a2) {
+ ; CHECK: vblendmps %ymm1, %ymm0
+ %res = call <8 x float> @llvm.x86.avx512.mask.blend.ps.256(<8 x float> %a1, <8 x float> %a2, i8 %a0) ; <<8 x float>> [#uses=1]
+ ret <8 x float> %res
+}
+
+declare <8 x float> @llvm.x86.avx512.mask.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readonly
+
+define <4 x double> @test_x86_mask_blend_pd_256(i8 %a0, <4 x double> %a1, <4 x double> %a2) {
+ ; CHECK: vblendmpd %ymm1, %ymm0
+ %res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a1, <4 x double> %a2, i8 %a0) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_x86_mask_blend_pd_256_memop(<4 x double> %a, <4 x double>* %ptr, i8 %mask) {
+ ; CHECK-LABEL: test_x86_mask_blend_pd_256_memop
+ ; CHECK: vblendmpd (%
+ %b = load <4 x double>* %ptr
+ %res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a, <4 x double> %b, i8 %mask) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double>, <4 x double>, i8) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_d_256
+; CHECK: vpblendmd
+define <8 x i32> @test_x86_mask_blend_d_256(i8 %a0, <8 x i32> %a1, <8 x i32> %a2) {
+ %res = call <8 x i32> @llvm.x86.avx512.mask.blend.d.256(<8 x i32> %a1, <8 x i32> %a2, i8 %a0) ; <<8 x i32>> [#uses=1]
+ ret <8 x i32> %res
+}
+declare <8 x i32> @llvm.x86.avx512.mask.blend.d.256(<8 x i32>, <8 x i32>, i8) nounwind readonly
+
+define <4 x i64> @test_x86_mask_blend_q_256(i8 %a0, <4 x i64> %a1, <4 x i64> %a2) {
+ ; CHECK: vpblendmq
+ %res = call <4 x i64> @llvm.x86.avx512.mask.blend.q.256(<4 x i64> %a1, <4 x i64> %a2, i8 %a0) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx512.mask.blend.q.256(<4 x i64>, <4 x i64>, i8) nounwind readonly
+define <4 x float> @test_x86_mask_blend_ps_128(i8 %a0, <4 x float> %a1, <4 x float> %a2) {
+ ; CHECK: vblendmps %xmm1, %xmm0
+ %res = call <4 x float> @llvm.x86.avx512.mask.blend.ps.128(<4 x float> %a1, <4 x float> %a2, i8 %a0) ; <<4 x float>> [#uses=1]
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.blend.ps.128(<4 x float>, <4 x float>, i8) nounwind readonly
+
+define <2 x double> @test_x86_mask_blend_pd_128(i8 %a0, <2 x double> %a1, <2 x double> %a2) {
+ ; CHECK: vblendmpd %xmm1, %xmm0
+ %res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a1, <2 x double> %a2, i8 %a0) ; <<2 x double>> [#uses=1]
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_x86_mask_blend_pd_128_memop(<2 x double> %a, <2 x double>* %ptr, i8 %mask) {
+ ; CHECK-LABEL: test_x86_mask_blend_pd_128_memop
+ ; CHECK: vblendmpd (%
+ %b = load <2 x double>* %ptr
+ %res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a, <2 x double> %b, i8 %mask) ; <<2 x double>> [#uses=1]
+ ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double>, <2 x double>, i8) nounwind readonly
+
+define <4 x i32> @test_x86_mask_blend_d_128(i8 %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ; CHECK: vpblendmd
+ %res = call <4 x i32> @llvm.x86.avx512.mask.blend.d.128(<4 x i32> %a1, <4 x i32> %a2, i8 %a0) ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.avx512.mask.blend.d.128(<4 x i32>, <4 x i32>, i8) nounwind readonly
+
+define <2 x i64> @test_x86_mask_blend_q_128(i8 %a0, <2 x i64> %a1, <2 x i64> %a2) {
+ ; CHECK: vpblendmq
+ %res = call <2 x i64> @llvm.x86.avx512.mask.blend.q.128(<2 x i64> %a1, <2 x i64> %a2, i8 %a0) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.avx512.mask.blend.q.128(<2 x i64>, <2 x i64>, i8) nounwind readonly
diff --git a/llvm/test/MC/X86/avx512vl-encoding.s b/llvm/test/MC/X86/avx512vl-encoding.s
new file mode 100644
index 00000000000..36d14776285
--- /dev/null
+++ b/llvm/test/MC/X86/avx512vl-encoding.s
@@ -0,0 +1,226 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=skx --show-encoding %s | FileCheck %s
+
+// CHECK: vblendmpd %xmm19, %xmm20, %xmm27
+ vblendmpd %xmm19, %xmm20, %xmm27
+// CHECK: vblendmpd %xmm19, %xmm20, %xmm27 {%k7}
+ vblendmpd %xmm19, %xmm20, %xmm27 {%k7}
+// CHECK: vblendmpd %xmm19, %xmm20, %xmm27 {%k7} {z}
+ vblendmpd %xmm19, %xmm20, %xmm27 {%k7} {z}
+// CHECK: vblendmpd (%rcx), %xmm20, %xmm27
+ vblendmpd (%rcx), %xmm20, %xmm27
+// CHECK: vblendmpd 291(%rax,%r14,8), %xmm20, %xmm27
+ vblendmpd 291(%rax,%r14,8), %xmm20, %xmm27
+// CHECK: vblendmpd (%rcx){1to2}, %xmm20, %xmm27
+ vblendmpd (%rcx){1to2}, %xmm20, %xmm27
+// CHECK: vblendmpd 2032(%rdx), %xmm20, %xmm27
+ vblendmpd 2032(%rdx), %xmm20, %xmm27
+// CHECK: vblendmpd 2048(%rdx), %xmm20, %xmm27
+ vblendmpd 2048(%rdx), %xmm20, %xmm27
+// CHECK: vblendmpd -2048(%rdx), %xmm20, %xmm27
+ vblendmpd -2048(%rdx), %xmm20, %xmm27
+// CHECK: vblendmpd -2064(%rdx), %xmm20, %xmm27
+ vblendmpd -2064(%rdx), %xmm20, %xmm27
+// CHECK: vblendmpd 1016(%rdx){1to2}, %xmm20, %xmm27
+ vblendmpd 1016(%rdx){1to2}, %xmm20, %xmm27
+// CHECK: vblendmpd 1024(%rdx){1to2}, %xmm20, %xmm27
+ vblendmpd 1024(%rdx){1to2}, %xmm20, %xmm27
+// CHECK: vblendmpd -1024(%rdx){1to2}, %xmm20, %xmm27
+ vblendmpd -1024(%rdx){1to2}, %xmm20, %xmm27
+// CHECK: vblendmpd -1032(%rdx){1to2}, %xmm20, %xmm27
+ vblendmpd -1032(%rdx){1to2}, %xmm20, %xmm27
+// CHECK: vblendmpd %ymm23, %ymm21, %ymm28
+ vblendmpd %ymm23, %ymm21, %ymm28
+// CHECK: vblendmpd %ymm23, %ymm21, %ymm28 {%k3}
+ vblendmpd %ymm23, %ymm21, %ymm28 {%k3}
+// CHECK: vblendmpd %ymm23, %ymm21, %ymm28 {%k3} {z}
+ vblendmpd %ymm23, %ymm21, %ymm28 {%k3} {z}
+// CHECK: vblendmpd (%rcx), %ymm21, %ymm28
+ vblendmpd (%rcx), %ymm21, %ymm28
+// CHECK: vblendmpd 291(%rax,%r14,8), %ymm21, %ymm28
+ vblendmpd 291(%rax,%r14,8), %ymm21, %ymm28
+// CHECK: vblendmpd (%rcx){1to4}, %ymm21, %ymm28
+ vblendmpd (%rcx){1to4}, %ymm21, %ymm28
+// CHECK: vblendmpd 4064(%rdx), %ymm21, %ymm28
+ vblendmpd 4064(%rdx), %ymm21, %ymm28
+// CHECK: vblendmpd 4096(%rdx), %ymm21, %ymm28
+ vblendmpd 4096(%rdx), %ymm21, %ymm28
+// CHECK: vblendmpd -4096(%rdx), %ymm21, %ymm28
+ vblendmpd -4096(%rdx), %ymm21, %ymm28
+// CHECK: vblendmpd -4128(%rdx), %ymm21, %ymm28
+ vblendmpd -4128(%rdx), %ymm21, %ymm28
+// CHECK: vblendmpd 1016(%rdx){1to4}, %ymm21, %ymm28
+ vblendmpd 1016(%rdx){1to4}, %ymm21, %ymm28
+// CHECK: vblendmpd 1024(%rdx){1to4}, %ymm21, %ymm28
+ vblendmpd 1024(%rdx){1to4}, %ymm21, %ymm28
+// CHECK: vblendmpd -1024(%rdx){1to4}, %ymm21, %ymm28
+ vblendmpd -1024(%rdx){1to4}, %ymm21, %ymm28
+// CHECK: vblendmpd -1032(%rdx){1to4}, %ymm21, %ymm28
+ vblendmpd -1032(%rdx){1to4}, %ymm21, %ymm28
+// CHECK: vblendmps %xmm20, %xmm20, %xmm24
+ vblendmps %xmm20, %xmm20, %xmm24
+// CHECK: vblendmps %xmm20, %xmm20, %xmm24 {%k1}
+ vblendmps %xmm20, %xmm20, %xmm24 {%k1}
+// CHECK: vblendmps %xmm20, %xmm20, %xmm24 {%k1} {z}
+ vblendmps %xmm20, %xmm20, %xmm24 {%k1} {z}
+// CHECK: vblendmps (%rcx), %xmm20, %xmm24
+ vblendmps (%rcx), %xmm20, %xmm24
+// CHECK: vblendmps 291(%rax,%r14,8), %xmm20, %xmm24
+ vblendmps 291(%rax,%r14,8), %xmm20, %xmm24
+// CHECK: vblendmps (%rcx){1to4}, %xmm20, %xmm24
+ vblendmps (%rcx){1to4}, %xmm20, %xmm24
+// CHECK: vblendmps 2032(%rdx), %xmm20, %xmm24
+ vblendmps 2032(%rdx), %xmm20, %xmm24
+// CHECK: vblendmps 2048(%rdx), %xmm20, %xmm24
+ vblendmps 2048(%rdx), %xmm20, %xmm24
+// CHECK: vblendmps -2048(%rdx), %xmm20, %xmm24
+ vblendmps -2048(%rdx), %xmm20, %xmm24
+// CHECK: vblendmps -2064(%rdx), %xmm20, %xmm24
+ vblendmps -2064(%rdx), %xmm20, %xmm24
+// CHECK: vblendmps 508(%rdx){1to4}, %xmm20, %xmm24
+ vblendmps 508(%rdx){1to4}, %xmm20, %xmm24
+// CHECK: vblendmps 512(%rdx){1to4}, %xmm20, %xmm24
+ vblendmps 512(%rdx){1to4}, %xmm20, %xmm24
+// CHECK: vblendmps -512(%rdx){1to4}, %xmm20, %xmm24
+ vblendmps -512(%rdx){1to4}, %xmm20, %xmm24
+// CHECK: vblendmps -516(%rdx){1to4}, %xmm20, %xmm24
+ vblendmps -516(%rdx){1to4}, %xmm20, %xmm24
+// CHECK: vblendmps %ymm24, %ymm23, %ymm17
+ vblendmps %ymm24, %ymm23, %ymm17
+// CHECK: vblendmps %ymm24, %ymm23, %ymm17 {%k6}
+ vblendmps %ymm24, %ymm23, %ymm17 {%k6}
+// CHECK: vblendmps %ymm24, %ymm23, %ymm17 {%k6} {z}
+ vblendmps %ymm24, %ymm23, %ymm17 {%k6} {z}
+// CHECK: vblendmps (%rcx), %ymm23, %ymm17
+ vblendmps (%rcx), %ymm23, %ymm17
+// CHECK: vblendmps 291(%rax,%r14,8), %ymm23, %ymm17
+ vblendmps 291(%rax,%r14,8), %ymm23, %ymm17
+// CHECK: vblendmps (%rcx){1to8}, %ymm23, %ymm17
+ vblendmps (%rcx){1to8}, %ymm23, %ymm17
+// CHECK: vblendmps 4064(%rdx), %ymm23, %ymm17
+ vblendmps 4064(%rdx), %ymm23, %ymm17
+// CHECK: vblendmps 4096(%rdx), %ymm23, %ymm17
+ vblendmps 4096(%rdx), %ymm23, %ymm17
+// CHECK: vblendmps -4096(%rdx), %ymm23, %ymm17
+ vblendmps -4096(%rdx), %ymm23, %ymm17
+// CHECK: vblendmps -4128(%rdx), %ymm23, %ymm17
+ vblendmps -4128(%rdx), %ymm23, %ymm17
+// CHECK: vblendmps 508(%rdx){1to8}, %ymm23, %ymm17
+ vblendmps 508(%rdx){1to8}, %ymm23, %ymm17
+// CHECK: vblendmps 512(%rdx){1to8}, %ymm23, %ymm17
+ vblendmps 512(%rdx){1to8}, %ymm23, %ymm17
+// CHECK: vblendmps -512(%rdx){1to8}, %ymm23, %ymm17
+ vblendmps -512(%rdx){1to8}, %ymm23, %ymm17
+// CHECK: vblendmps -516(%rdx){1to8}, %ymm23, %ymm17
+ vblendmps -516(%rdx){1to8}, %ymm23, %ymm17
+// CHECK: vpblendmd %xmm26, %xmm25, %xmm17
+ vpblendmd %xmm26, %xmm25, %xmm17
+// CHECK: vpblendmd %xmm26, %xmm25, %xmm17 {%k5}
+ vpblendmd %xmm26, %xmm25, %xmm17 {%k5}
+// CHECK: vpblendmd %xmm26, %xmm25, %xmm17 {%k5} {z}
+ vpblendmd %xmm26, %xmm25, %xmm17 {%k5} {z}
+// CHECK: vpblendmd (%rcx), %xmm25, %xmm17
+ vpblendmd (%rcx), %xmm25, %xmm17
+// CHECK: vpblendmd 291(%rax,%r14,8), %xmm25, %xmm17
+ vpblendmd 291(%rax,%r14,8), %xmm25, %xmm17
+// CHECK: vpblendmd (%rcx){1to4}, %xmm25, %xmm17
+ vpblendmd (%rcx){1to4}, %xmm25, %xmm17
+// CHECK: vpblendmd 2032(%rdx), %xmm25, %xmm17
+ vpblendmd 2032(%rdx), %xmm25, %xmm17
+// CHECK: vpblendmd 2048(%rdx), %xmm25, %xmm17
+ vpblendmd 2048(%rdx), %xmm25, %xmm17
+// CHECK: vpblendmd -2048(%rdx), %xmm25, %xmm17
+ vpblendmd -2048(%rdx), %xmm25, %xmm17
+// CHECK: vpblendmd -2064(%rdx), %xmm25, %xmm17
+ vpblendmd -2064(%rdx), %xmm25, %xmm17
+// CHECK: vpblendmd 508(%rdx){1to4}, %xmm25, %xmm17
+ vpblendmd 508(%rdx){1to4}, %xmm25, %xmm17
+// CHECK: vpblendmd 512(%rdx){1to4}, %xmm25, %xmm17
+ vpblendmd 512(%rdx){1to4}, %xmm25, %xmm17
+// CHECK: vpblendmd -512(%rdx){1to4}, %xmm25, %xmm17
+ vpblendmd -512(%rdx){1to4}, %xmm25, %xmm17
+// CHECK: vpblendmd -516(%rdx){1to4}, %xmm25, %xmm17
+ vpblendmd -516(%rdx){1to4}, %xmm25, %xmm17
+// CHECK: vpblendmd %ymm23, %ymm29, %ymm26
+ vpblendmd %ymm23, %ymm29, %ymm26
+// CHECK: vpblendmd %ymm23, %ymm29, %ymm26 {%k7}
+ vpblendmd %ymm23, %ymm29, %ymm26 {%k7}
+// CHECK: vpblendmd %ymm23, %ymm29, %ymm26 {%k7} {z}
+ vpblendmd %ymm23, %ymm29, %ymm26 {%k7} {z}
+// CHECK: vpblendmd (%rcx), %ymm29, %ymm26
+ vpblendmd (%rcx), %ymm29, %ymm26
+// CHECK: vpblendmd 291(%rax,%r14,8), %ymm29, %ymm26
+ vpblendmd 291(%rax,%r14,8), %ymm29, %ymm26
+// CHECK: vpblendmd (%rcx){1to8}, %ymm29, %ymm26
+ vpblendmd (%rcx){1to8}, %ymm29, %ymm26
+// CHECK: vpblendmd 4064(%rdx), %ymm29, %ymm26
+ vpblendmd 4064(%rdx), %ymm29, %ymm26
+// CHECK: vpblendmd 4096(%rdx), %ymm29, %ymm26
+ vpblendmd 4096(%rdx), %ymm29, %ymm26
+// CHECK: vpblendmd -4096(%rdx), %ymm29, %ymm26
+ vpblendmd -4096(%rdx), %ymm29, %ymm26
+// CHECK: vpblendmd -4128(%rdx), %ymm29, %ymm26
+ vpblendmd -4128(%rdx), %ymm29, %ymm26
+// CHECK: vpblendmd 508(%rdx){1to8}, %ymm29, %ymm26
+ vpblendmd 508(%rdx){1to8}, %ymm29, %ymm26
+// CHECK: vpblendmd 512(%rdx){1to8}, %ymm29, %ymm26
+ vpblendmd 512(%rdx){1to8}, %ymm29, %ymm26
+// CHECK: vpblendmd -512(%rdx){1to8}, %ymm29, %ymm26
+ vpblendmd -512(%rdx){1to8}, %ymm29, %ymm26
+// CHECK: vpblendmd -516(%rdx){1to8}, %ymm29, %ymm26
+ vpblendmd -516(%rdx){1to8}, %ymm29, %ymm26
+// CHECK: vpblendmq %xmm17, %xmm27, %xmm29
+ vpblendmq %xmm17, %xmm27, %xmm29
+// CHECK: vpblendmq %xmm17, %xmm27, %xmm29 {%k6}
+ vpblendmq %xmm17, %xmm27, %xmm29 {%k6}
+// CHECK: vpblendmq %xmm17, %xmm27, %xmm29 {%k6} {z}
+ vpblendmq %xmm17, %xmm27, %xmm29 {%k6} {z}
+// CHECK: vpblendmq (%rcx), %xmm27, %xmm29
+ vpblendmq (%rcx), %xmm27, %xmm29
+// CHECK: vpblendmq 291(%rax,%r14,8), %xmm27, %xmm29
+ vpblendmq 291(%rax,%r14,8), %xmm27, %xmm29
+// CHECK: vpblendmq (%rcx){1to2}, %xmm27, %xmm29
+ vpblendmq (%rcx){1to2}, %xmm27, %xmm29
+// CHECK: vpblendmq 2032(%rdx), %xmm27, %xmm29
+ vpblendmq 2032(%rdx), %xmm27, %xmm29
+// CHECK: vpblendmq 2048(%rdx), %xmm27, %xmm29
+ vpblendmq 2048(%rdx), %xmm27, %xmm29
+// CHECK: vpblendmq -2048(%rdx), %xmm27, %xmm29
+ vpblendmq -2048(%rdx), %xmm27, %xmm29
+// CHECK: vpblendmq -2064(%rdx), %xmm27, %xmm29
+ vpblendmq -2064(%rdx), %xmm27, %xmm29
+// CHECK: vpblendmq 1016(%rdx){1to2}, %xmm27, %xmm29
+ vpblendmq 1016(%rdx){1to2}, %xmm27, %xmm29
+// CHECK: vpblendmq 1024(%rdx){1to2}, %xmm27, %xmm29
+ vpblendmq 1024(%rdx){1to2}, %xmm27, %xmm29
+// CHECK: vpblendmq -1024(%rdx){1to2}, %xmm27, %xmm29
+ vpblendmq -1024(%rdx){1to2}, %xmm27, %xmm29
+// CHECK: vpblendmq -1032(%rdx){1to2}, %xmm27, %xmm29
+ vpblendmq -1032(%rdx){1to2}, %xmm27, %xmm29
+// CHECK: vpblendmq %ymm21, %ymm23, %ymm21
+ vpblendmq %ymm21, %ymm23, %ymm21
+// CHECK: vpblendmq %ymm21, %ymm23, %ymm21 {%k3}
+ vpblendmq %ymm21, %ymm23, %ymm21 {%k3}
+// CHECK: vpblendmq %ymm21, %ymm23, %ymm21 {%k3} {z}
+ vpblendmq %ymm21, %ymm23, %ymm21 {%k3} {z}
+// CHECK: vpblendmq (%rcx), %ymm23, %ymm21
+ vpblendmq (%rcx), %ymm23, %ymm21
+// CHECK: vpblendmq 291(%rax,%r14,8), %ymm23, %ymm21
+ vpblendmq 291(%rax,%r14,8), %ymm23, %ymm21
+// CHECK: vpblendmq (%rcx){1to4}, %ymm23, %ymm21
+ vpblendmq (%rcx){1to4}, %ymm23, %ymm21
+// CHECK: vpblendmq 4064(%rdx), %ymm23, %ymm21
+ vpblendmq 4064(%rdx), %ymm23, %ymm21
+// CHECK: vpblendmq 4096(%rdx), %ymm23, %ymm21
+ vpblendmq 4096(%rdx), %ymm23, %ymm21
+// CHECK: vpblendmq -4096(%rdx), %ymm23, %ymm21
+ vpblendmq -4096(%rdx), %ymm23, %ymm21
+// CHECK: vpblendmq -4128(%rdx), %ymm23, %ymm21
+ vpblendmq -4128(%rdx), %ymm23, %ymm21
+// CHECK: vpblendmq 1016(%rdx){1to4}, %ymm23, %ymm21
+ vpblendmq 1016(%rdx){1to4}, %ymm23, %ymm21
+// CHECK: vpblendmq 1024(%rdx){1to4}, %ymm23, %ymm21
+ vpblendmq 1024(%rdx){1to4}, %ymm23, %ymm21
+// CHECK: vpblendmq -1024(%rdx){1to4}, %ymm23, %ymm21
+ vpblendmq -1024(%rdx){1to4}, %ymm23, %ymm21
+// CHECK: vpblendmq -1032(%rdx){1to4}, %ymm23, %ymm21
+ vpblendmq -1032(%rdx){1to4}, %ymm23, %ymm21