author      Igor Breger <igor.breger@intel.com>    2016-03-06 12:38:58 +0000
committer   Igor Breger <igor.breger@intel.com>    2016-03-06 12:38:58 +0000
commit      4d94d4d5f7c82a543cebe0b7413aa9b0b90ae6ec (patch)
tree        0a21c7b531228a0ac9a00a0b3ac4a1f95311ccd6 /llvm/test
parent      c0531a4a21e809397b84ea89bd3a6ba5abbcfdc0 (diff)
AVX512BW: Support llvm intrinsic masked vector load/store for i8/i16 element types on SKX
Differential Revision: http://reviews.llvm.org/D17913

llvm-svn: 262803
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/avx512bw-mov.ll  | 112
-rw-r--r--  llvm/test/CodeGen/X86/masked_memop.ll  | 146
2 files changed, 258 insertions(+), 0 deletions(-)
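
For context (not part of this commit), here is a minimal C sketch of the kind of source that reaches these lowerings. With AVX512BW and AVX512VL enabled, clang lowers the masked byte/word load/store intrinsics from immintrin.h to the @llvm.masked.load/@llvm.masked.store IR intrinsics exercised by the tests below, which in turn select vmovdqu8/vmovdqu16 with a {%k} mask on SKX. The function names load16_masked/store16_masked are illustrative only; _mm_maskz_loadu_epi8 and _mm_mask_storeu_epi8 are the standard AVX512BW+VL intrinsics.

    #include <immintrin.h>

    /* Illustrative sketch; compile with: clang -O2 -mavx512bw -mavx512vl */

    /* Zero-masked load of 16 bytes: lanes whose mask bit is 0 become 0,
       matching the {%k1} {z} forms in the CHECK lines below. */
    __m128i load16_masked(const char *p, __mmask16 m) {
        return _mm_maskz_loadu_epi8(m, p);   /* -> vmovdqu8 (%rdi), %xmm0 {%k1} {z} */
    }

    /* Masked store of 16 bytes: only lanes whose mask bit is 1 are written. */
    void store16_masked(char *p, __mmask16 m, __m128i v) {
        _mm_mask_storeu_epi8(p, m, v);       /* -> vmovdqu8 %xmm1, (%rdi) {%k1} */
    }
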
diff --git a/llvm/test/CodeGen/X86/avx512bw-mov.ll b/llvm/test/CodeGen/X86/avx512bw-mov.ll
index 519b649ff53..2580796e974 100644
--- a/llvm/test/CodeGen/X86/avx512bw-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-mov.ll
@@ -79,3 +79,115 @@ define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
%res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> zeroinitializer
ret <32 x i16>%res
}
+
+define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
+; CHECK-LABEL: test_mask_load_16xi8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftlq $48, %k0, %k0
+; CHECK-NEXT: kshiftrq $48, %k0, %k1
+; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
+ ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+
+define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
+; CHECK-LABEL: test_mask_load_32xi8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftlq $32, %k0, %k0
+; CHECK-NEXT: kshiftrq $32, %k0, %k1
+; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
+ ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
+
+define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
+; CHECK-LABEL: test_mask_load_8xi16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
+; CHECK-NEXT: vpmovw2m %zmm0, %k0
+; CHECK-NEXT: kshiftld $24, %k0, %k0
+; CHECK-NEXT: kshiftrd $24, %k0, %k1
+; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
+
+define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
+; CHECK-LABEL: test_mask_load_16xi16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftld $16, %k0, %k0
+; CHECK-NEXT: kshiftrd $16, %k0, %k1
+; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
+ ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)
+
+define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
+; CHECK-LABEL: test_mask_store_16xi8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftlq $48, %k0, %k0
+; CHECK-NEXT: kshiftrq $48, %k0, %k1
+; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.masked.store.v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+
+define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
+; CHECK-LABEL: test_mask_store_32xi8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftlq $32, %k0, %k0
+; CHECK-NEXT: kshiftrq $32, %k0, %k1
+; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.masked.store.v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)
+
+define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
+; CHECK-LABEL: test_mask_store_8xi16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
+; CHECK-NEXT: vpmovw2m %zmm0, %k0
+; CHECK-NEXT: kshiftld $24, %k0, %k0
+; CHECK-NEXT: kshiftrd $24, %k0, %k1
+; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.masked.store.v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
+
+define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
+; CHECK-LABEL: test_mask_store_16xi16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT: vpmovb2m %zmm0, %k0
+; CHECK-NEXT: kshiftld $16, %k0, %k0
+; CHECK-NEXT: kshiftrd $16, %k0, %k1
+; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.masked.store.v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)
diff --git a/llvm/test/CodeGen/X86/masked_memop.ll b/llvm/test/CodeGen/X86/masked_memop.ll
index 091219a858a..247560bb0b3 100644
--- a/llvm/test/CodeGen/X86/masked_memop.ll
+++ b/llvm/test/CodeGen/X86/masked_memop.ll
@@ -2168,3 +2168,149 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
ret <32 x double> %res
}
declare <32 x double> @llvm.masked.load.v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask, <32 x double> %src0)
+
+define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
+; SKX-LABEL: test_mask_load_16xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
+; SKX-NEXT: vpmovb2m %xmm0, %k1
+; SKX-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z}
+; SKX-NEXT: retq
+ %res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
+ ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+
+define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
+; SKX-LABEL: test_mask_load_32xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
+; SKX-NEXT: vpmovb2m %ymm0, %k1
+; SKX-NEXT: vmovdqu8 (%rdi), %ymm0 {%k1} {z}
+; SKX-NEXT: retq
+ %res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
+ ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
+
+define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
+; SKX-LABEL: test_mask_load_64xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k1
+; SKX-NEXT: vmovdqu8 (%rdi), %zmm1 {%k1}
+; SKX-NEXT: vmovaps %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64 x i8> %val)
+ ret <64 x i8> %res
+}
+declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)
+
+define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
+; SKX-LABEL: test_mask_load_8xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
+; SKX-NEXT: vpmovw2m %xmm0, %k1
+; SKX-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z}
+; SKX-NEXT: retq
+ %res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
+
+define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
+; SKX-LABEL: test_mask_load_16xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
+; SKX-NEXT: vpmovb2m %xmm0, %k1
+; SKX-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
+; SKX-NEXT: retq
+ %res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
+ ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)
+
+define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
+; SKX-LABEL: test_mask_load_32xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
+; SKX-NEXT: vpmovb2m %ymm0, %k1
+; SKX-NEXT: vmovdqu16 (%rdi), %zmm1 {%k1}
+; SKX-NEXT: vmovaps %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %addr, i32 4, <32 x i1>%mask, <32 x i16> %val)
+ ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)
+
+define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
+; SKX-LABEL: test_mask_store_16xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
+; SKX-NEXT: vpmovb2m %xmm0, %k1
+; SKX-NEXT: vmovdqu8 %xmm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+
+define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
+; SKX-LABEL: test_mask_store_32xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
+; SKX-NEXT: vpmovb2m %ymm0, %k1
+; SKX-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)
+
+define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
+; SKX-LABEL: test_mask_store_64xi8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k1
+; SKX-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)
+
+define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
+; SKX-LABEL: test_mask_store_8xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
+; SKX-NEXT: vpmovw2m %xmm0, %k1
+; SKX-NEXT: vmovdqu16 %xmm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
+
+define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
+; SKX-LABEL: test_mask_store_16xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
+; SKX-NEXT: vpmovb2m %xmm0, %k1
+; SKX-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)
+
+define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
+; SKX-LABEL: test_mask_store_32xi16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
+; SKX-NEXT: vpmovb2m %ymm0, %k1
+; SKX-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
+; SKX-NEXT: retq
+ call void @llvm.masked.store.v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1>%mask)
+ ret void
+}
+declare void @llvm.masked.store.v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>)
\ No newline at end of file