Diffstat (limited to 'llvm/test/CodeGen/X86/avx512-intrinsics.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx512-intrinsics.ll | 83
1 file changed, 72 insertions(+), 11 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index 014d8a2ae5e..4860ea571d7 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -858,49 +858,57 @@ define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1) {
}
declare i16 @llvm.x86.avx512.mask.ptestm.d.512(<16 x i32>, <16 x i32>, i16)
-define void @test_store1(<16 x float> %data, i8* %ptr, i16 %mask) {
+define void @test_store1(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_store1:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovups %zmm0, (%rsi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
+ call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
ret void
}
declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
-define void @test_store2(<8 x double> %data, i8* %ptr, i8 %mask) {
+define void @test_store2(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_store2:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovupd %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovupd %zmm0, (%rsi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
+ call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
ret void
}
declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
-define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
+define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_mask_store_aligned_ps:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovaps %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovaps %zmm0, (%rsi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
+ call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
ret void
}
declare void @llvm.x86.avx512.mask.store.ps.512(i8*, <16 x float>, i16 )
-define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
+define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_mask_store_aligned_pd:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovapd %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovapd %zmm0, (%rsi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
+ call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
ret void
}
@@ -922,6 +930,62 @@ define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, i8* %ptr, i16
ret <16 x float> %res4
}
+declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)
+
+define void @test_int_x86_avx512_mask_storeu_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vmovdqu64 %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovdqu64 %zmm0, (%rsi)
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
+ call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.storeu.d.512(i8*, <16 x i32>, i16)
+
+define void @test_int_x86_avx512_mask_storeu_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovdqu32 %zmm0, (%rsi)
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
+ call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr2, <16 x i32> %x1, i16 -1)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.store.q.512(i8*, <8 x i64>, i8)
+
+define void @test_int_x86_avx512_mask_store_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_store_q_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vmovdqa64 %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, (%rsi)
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.store.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
+ call void @llvm.x86.avx512.mask.store.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.store.d.512(i8*, <16 x i32>, i16)
+
+define void @test_int_x86_avx512_mask_store_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_store_d_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vmovdqa32 %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, (%rsi)
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.store.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
+ call void @llvm.x86.avx512.mask.store.d.512(i8* %ptr2, <16 x i32> %x1, i16 -1)
+ ret void
+}
+
declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8*, <16 x float>, i16)
define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
@@ -6897,8 +6961,6 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64
ret <8 x i64> %res4
}
-
-
declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i32>, <16 x float>, i16)
define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
@@ -6938,4 +7000,3 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}
-
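Note: each updated or added test above stores the same vector twice, once under a runtime mask and once with an all-ones mask (-1), and the CHECK lines require the all-ones case to be selected as a plain unmasked vmov* store rather than a {%k1}-masked one. A minimal source-level sketch of the same pattern, assuming the usual <immintrin.h> intrinsics (compiled with -mavx512f) lower to the llvm.x86.avx512.mask.store*/storeu* intrinsics exercised here:

#include <immintrin.h>

/* Masked store: expected to select kmovw + vmovups (%rdi) {%k1}.
   All-ones mask: expected to fold to a plain vmovups, per the CHECK lines. */
void store_ps_example(float *masked_dst, float *plain_dst,
                      __m512 v, __mmask16 m) {
  _mm512_mask_storeu_ps(masked_dst, m, v);
  _mm512_mask_storeu_ps(plain_dst, (__mmask16)-1, v);
}

/* Same idea for the integer form: vmovdqu64 {%k1} vs. plain vmovdqu64. */
void store_epi64_example(long long *masked_dst, long long *plain_dst,
                         __m512i v, __mmask8 m) {
  _mm512_mask_storeu_epi64(masked_dst, m, v);
  _mm512_mask_storeu_epi64(plain_dst, (__mmask8)-1, v);
}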