path: root/llvm/test/CodeGen/X86
author    Craig Topper <craig.topper@intel.com>    2019-01-14 00:03:50 +0000
committer Craig Topper <craig.topper@intel.com>    2019-01-14 00:03:50 +0000
commit    3f3b8ef442b2b950d47b6b32fc10fbe7174f3bfb (patch)
tree      5f1436b98796aee90d143a2ab47da59779def641 /llvm/test/CodeGen/X86
parent    2fefe153dd93b52bbaf5628958dfac929088452f (diff)
[X86] Remove mask parameter from vpshufbitqmb intrinsics. Change result to a vXi1 vector.
The input mask can be represented with an AND in IR.

Fixes PR40258

llvm-svn: 351028
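For context, a minimal sketch of the rewrite this implies for callers of the old masked intrinsic (the function and value names here are illustrative, not taken from the patch): the unmasked intrinsic now returns a vXi1 vector, and the former mask operand is applied with an explicit `and`, which is exactly the pattern the updated tests below exercise.

; Old form (the intrinsic carried the mask operand):
;   %res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
;
; New form: call the unmasked intrinsic, then AND its <16 x i1> result with the bitcast mask.
define i16 @masked_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
  %bits = call <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b)
  %m = bitcast i16 %mask to <16 x i1>
  %and = and <16 x i1> %bits, %m
  %res = bitcast <16 x i1> %and to i16
  ret i16 %res
}

declare <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8>, <16 x i8>)

The same pattern applies to the 256-bit and 512-bit variants with <32 x i1>/i32 and <64 x i1>/i64 respectively; ISel folds the `and` into the masked vpshufbitqmb instruction, as the CHECK lines below show.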
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r-- llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll | 46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll b/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
index a80ed2f174e..f83f2df0c35 100644
--- a/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
@@ -1,41 +1,51 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bitalg,+avx512vl | FileCheck %s
-declare i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
-define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: test_vpshufbitqmb_128:
; CHECK: ## %bb.0:
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k1
+; CHECK-NEXT: vpshufbitqmb %xmm3, %xmm2, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
- %res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
- ret i16 %res
+ %tmp = call <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b)
+ %tmp1 = call <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8> %c, <16 x i8> %d)
+ %tmp2 = and <16 x i1> %tmp, %tmp1
+ %tmp3 = bitcast <16 x i1> %tmp2 to i16
+ ret i16 %tmp3
}
-declare i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
-define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; CHECK-LABEL: test_vpshufbitqmb_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: kmovd %edi, %k1
-; CHECK-NEXT: vpshufbitqmb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: vpshufbitqmb %ymm1, %ymm0, %k1
+; CHECK-NEXT: vpshufbitqmb %ymm3, %ymm2, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
- %res = call i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
- ret i32 %res
+ %tmp = call <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b)
+ %tmp1 = call <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8> %c, <32 x i8> %d)
+ %tmp2 = and <32 x i1> %tmp, %tmp1
+ %tmp3 = bitcast <32 x i1> %tmp2 to i32
+ ret i32 %tmp3
}
-declare i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
-define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
+define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; CHECK-LABEL: test_vpshufbitqmb_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: kmovq %rdi, %k1
-; CHECK-NEXT: vpshufbitqmb %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: vpshufbitqmb %zmm1, %zmm0, %k1
+; CHECK-NEXT: vpshufbitqmb %zmm3, %zmm2, %k0 {%k1}
; CHECK-NEXT: kmovq %k0, %rax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
- %res = call i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
- ret i64 %res
+ %tmp = call <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b)
+ %tmp1 = call <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8> %c, <64 x i8> %d)
+ %tmp2 = and <64 x i1> %tmp, %tmp1
+ %tmp3 = bitcast <64 x i1> %tmp2 to i64
+ ret i64 %tmp3
}
+
+declare <16 x i1> @llvm.x86.avx512.vpshufbitqmb.128(<16 x i8>, <16 x i8>)
+declare <32 x i1> @llvm.x86.avx512.vpshufbitqmb.256(<32 x i8>, <32 x i8>)
+declare <64 x i1> @llvm.x86.avx512.vpshufbitqmb.512(<64 x i8>, <64 x i8>)