path: root/llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
author     Craig Topper <craig.topper@intel.com>   2019-01-07 19:10:12 +0000
committer  Craig Topper <craig.topper@intel.com>   2019-01-07 19:10:12 +0000
commit     9c4f7e9147679889dba6e7dd80325cb5f3af613e (patch)
tree       d1381c6ff39ac719d349d7a3ae71f27033ae3699 /llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
parent     f192cdb5c9a5ae46a1a0e11a1e67aab33cd4798b (diff)
download   bcm5719-llvm-9c4f7e9147679889dba6e7dd80325cb5f3af613e.tar.gz
           bcm5719-llvm-9c4f7e9147679889dba6e7dd80325cb5f3af613e.zip
[X86] Remove AVX512VBMI2 concat and shift intrinsics. Replace with target independent funnel shift intrinsics.
Differential Revision: https://reviews.llvm.org/D56377

llvm-svn: 350554
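As a point of reference, the following is a minimal sketch (illustrative only, not part of this commit; the function names are made up) of the rewrite these tests now exercise. The immediate-count VPSHLD/VPSHRD intrinsic calls are expressed with the target-independent llvm.fshl/llvm.fshr funnel-shift intrinsics: the scalar immediate is splatted into a vector count, and for the right-shift ("shrd") forms the first two operands are swapped. Because funnel-shift counts are taken modulo the element width, a count of 31 on 16-bit elements is equivalent to 15, which is why the checked vpshldw/vpshrdw immediates change from $31 to $15 below.

; Sketch: mapping from the removed X86 intrinsics to generic funnel shifts.
;   llvm.x86.avx512.vpshld.q.256(A, B, imm) -> llvm.fshl.v4i64(A, B, splat(imm))
;   llvm.x86.avx512.vpshrd.q.256(A, B, imm) -> llvm.fshr.v4i64(B, A, splat(imm))

define <4 x i64> @shldi_sketch(<4 x i64> %a, <4 x i64> %b) {
  ; Per lane: concatenate %a (high) and %b (low), shift left by 47, keep the high 64 bits.
  %r = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
  ret <4 x i64> %r
}

define <4 x i64> @shrdi_sketch(<4 x i64> %a, <4 x i64> %b) {
  ; Per lane: concatenate %b (high) and %a (low), shift right by 47, keep the low 64 bits.
  %r = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %b, <4 x i64> %a, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
  ret <4 x i64> %r
}

declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)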
Diffstat (limited to 'llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll  340
1 file changed, 188 insertions(+), 152 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
index e74f1e414bc..03594dfa36a 100644
--- a/llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vbmi2vl-intrinsics-fast-isel.ll
@@ -591,14 +591,14 @@ define <4 x i64> @test_mm256_mask_shldi_epi64(<4 x i64> %__S, i8 zeroext %__U, <
; X64-NEXT: vpshldq $47, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 47)
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
ret <4 x i64> %2
}
-declare <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64>, <4 x i64>, i32)
+declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
define <4 x i64> @test_mm256_maskz_shldi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi64:
@@ -614,7 +614,7 @@ define <4 x i64> @test_mm256_maskz_shldi_epi64(i8 zeroext %__U, <4 x i64> %__A,
; X64-NEXT: vpshldq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -627,7 +627,7 @@ define <4 x i64> @test_mm256_shldi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-NEXT: vpshldq $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshld.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> <i64 31, i64 31, i64 31, i64 31>)
ret <4 x i64> %0
}
@@ -645,14 +645,14 @@ define <2 x i64> @test_mm_mask_shldi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x
; X64-NEXT: vpshldq $47, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 47)
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 47, i64 47>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
%2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
ret <2 x i64> %2
}
-declare <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
define <2 x i64> @test_mm_maskz_shldi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi64:
@@ -668,7 +668,7 @@ define <2 x i64> @test_mm_maskz_shldi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2
; X64-NEXT: vpshldq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 63, i64 63>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
%2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -681,7 +681,7 @@ define <2 x i64> @test_mm_shldi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-NEXT: vpshldq $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshld.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> <i64 31, i64 31>)
ret <2 x i64> %0
}
@@ -701,7 +701,7 @@ define <4 x i64> @test_mm256_mask_shldi_epi32(<4 x i64> %__S, i8 zeroext %__U, <
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 7)
+ %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
%3 = bitcast <4 x i64> %__S to <8 x i32>
%4 = bitcast i8 %__U to <8 x i1>
%5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -709,7 +709,7 @@ entry:
ret <4 x i64> %6
}
-declare <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32>, <8 x i32>, i32)
+declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
define <4 x i64> @test_mm256_maskz_shldi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi32:
@@ -727,7 +727,7 @@ define <4 x i64> @test_mm256_maskz_shldi_epi32(i8 zeroext %__U, <4 x i64> %__A,
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 15)
+ %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
%3 = bitcast i8 %__U to <8 x i1>
%4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
%5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -742,7 +742,7 @@ define <4 x i64> @test_mm256_shldi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshld.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
+ %2 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
%3 = bitcast <8 x i32> %2 to <4 x i64>
ret <4 x i64> %3
}
@@ -763,7 +763,7 @@ define <2 x i64> @test_mm_mask_shldi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 7)
+ %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 7, i32 7, i32 7, i32 7>)
%3 = bitcast <2 x i64> %__S to <4 x i32>
%4 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -772,7 +772,7 @@ entry:
ret <2 x i64> %6
}
-declare <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
define <2 x i64> @test_mm_maskz_shldi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi32:
@@ -790,7 +790,7 @@ define <2 x i64> @test_mm_maskz_shldi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 15)
+ %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 15, i32 15, i32 15, i32 15>)
%3 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -806,7 +806,7 @@ define <2 x i64> @test_mm_shldi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshld.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
+ %2 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
%3 = bitcast <4 x i32> %2 to <2 x i64>
ret <2 x i64> %3
}
@@ -826,7 +826,7 @@ define <4 x i64> @test_mm256_mask_shldi_epi16(<4 x i64> %__S, i16 zeroext %__U,
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 3)
+ %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
%3 = bitcast <4 x i64> %__S to <16 x i16>
%4 = bitcast i16 %__U to <16 x i1>
%5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
@@ -834,7 +834,7 @@ entry:
ret <4 x i64> %6
}
-declare <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16>, <16 x i16>, i32)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
define <4 x i64> @test_mm256_maskz_shldi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shldi_epi16:
@@ -851,7 +851,7 @@ define <4 x i64> @test_mm256_maskz_shldi_epi16(i16 zeroext %__U, <4 x i64> %__A,
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 7)
+ %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
%3 = bitcast i16 %__U to <16 x i1>
%4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
%5 = bitcast <16 x i16> %4 to <4 x i64>
@@ -861,12 +861,12 @@ entry:
define <4 x i64> @test_mm256_shldi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shldi_epi16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpshldw $31, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpshldw $15, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshld.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
+ %2 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
%3 = bitcast <16 x i16> %2 to <4 x i64>
ret <4 x i64> %3
}
@@ -887,7 +887,7 @@ define <2 x i64> @test_mm_mask_shldi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 3)
+ %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
%3 = bitcast <2 x i64> %__S to <8 x i16>
%4 = bitcast i8 %__U to <8 x i1>
%5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
@@ -895,7 +895,7 @@ entry:
ret <2 x i64> %6
}
-declare <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
define <2 x i64> @test_mm_maskz_shldi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shldi_epi16:
@@ -913,7 +913,7 @@ define <2 x i64> @test_mm_maskz_shldi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 7)
+ %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
%3 = bitcast i8 %__U to <8 x i1>
%4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
%5 = bitcast <8 x i16> %4 to <2 x i64>
@@ -923,12 +923,12 @@ entry:
define <2 x i64> @test_mm_shldi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shldi_epi16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpshldw $31, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshldw $15, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshld.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
+ %2 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
%3 = bitcast <8 x i16> %2 to <2 x i64>
ret <2 x i64> %3
}
@@ -947,14 +947,14 @@ define <4 x i64> @test_mm256_mask_shrdi_epi64(<4 x i64> %__S, i8 zeroext %__U, <
; X64-NEXT: vpshrdq $47, %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 47)
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 47, i64 47, i64 47, i64 47>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__S
ret <4 x i64> %2
}
-declare <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64>, <4 x i64>, i32)
+declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
define <4 x i64> @test_mm256_maskz_shrdi_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi64:
@@ -970,7 +970,7 @@ define <4 x i64> @test_mm256_maskz_shrdi_epi64(i8 zeroext %__U, <4 x i64> %__A,
; X64-NEXT: vpshrdq $63, %ymm1, %ymm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 63)
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 63, i64 63, i64 63, i64 63>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
@@ -983,7 +983,7 @@ define <4 x i64> @test_mm256_shrdi_epi64(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-NEXT: vpshrdq $31, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.vpshrd.q.256(<4 x i64> %__A, <4 x i64> %__B, i32 31)
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__B, <4 x i64> %__A, <4 x i64> <i64 31, i64 31, i64 31, i64 31>)
ret <4 x i64> %0
}
@@ -1001,14 +1001,14 @@ define <2 x i64> @test_mm_mask_shrdi_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x
; X64-NEXT: vpshrdq $47, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 47)
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 47, i64 47>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
%2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__S
ret <2 x i64> %2
}
-declare <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
define <2 x i64> @test_mm_maskz_shrdi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi64:
@@ -1024,7 +1024,7 @@ define <2 x i64> @test_mm_maskz_shrdi_epi64(i8 zeroext %__U, <2 x i64> %__A, <2
; X64-NEXT: vpshrdq $63, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 63)
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 63, i64 63>)
%1 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
%2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
@@ -1037,7 +1037,7 @@ define <2 x i64> @test_mm_shrdi_epi64(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-NEXT: vpshrdq $31, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.vpshrd.q.128(<2 x i64> %__A, <2 x i64> %__B, i32 31)
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__B, <2 x i64> %__A, <2 x i64> <i64 31, i64 31>)
ret <2 x i64> %0
}
@@ -1057,7 +1057,7 @@ define <4 x i64> @test_mm256_mask_shrdi_epi32(<4 x i64> %__S, i8 zeroext %__U, <
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 7)
+ %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>)
%3 = bitcast <4 x i64> %__S to <8 x i32>
%4 = bitcast i8 %__U to <8 x i1>
%5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
@@ -1065,7 +1065,7 @@ entry:
ret <4 x i64> %6
}
-declare <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32>, <8 x i32>, i32)
+declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
define <4 x i64> @test_mm256_maskz_shrdi_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi32:
@@ -1083,7 +1083,7 @@ define <4 x i64> @test_mm256_maskz_shrdi_epi32(i8 zeroext %__U, <4 x i64> %__A,
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 15)
+ %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>)
%3 = bitcast i8 %__U to <8 x i1>
%4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
%5 = bitcast <8 x i32> %4 to <4 x i64>
@@ -1098,7 +1098,7 @@ define <4 x i64> @test_mm256_shrdi_epi32(<4 x i64> %__A, <4 x i64> %__B) {
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
%1 = bitcast <4 x i64> %__B to <8 x i32>
- %2 = tail call <8 x i32> @llvm.x86.avx512.vpshrd.d.256(<8 x i32> %0, <8 x i32> %1, i32 31)
+ %2 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>)
%3 = bitcast <8 x i32> %2 to <4 x i64>
ret <4 x i64> %3
}
@@ -1119,7 +1119,7 @@ define <2 x i64> @test_mm_mask_shrdi_epi32(<2 x i64> %__S, i8 zeroext %__U, <2 x
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 7)
+ %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 7, i32 7, i32 7, i32 7>)
%3 = bitcast <2 x i64> %__S to <4 x i32>
%4 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1128,7 +1128,7 @@ entry:
ret <2 x i64> %6
}
-declare <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
define <2 x i64> @test_mm_maskz_shrdi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi32:
@@ -1146,7 +1146,7 @@ define <2 x i64> @test_mm_maskz_shrdi_epi32(i8 zeroext %__U, <2 x i64> %__A, <2
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 15)
+ %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 15, i32 15, i32 15, i32 15>)
%3 = bitcast i8 %__U to <8 x i1>
%extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%4 = select <4 x i1> %extract, <4 x i32> %2, <4 x i32> zeroinitializer
@@ -1162,7 +1162,7 @@ define <2 x i64> @test_mm_shrdi_epi32(<2 x i64> %__A, <2 x i64> %__B) {
entry:
%0 = bitcast <2 x i64> %__A to <4 x i32>
%1 = bitcast <2 x i64> %__B to <4 x i32>
- %2 = tail call <4 x i32> @llvm.x86.avx512.vpshrd.d.128(<4 x i32> %0, <4 x i32> %1, i32 31)
+ %2 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
%3 = bitcast <4 x i32> %2 to <2 x i64>
ret <2 x i64> %3
}
@@ -1182,7 +1182,7 @@ define <4 x i64> @test_mm256_mask_shrdi_epi16(<4 x i64> %__S, i16 zeroext %__U,
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 3)
+ %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
%3 = bitcast <4 x i64> %__S to <16 x i16>
%4 = bitcast i16 %__U to <16 x i1>
%5 = select <16 x i1> %4, <16 x i16> %2, <16 x i16> %3
@@ -1190,7 +1190,7 @@ entry:
ret <4 x i64> %6
}
-declare <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16>, <16 x i16>, i32)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
define <4 x i64> @test_mm256_maskz_shrdi_epi16(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X86-LABEL: test_mm256_maskz_shrdi_epi16:
@@ -1207,7 +1207,7 @@ define <4 x i64> @test_mm256_maskz_shrdi_epi16(i16 zeroext %__U, <4 x i64> %__A,
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 7)
+ %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
%3 = bitcast i16 %__U to <16 x i1>
%4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
%5 = bitcast <16 x i16> %4 to <4 x i64>
@@ -1217,12 +1217,12 @@ entry:
define <4 x i64> @test_mm256_shrdi_epi16(<4 x i64> %__A, <4 x i64> %__B) {
; CHECK-LABEL: test_mm256_shrdi_epi16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpshrdw $31, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpshrdw $15, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = bitcast <4 x i64> %__A to <16 x i16>
%1 = bitcast <4 x i64> %__B to <16 x i16>
- %2 = tail call <16 x i16> @llvm.x86.avx512.vpshrd.w.256(<16 x i16> %0, <16 x i16> %1, i32 31)
+ %2 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
%3 = bitcast <16 x i16> %2 to <4 x i64>
ret <4 x i64> %3
}
@@ -1243,7 +1243,7 @@ define <2 x i64> @test_mm_mask_shrdi_epi16(<2 x i64> %__S, i8 zeroext %__U, <2 x
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 3)
+ %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
%3 = bitcast <2 x i64> %__S to <8 x i16>
%4 = bitcast i8 %__U to <8 x i1>
%5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
@@ -1251,7 +1251,7 @@ entry:
ret <2 x i64> %6
}
-declare <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
define <2 x i64> @test_mm_maskz_shrdi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X86-LABEL: test_mm_maskz_shrdi_epi16:
@@ -1269,7 +1269,7 @@ define <2 x i64> @test_mm_maskz_shrdi_epi16(i8 zeroext %__U, <2 x i64> %__A, <2
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 7)
+ %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>)
%3 = bitcast i8 %__U to <8 x i1>
%4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
%5 = bitcast <8 x i16> %4 to <2 x i64>
@@ -1279,12 +1279,12 @@ entry:
define <2 x i64> @test_mm_shrdi_epi16(<2 x i64> %__A, <2 x i64> %__B) {
; CHECK-LABEL: test_mm_shrdi_epi16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpshrdw $31, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshrdw $15, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = bitcast <2 x i64> %__A to <8 x i16>
%1 = bitcast <2 x i64> %__B to <8 x i16>
- %2 = tail call <8 x i16> @llvm.x86.avx512.vpshrd.w.128(<8 x i16> %0, <8 x i16> %1, i32 31)
+ %2 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>)
%3 = bitcast <8 x i16> %2 to <2 x i64>
ret <2 x i64> %3
}
@@ -1303,8 +1303,11 @@ define <4 x i64> @test_mm256_mask_shldv_epi64(<4 x i64> %__S, i8 zeroext %__U, <
; X64-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
- ret <4 x i64> %0
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__S
+ ret <4 x i64> %2
}
define <4 x i64> @test_mm256_maskz_shldv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1321,8 +1324,11 @@ define <4 x i64> @test_mm256_maskz_shldv_epi64(i8 zeroext %__U, <4 x i64> %__S,
; X64-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
- ret <4 x i64> %0
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
}
define <4 x i64> @test_mm256_shldv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1331,7 +1337,7 @@ define <4 x i64> @test_mm256_shldv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i6
; CHECK-NEXT: vpshldvq %ymm2, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+ %0 = tail call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B)
ret <4 x i64> %0
}
@@ -1349,8 +1355,11 @@ define <2 x i64> @test_mm_mask_shldv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x
; X64-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
- ret <2 x i64> %0
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__S
+ ret <2 x i64> %2
}
define <2 x i64> @test_mm_maskz_shldv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1367,8 +1376,11 @@ define <2 x i64> @test_mm_maskz_shldv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2
; X64-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
- ret <2 x i64> %0
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
}
define <2 x i64> @test_mm_shldv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1377,7 +1389,7 @@ define <2 x i64> @test_mm_shldv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64>
; CHECK-NEXT: vpshldvq %xmm2, %xmm1, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+ %0 = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B)
ret <2 x i64> %0
}
@@ -1398,9 +1410,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
- %4 = bitcast <8 x i32> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %0
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_maskz_shldv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1420,9 +1434,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
- %4 = bitcast <8 x i32> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> zeroinitializer
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_shldv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1434,7 +1450,7 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+ %3 = tail call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2)
%4 = bitcast <8 x i32> %3 to <4 x i64>
ret <4 x i64> %4
}
@@ -1456,9 +1472,12 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
- %4 = bitcast <4 x i32> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %0
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_maskz_shldv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1478,9 +1497,12 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
- %4 = bitcast <4 x i32> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> zeroinitializer
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_shldv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1492,7 +1514,7 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+ %3 = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
%4 = bitcast <4 x i32> %3 to <2 x i64>
ret <2 x i64> %4
}
@@ -1513,9 +1535,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
- %4 = bitcast <16 x i16> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %0
+ %6 = bitcast <16 x i16> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_maskz_shldv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1534,9 +1558,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
- %4 = bitcast <16 x i16> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
+ %6 = bitcast <16 x i16> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_shldv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1548,7 +1574,7 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+ %3 = tail call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2)
%4 = bitcast <16 x i16> %3 to <4 x i64>
ret <4 x i64> %4
}
@@ -1570,9 +1596,11 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
- %4 = bitcast <8 x i16> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %0
+ %6 = bitcast <8 x i16> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_maskz_shldv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1592,9 +1620,11 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
- %4 = bitcast <8 x i16> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
+ %6 = bitcast <8 x i16> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_shldv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1606,7 +1636,7 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+ %3 = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
%4 = bitcast <8 x i16> %3 to <2 x i64>
ret <2 x i64> %4
}
@@ -1625,8 +1655,11 @@ define <4 x i64> @test_mm256_mask_shrdv_epi64(<4 x i64> %__S, i8 zeroext %__U, <
; X64-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
- ret <4 x i64> %0
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__S
+ ret <4 x i64> %2
}
define <4 x i64> @test_mm256_maskz_shrdv_epi64(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1643,8 +1676,11 @@ define <4 x i64> @test_mm256_maskz_shrdv_epi64(i8 zeroext %__U, <4 x i64> %__S,
; X64-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 %__U)
- ret <4 x i64> %0
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
}
define <4 x i64> @test_mm256_shrdv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1653,7 +1689,7 @@ define <4 x i64> @test_mm256_shrdv_epi64(<4 x i64> %__S, <4 x i64> %__A, <4 x i6
; CHECK-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B, i8 -1)
+ %0 = tail call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %__A, <4 x i64> %__S, <4 x i64> %__B)
ret <4 x i64> %0
}
@@ -1671,8 +1707,11 @@ define <2 x i64> @test_mm_mask_shrdv_epi64(<2 x i64> %__S, i8 zeroext %__U, <2 x
; X64-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
- ret <2 x i64> %0
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__S
+ ret <2 x i64> %2
}
define <2 x i64> @test_mm_maskz_shrdv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1689,8 +1728,11 @@ define <2 x i64> @test_mm_maskz_shrdv_epi64(i8 zeroext %__U, <2 x i64> %__S, <2
; X64-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 %__U)
- ret <2 x i64> %0
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
}
define <2 x i64> @test_mm_shrdv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1699,7 +1741,7 @@ define <2 x i64> @test_mm_shrdv_epi64(<2 x i64> %__S, <2 x i64> %__A, <2 x i64>
; CHECK-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
entry:
- %0 = tail call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B, i8 -1)
+ %0 = tail call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %__A, <2 x i64> %__S, <2 x i64> %__B)
ret <2 x i64> %0
}
@@ -1720,9 +1762,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
- %4 = bitcast <8 x i32> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> %0
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_maskz_shrdv_epi32(i8 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1742,9 +1786,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 %__U)
- %4 = bitcast <8 x i32> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> zeroinitializer
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_shrdv_epi32(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1756,7 +1802,7 @@ entry:
%0 = bitcast <4 x i64> %__S to <8 x i32>
%1 = bitcast <4 x i64> %__A to <8 x i32>
%2 = bitcast <4 x i64> %__B to <8 x i32>
- %3 = tail call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, i8 -1)
+ %3 = tail call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %1, <8 x i32> %0, <8 x i32> %2)
%4 = bitcast <8 x i32> %3 to <4 x i64>
ret <4 x i64> %4
}
@@ -1778,9 +1824,12 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
- %4 = bitcast <4 x i32> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> %0
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_maskz_shrdv_epi32(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1800,9 +1849,12 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 %__U)
- %4 = bitcast <4 x i32> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %3, <4 x i32> zeroinitializer
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_shrdv_epi32(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1814,7 +1866,7 @@ entry:
%0 = bitcast <2 x i64> %__S to <4 x i32>
%1 = bitcast <2 x i64> %__A to <4 x i32>
%2 = bitcast <2 x i64> %__B to <4 x i32>
- %3 = tail call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, i8 -1)
+ %3 = tail call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %1, <4 x i32> %0, <4 x i32> %2)
%4 = bitcast <4 x i32> %3 to <2 x i64>
ret <2 x i64> %4
}
@@ -1835,9 +1887,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
- %4 = bitcast <16 x i16> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> %0
+ %6 = bitcast <16 x i16> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_maskz_shrdv_epi16(i16 zeroext %__U, <4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1856,9 +1910,11 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 %__U)
- %4 = bitcast <16 x i16> %3 to <4 x i64>
- ret <4 x i64> %4
+ %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> zeroinitializer
+ %6 = bitcast <16 x i16> %5 to <4 x i64>
+ ret <4 x i64> %6
}
define <4 x i64> @test_mm256_shrdv_epi16(<4 x i64> %__S, <4 x i64> %__A, <4 x i64> %__B) {
@@ -1870,7 +1926,7 @@ entry:
%0 = bitcast <4 x i64> %__S to <16 x i16>
%1 = bitcast <4 x i64> %__A to <16 x i16>
%2 = bitcast <4 x i64> %__B to <16 x i16>
- %3 = tail call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %0, <16 x i16> %1, <16 x i16> %2, i16 -1)
+ %3 = tail call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %1, <16 x i16> %0, <16 x i16> %2)
%4 = bitcast <16 x i16> %3 to <4 x i64>
ret <4 x i64> %4
}
@@ -1892,9 +1948,11 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
- %4 = bitcast <8 x i16> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> %0
+ %6 = bitcast <8 x i16> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_maskz_shrdv_epi16(i8 zeroext %__U, <2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1914,9 +1972,11 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 %__U)
- %4 = bitcast <8 x i16> %3 to <2 x i64>
- ret <2 x i64> %4
+ %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> zeroinitializer
+ %6 = bitcast <8 x i16> %5 to <2 x i64>
+ ret <2 x i64> %6
}
define <2 x i64> @test_mm_shrdv_epi16(<2 x i64> %__S, <2 x i64> %__A, <2 x i64> %__B) {
@@ -1928,7 +1988,7 @@ entry:
%0 = bitcast <2 x i64> %__S to <8 x i16>
%1 = bitcast <2 x i64> %__A to <8 x i16>
%2 = bitcast <2 x i64> %__B to <8 x i16>
- %3 = tail call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2, i8 -1)
+ %3 = tail call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %1, <8 x i16> %0, <8 x i16> %2)
%4 = bitcast <8 x i16> %3 to <2 x i64>
ret <2 x i64> %4
}
@@ -1949,27 +2009,3 @@ declare <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16>, <16 x i16>, i1
declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32)
declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>)
declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>)
-declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)