author	Craig Topper <craig.topper@intel.com>	2018-06-19 18:52:15 +0000
committer	Craig Topper <craig.topper@intel.com>	2018-06-19 18:52:15 +0000
commit	858afbd16521ded4f4b4d149b26e8a96c7957a75 (patch)
tree	b4af2c421bbcacd0740bd5d2a5d17665bd9fb4dd
parent	2ca3360b11a540d88d6fbf7219d97c10d5b837c7 (diff)
download	bcm5719-llvm-858afbd16521ded4f4b4d149b26e8a96c7957a75.tar.gz
bcm5719-llvm-858afbd16521ded4f4b4d149b26e8a96c7957a75.zip
[X86] Add fast-isel tests for clang's AVX512F vector reduction intrinsics.
llvm-svn: 335068
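
Note: the test bodies below are the inlined IR that clang emits for its AVX512F reduction intrinsics (declared in avx512fintrin.h), fed to fast-isel. As a rough illustration only (this C snippet is not part of the commit, and sum_all/sum_masked are made-up names), a caller of two of the covered intrinsics could look like:

    #include <immintrin.h>

    /* Illustrative callers only. The intrinsics expand inline to the
       shufflevector/add sequences checked in the test bodies below. */
    long long sum_all(__m512i v) {
      return _mm512_reduce_add_epi64(v);         /* tree-add of 8 i64 lanes */
    }

    long long sum_masked(__mmask8 m, __m512i v) {
      /* lanes cleared by the mask contribute 0 to the sum */
      return _mm512_mask_reduce_add_epi64(m, v);
    }

The masked forms substitute the operation's identity element (0 for add/or, 1 for mul, all-ones for and) into the unselected lanes before reducing, which is why the masked checks start with a zero-masked vmovdqa64/vmovdqa32, a broadcast of 1, or a vpternlogd $255 all-ones idiom.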
-rw-r--r--	llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll	2396
1 file changed, 2396 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index d021acb3e04..7db2fd736e0 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -6313,6 +6313,2398 @@ entry:
ret void
}
+define i64 @test_mm512_reduce_add_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_add_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_add_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add.i = add <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %add.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %add.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %add4.i = add <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %add4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %add7.i = add <2 x i64> %shuffle6.i, %add4.i
+ %vecext.i = extractelement <2 x i64> %add7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_reduce_mul_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_mul_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpsrlq $32, %ymm0, %ymm2
+; X86-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
+; X86-NEXT: vpsrlq $32, %ymm1, %ymm3
+; X86-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; X86-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X86-NEXT: vpsllq $32, %ymm2, %ymm2
+; X86-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X86-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
+; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X86-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X86-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; X86-NEXT: vpsllq $32, %xmm2, %xmm2
+; X86-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X86-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
+; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X86-NEXT: vpmuludq %xmm0, %xmm3, %xmm3
+; X86-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; X86-NEXT: vpsllq $32, %xmm2, %xmm2
+; X86-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_mul_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
+; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
+; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
+; X64-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; X64-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X64-NEXT: vpsllq $32, %ymm2, %ymm2
+; X64-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X64-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
+; X64-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X64-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X64-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; X64-NEXT: vpsllq $32, %xmm2, %xmm2
+; X64-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X64-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
+; X64-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X64-NEXT: vpmuludq %xmm0, %xmm3, %xmm3
+; X64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; X64-NEXT: vpsllq $32, %xmm2, %xmm2
+; X64-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul.i = mul <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %mul.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %mul.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %mul4.i = mul <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %mul4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %mul7.i = mul <2 x i64> %shuffle6.i, %mul4.i
+ %vecext.i = extractelement <2 x i64> %mul7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_reduce_or_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_or_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_or_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %or.i = or <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %or.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %or.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %or4.i = or <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %or4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %or7.i = or <2 x i64> %shuffle6.i, %or4.i
+ %vecext.i = extractelement <2 x i64> %or7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_reduce_and_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_and_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_and_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %and.i = and <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %and.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %and.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %and4.i = and <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %and4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %and7.i = and <2 x i64> %shuffle6.i, %and4.i
+ %vecext.i = extractelement <2 x i64> %and7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_add_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_add_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_add_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> zeroinitializer
+ %shuffle.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add.i = add <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %add.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %add.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %add4.i = add <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %add4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %add7.i = add <2 x i64> %shuffle6.i, %add4.i
+ %vecext.i = extractelement <2 x i64> %add7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_mul_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_mul_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0]
+; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpsrlq $32, %ymm1, %ymm2
+; X86-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
+; X86-NEXT: vpsrlq $32, %ymm0, %ymm3
+; X86-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; X86-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X86-NEXT: vpsllq $32, %ymm2, %ymm2
+; X86-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X86-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
+; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X86-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X86-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; X86-NEXT: vpsllq $32, %xmm2, %xmm2
+; X86-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X86-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
+; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X86-NEXT: vpmuludq %xmm0, %xmm3, %xmm3
+; X86-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; X86-NEXT: vpsllq $32, %xmm2, %xmm2
+; X86-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_mul_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1]
+; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpsrlq $32, %ymm1, %ymm2
+; X64-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
+; X64-NEXT: vpsrlq $32, %ymm0, %ymm3
+; X64-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; X64-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X64-NEXT: vpsllq $32, %ymm2, %ymm2
+; X64-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X64-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
+; X64-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X64-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X64-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; X64-NEXT: vpsllq $32, %xmm2, %xmm2
+; X64-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpsrlq $32, %xmm0, %xmm2
+; X64-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
+; X64-NEXT: vpsrlq $32, %xmm1, %xmm3
+; X64-NEXT: vpmuludq %xmm0, %xmm3, %xmm3
+; X64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; X64-NEXT: vpsllq $32, %xmm2, %xmm2
+; X64-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %shuffle.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul.i = mul <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %mul.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %mul.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %mul4.i = mul <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %mul4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %mul7.i = mul <2 x i64> %shuffle6.i, %mul4.i
+ %vecext.i = extractelement <2 x i64> %mul7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_and_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_and_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_and_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %shuffle.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %and.i = and <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %and.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %and.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %and4.i = and <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %and4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %and7.i = and <2 x i64> %shuffle6.i, %and4.i
+ %vecext.i = extractelement <2 x i64> %and7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_or_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_or_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_or_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> zeroinitializer
+ %shuffle.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %or.i = or <4 x i64> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x i64> %or.i, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x i64> %or.i, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %or4.i = or <2 x i64> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x i64> %or4.i, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %or7.i = or <2 x i64> %shuffle6.i, %or4.i
+ %vecext.i = extractelement <2 x i64> %or7.i, i32 0
+ ret i64 %vecext.i
+}
+
+define i32 @test_mm512_reduce_add_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_add_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_add_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; X64-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %add.i = add <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %add.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %add.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add4.i = add <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %add4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %add7.i = add <4 x i32> %shuffle6.i, %add4.i
+ %shuffle9.i = shufflevector <4 x i32> %add7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %add10.i = add <4 x i32> %shuffle9.i, %add7.i
+ %1 = bitcast <4 x i32> %add10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %1, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_reduce_mul_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_mul_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_mul_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %mul.i = mul <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %mul.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %mul.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul4.i = mul <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %mul4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %mul7.i = mul <4 x i32> %shuffle6.i, %mul4.i
+ %shuffle9.i = shufflevector <4 x i32> %mul7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %mul10.i = mul <4 x i32> %shuffle9.i, %mul7.i
+ %1 = bitcast <4 x i32> %mul10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %1, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_reduce_or_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_or_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_or_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %or.i = or <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %or.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %or.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %or4.i = or <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %or4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %or7.i = or <4 x i32> %shuffle6.i, %or4.i
+ %shuffle9.i = shufflevector <4 x i32> %or7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %or10.i = or <4 x i32> %shuffle9.i, %or7.i
+ %1 = bitcast <4 x i32> %or10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %1, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_reduce_and_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_and_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_and_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %and.i = and <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %and.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %and.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %and4.i = and <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %and4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %and7.i = and <4 x i32> %shuffle6.i, %and4.i
+ %shuffle9.i = shufflevector <4 x i32> %and7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %and10.i = and <4 x i32> %shuffle9.i, %and7.i
+ %1 = bitcast <4 x i32> %and10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %1, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_add_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_add_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_add_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; X64-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> zeroinitializer
+ %shuffle.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %add.i = add <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %add.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %add.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add4.i = add <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %add4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %add7.i = add <4 x i32> %shuffle6.i, %add4.i
+ %shuffle9.i = shufflevector <4 x i32> %add7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %add10.i = add <4 x i32> %shuffle9.i, %add7.i
+ %3 = bitcast <4 x i32> %add10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %3, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_mul_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_mul_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vpbroadcastd {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpmulld %ymm0, %ymm1, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_mul_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpmulld %ymm0, %ymm1, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shuffle.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %mul.i = mul <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %mul.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %mul.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul4.i = mul <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %mul4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %mul7.i = mul <4 x i32> %shuffle6.i, %mul4.i
+ %shuffle9.i = shufflevector <4 x i32> %mul7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %mul10.i = mul <4 x i32> %shuffle9.i, %mul7.i
+ %3 = bitcast <4 x i32> %mul10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %3, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_and_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_and_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %shuffle.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %and.i = and <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %and.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %and.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %and4.i = and <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %and4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %and7.i = and <4 x i32> %shuffle6.i, %and4.i
+ %shuffle9.i = shufflevector <4 x i32> %and7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %and10.i = and <4 x i32> %shuffle9.i, %and7.i
+ %3 = bitcast <4 x i32> %and10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %3, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_or_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_or_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_or_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpor %xmm0, %xmm1, %xmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> zeroinitializer
+ %shuffle.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %or.i = or <8 x i32> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x i32> %or.i, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x i32> %or.i, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %or4.i = or <4 x i32> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x i32> %or4.i, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %or7.i = or <4 x i32> %shuffle6.i, %or4.i
+ %shuffle9.i = shufflevector <4 x i32> %or7.i, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %or10.i = or <4 x i32> %shuffle9.i, %or7.i
+ %3 = bitcast <4 x i32> %or10.i to <2 x i64>
+ %vecext.i = extractelement <2 x i64> %3, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define double @test_mm512_reduce_add_pd(<8 x double> %__W) {
+; X86-LABEL: test_mm512_reduce_add_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_add_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x double> %__W, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add.i = fadd <4 x double> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x double> %add.i, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x double> %add.i, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ %add4.i = fadd <2 x double> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x double> %add4.i, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %add7.i = fadd <2 x double> %add4.i, %shuffle6.i
+ %vecext.i = extractelement <2 x double> %add7.i, i32 0
+ ret double %vecext.i
+}
+
+define double @test_mm512_reduce_mul_pd(<8 x double> %__W) {
+; X86-LABEL: test_mm512_reduce_mul_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_mul_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <8 x double> %__W, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul.i = fmul <4 x double> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x double> %mul.i, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x double> %mul.i, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ %mul4.i = fmul <2 x double> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x double> %mul4.i, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %mul7.i = fmul <2 x double> %mul4.i, %shuffle6.i
+ %vecext.i = extractelement <2 x double> %mul7.i, i32 0
+ ret double %vecext.i
+}
+
+define float @test_mm512_reduce_add_ps(<16 x float> %__W) {
+; X86-LABEL: test_mm512_reduce_add_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_add_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <16 x float> %__W, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %add.i = fadd <8 x float> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x float> %add.i, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x float> %add.i, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add4.i = fadd <4 x float> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x float> %add4.i, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %add7.i = fadd <4 x float> %add4.i, %shuffle6.i
+ %shuffle9.i = shufflevector <4 x float> %add7.i, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %add10.i = fadd <4 x float> %add7.i, %shuffle9.i
+ %vecext.i = extractelement <4 x float> %add10.i, i32 0
+ ret float %vecext.i
+}
+
+define float @test_mm512_reduce_mul_ps(<16 x float> %__W) {
+; X86-LABEL: test_mm512_reduce_mul_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_mul_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle.i = shufflevector <16 x float> %__W, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %mul.i = fmul <8 x float> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x float> %mul.i, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x float> %mul.i, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul4.i = fmul <4 x float> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x float> %mul4.i, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %mul7.i = fmul <4 x float> %mul4.i, %shuffle6.i
+ %shuffle9.i = shufflevector <4 x float> %mul7.i, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %mul10.i = fmul <4 x float> %mul7.i, %shuffle9.i
+ %vecext.i = extractelement <4 x float> %mul10.i, i32 0
+ ret float %vecext.i
+}
+
+define double @test_mm512_mask_reduce_add_pd(i8 zeroext %__M, <8 x double> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_add_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movb 8(%ebp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovapd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_add_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovapd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x double> %__W, <8 x double> zeroinitializer
+ %shuffle.i = shufflevector <8 x double> %1, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x double> %1, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add.i = fadd <4 x double> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x double> %add.i, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x double> %add.i, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ %add4.i = fadd <2 x double> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x double> %add4.i, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %add7.i = fadd <2 x double> %add4.i, %shuffle6.i
+ %vecext.i = extractelement <2 x double> %add7.i, i32 0
+ ret double %vecext.i
+}
+
+define double @test_mm512_mask_reduce_mul_pd(i8 zeroext %__M, <8 x double> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_mul_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movb 8(%ebp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vbroadcastsd {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1]
+; X86-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vmulpd %ymm0, %ymm1, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_mul_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastsd {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1]
+; X64-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vmulpd %ymm0, %ymm1, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x double> %__W, <8 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
+ %shuffle.i = shufflevector <8 x double> %1, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle1.i = shufflevector <8 x double> %1, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul.i = fmul <4 x double> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <4 x double> %mul.i, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle3.i = shufflevector <4 x double> %mul.i, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ %mul4.i = fmul <2 x double> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <2 x double> %mul4.i, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %mul7.i = fmul <2 x double> %mul4.i, %shuffle6.i
+ %vecext.i = extractelement <2 x double> %mul7.i, i32 0
+ ret double %vecext.i
+}
+
+define float @test_mm512_mask_reduce_add_ps(i16 zeroext %__M, <16 x float> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_add_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vmovaps %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_add_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovaps %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i16 %__M to <16 x i1>
+ %1 = select <16 x i1> %0, <16 x float> %__W, <16 x float> zeroinitializer
+ %shuffle.i = shufflevector <16 x float> %1, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x float> %1, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %add.i = fadd <8 x float> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x float> %add.i, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x float> %add.i, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %add4.i = fadd <4 x float> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x float> %add4.i, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %add7.i = fadd <4 x float> %add4.i, %shuffle6.i
+ %shuffle9.i = shufflevector <4 x float> %add7.i, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %add10.i = fadd <4 x float> %add7.i, %shuffle9.i
+ %vecext.i = extractelement <4 x float> %add10.i, i32 0
+ ret float %vecext.i
+}
+
+define float @test_mm512_mask_reduce_mul_ps(i16 zeroext %__M, <16 x float> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_mul_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X86-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_mul_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X64-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i16 %__M to <16 x i1>
+ %1 = select <16 x i1> %0, <16 x float> %__W, <16 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+ %shuffle.i = shufflevector <16 x float> %1, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle1.i = shufflevector <16 x float> %1, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %mul.i = fmul <8 x float> %shuffle.i, %shuffle1.i
+ %shuffle2.i = shufflevector <8 x float> %mul.i, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle3.i = shufflevector <8 x float> %mul.i, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %mul4.i = fmul <4 x float> %shuffle2.i, %shuffle3.i
+ %shuffle6.i = shufflevector <4 x float> %mul4.i, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %mul7.i = fmul <4 x float> %mul4.i, %shuffle6.i
+ %shuffle9.i = shufflevector <4 x float> %mul7.i, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %mul10.i = fmul <4 x float> %mul7.i, %shuffle9.i
+ %vecext.i = extractelement <4 x float> %mul10.i, i32 0
+ ret float %vecext.i
+}
+
+define i64 @test_mm512_reduce_max_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_max_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = icmp slt <8 x i64> %shuffle1.i, %__W
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> %shuffle1.i
+ %shuffle3.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp sgt <8 x i64> %1, %shuffle3.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle3.i
+ %shuffle6.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp sgt <8 x i64> %3, %shuffle6.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle6.i
+ %vecext.i = extractelement <8 x i64> %5, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_reduce_max_epu64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_max_epu64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxuq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_epu64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxuq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = icmp ult <8 x i64> %shuffle1.i, %__W
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> %shuffle1.i
+ %shuffle3.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp ugt <8 x i64> %1, %shuffle3.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle3.i
+ %shuffle6.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp ugt <8 x i64> %3, %shuffle6.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle6.i
+ %vecext.i = extractelement <8 x i64> %5, i32 0
+ ret i64 %vecext.i
+}
+
+define double @test_mm512_reduce_max_pd(<8 x double> %__W) {
+; X86-LABEL: test_mm512_reduce_max_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %__W, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle3.i = shufflevector <8 x double> %0, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %0, <8 x double> %shuffle3.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle6.i = shufflevector <8 x double> %1, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %1, <8 x double> %shuffle6.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %vecext.i = extractelement <8 x double> %2, i32 0
+ ret double %vecext.i
+}
+
+define i64 @test_mm512_reduce_min_epi64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_min_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpminsq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpminsq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = icmp sgt <8 x i64> %shuffle1.i, %__W
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> %shuffle1.i
+ %shuffle3.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp slt <8 x i64> %1, %shuffle3.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle3.i
+ %shuffle6.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp slt <8 x i64> %3, %shuffle6.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle6.i
+ %vecext.i = extractelement <8 x i64> %5, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_reduce_min_epu64(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_min_epu64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpminuq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_epu64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpminuq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = icmp ugt <8 x i64> %shuffle1.i, %__W
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> %shuffle1.i
+ %shuffle3.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp ult <8 x i64> %1, %shuffle3.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle3.i
+ %shuffle6.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp ult <8 x i64> %3, %shuffle6.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle6.i
+ %vecext.i = extractelement <8 x i64> %5, i32 0
+ ret i64 %vecext.i
+}
+
+define double @test_mm512_reduce_min_pd(<8 x double> %__W) {
+; X86-LABEL: test_mm512_reduce_min_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %__W, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle3.i = shufflevector <8 x double> %0, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %0, <8 x double> %shuffle3.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle6.i = shufflevector <8 x double> %1, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %1, <8 x double> %shuffle6.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %vecext.i = extractelement <8 x double> %2, i32 0
+ ret double %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_max_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648,0,2147483648,0,2147483648,0,2147483648,0,2147483648]
+; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> <i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp sgt <8 x i64> %1, %shuffle1.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle1.i
+ %shuffle4.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp sgt <8 x i64> %3, %shuffle4.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle4.i
+ %shuffle7.i = shufflevector <8 x i64> %5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = icmp sgt <8 x i64> %5, %shuffle7.i
+ %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> %shuffle7.i
+ %vecext.i = extractelement <8 x i64> %7, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_max_epu64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_epu64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_epu64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> zeroinitializer
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp ugt <8 x i64> %1, %shuffle1.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle1.i
+ %shuffle4.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp ugt <8 x i64> %3, %shuffle4.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle4.i
+ %shuffle7.i = shufflevector <8 x i64> %5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = icmp ugt <8 x i64> %5, %shuffle7.i
+ %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> %shuffle7.i
+ %vecext.i = extractelement <8 x i64> %7, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_max_pd(i8 zeroext %__M, <8 x double> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movb 8(%ebp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vbroadcastsd {{.*#+}} zmm1 = [-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf]
+; X86-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vmaxpd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovlpd %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NEXT: fisttpll (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastsd {{.*#+}} zmm1 = [-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf]
+; X64-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vmaxpd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vcvttsd2si %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x double> %__W, <8 x double> <double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000>
+ %shuffle1.i = shufflevector <8 x double> %1, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %1, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle4.i = shufflevector <8 x double> %2, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %2, <8 x double> %shuffle4.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle7.i = shufflevector <8 x double> %3, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %3, <8 x double> %shuffle7.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %vecext.i = extractelement <8 x double> %4, i32 0
+ %conv = fptosi double %vecext.i to i64
+ ret i64 %conv
+}
+
+define i64 @test_mm512_mask_reduce_min_epi64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4294967295,2147483647,4294967295,2147483647,4294967295,2147483647,4294967295,2147483647,4294967295,2147483647,4294967295,2147483647,4294967295,2147483647,4294967295,2147483647]
+; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpminsq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
+; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpminsq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> <i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp slt <8 x i64> %1, %shuffle1.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle1.i
+ %shuffle4.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp slt <8 x i64> %3, %shuffle4.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle4.i
+ %shuffle7.i = shufflevector <8 x i64> %5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = icmp slt <8 x i64> %5, %shuffle7.i
+ %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> %shuffle7.i
+ %vecext.i = extractelement <8 x i64> %7, i32 0
+ ret i64 %vecext.i
+}
+
+define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_epu64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpminuq %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vpextrd $1, %xmm0, %edx
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_epu64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpminuq %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminuq %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x i64> %__W, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %shuffle1.i = shufflevector <8 x i64> %1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = icmp ult <8 x i64> %1, %shuffle1.i
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %shuffle1.i
+ %shuffle4.i = shufflevector <8 x i64> %3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = icmp ult <8 x i64> %3, %shuffle4.i
+ %5 = select <8 x i1> %4, <8 x i64> %3, <8 x i64> %shuffle4.i
+ %shuffle7.i = shufflevector <8 x i64> %5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = icmp ult <8 x i64> %5, %shuffle7.i
+ %7 = select <8 x i1> %6, <8 x i64> %5, <8 x i64> %shuffle7.i
+ %vecext.i = extractelement <8 x i64> %7, i32 0
+ ret i64 %vecext.i
+}
+
+define double @test_mm512_mask_reduce_min_pd(i8 zeroext %__M, <8 x double> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_pd:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movb 8(%ebp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vbroadcastsd {{.*#+}} zmm1 = [+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf]
+; X86-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vminpd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_pd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastsd {{.*#+}} zmm1 = [+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf]
+; X64-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vminpd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i8 %__M to <8 x i1>
+ %1 = select <8 x i1> %0, <8 x double> %__W, <8 x double> <double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000>
+ %shuffle1.i = shufflevector <8 x double> %1, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %1, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle4.i = shufflevector <8 x double> %2, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %2, <8 x double> %shuffle4.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %shuffle7.i = shufflevector <8 x double> %3, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %3, <8 x double> %shuffle7.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
+ %vecext.i = extractelement <8 x double> %4, i32 0
+ ret double %vecext.i
+}
+
+define i32 @test_mm512_reduce_max_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_max_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxsd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxsd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = icmp slt <16 x i32> %shuffle1.i, %0
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %shuffle1.i
+ %shuffle3.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp sgt <16 x i32> %2, %shuffle3.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle3.i
+ %shuffle6.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp sgt <16 x i32> %4, %shuffle6.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle6.i
+ %shuffle9.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp sgt <16 x i32> %6, %shuffle9.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle9.i
+ %9 = bitcast <16 x i32> %8 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %9, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_reduce_max_epu32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_max_epu32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxud %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_epu32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxud %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = icmp ult <16 x i32> %shuffle1.i, %0
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %shuffle1.i
+ %shuffle3.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp ugt <16 x i32> %2, %shuffle3.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle3.i
+ %shuffle6.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp ugt <16 x i32> %4, %shuffle6.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle6.i
+ %shuffle9.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp ugt <16 x i32> %6, %shuffle9.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle9.i
+ %9 = bitcast <16 x i32> %8 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %9, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define float @test_mm512_reduce_max_ps(<16 x float> %__W) {
+; X86-LABEL: test_mm512_reduce_max_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_max_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %__W, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle3.i = shufflevector <16 x float> %0, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %0, <16 x float> %shuffle3.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle6.i = shufflevector <16 x float> %1, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %1, <16 x float> %shuffle6.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle9.i = shufflevector <16 x float> %2, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %2, <16 x float> %shuffle9.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %vecext.i = extractelement <16 x float> %3, i32 0
+ ret float %vecext.i
+}
+
+define i32 @test_mm512_reduce_min_epi32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_min_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpminsd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpminsd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = icmp sgt <16 x i32> %shuffle1.i, %0
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %shuffle1.i
+ %shuffle3.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp slt <16 x i32> %2, %shuffle3.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle3.i
+ %shuffle6.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp slt <16 x i32> %4, %shuffle6.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle6.i
+ %shuffle9.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp slt <16 x i32> %6, %shuffle9.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle9.i
+ %9 = bitcast <16 x i32> %8 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %9, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_reduce_min_epu32(<8 x i64> %__W) {
+; X86-LABEL: test_mm512_reduce_min_epu32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpminud %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_epu32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpminud %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %shuffle1.i = shufflevector <16 x i32> %0, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = icmp ugt <16 x i32> %shuffle1.i, %0
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %shuffle1.i
+ %shuffle3.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp ult <16 x i32> %2, %shuffle3.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle3.i
+ %shuffle6.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp ult <16 x i32> %4, %shuffle6.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle6.i
+ %shuffle9.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp ult <16 x i32> %6, %shuffle9.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle9.i
+ %9 = bitcast <16 x i32> %8 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %9, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define float @test_mm512_reduce_min_ps(<16 x float> %__W) {
+; X86-LABEL: test_mm512_reduce_min_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_reduce_min_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %0 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %__W, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle3.i = shufflevector <16 x float> %0, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %1 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %0, <16 x float> %shuffle3.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle6.i = shufflevector <16 x float> %1, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %1, <16 x float> %shuffle6.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle9.i = shufflevector <16 x float> %2, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %2, <16 x float> %shuffle9.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %vecext.i = extractelement <16 x float> %3, i32 0
+ ret float %vecext.i
+}
+
+define i32 @test_mm512_mask_reduce_max_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
+; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpmaxsd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
+; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpmaxsd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp sgt <16 x i32> %2, %shuffle1.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle1.i
+ %shuffle4.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp sgt <16 x i32> %4, %shuffle4.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle4.i
+ %shuffle7.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp sgt <16 x i32> %6, %shuffle7.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle7.i
+ %shuffle10.i = shufflevector <16 x i32> %8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %9 = icmp sgt <16 x i32> %8, %shuffle10.i
+ %10 = select <16 x i1> %9, <16 x i32> %8, <16 x i32> %shuffle10.i
+ %11 = bitcast <16 x i32> %10 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %11, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_max_epu32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_epu32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_epu32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> zeroinitializer
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp ugt <16 x i32> %2, %shuffle1.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle1.i
+ %shuffle4.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp ugt <16 x i32> %4, %shuffle4.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle4.i
+ %shuffle7.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp ugt <16 x i32> %6, %shuffle7.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle7.i
+ %shuffle10.i = shufflevector <16 x i32> %8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %9 = icmp ugt <16 x i32> %8, %shuffle10.i
+ %10 = select <16 x i1> %9, <16 x i32> %8, <16 x i32> %shuffle10.i
+ %11 = bitcast <16 x i32> %10 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %11, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_max_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vbroadcastss {{.*#+}} zmm1 = [-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf]
+; X86-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vmaxps %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_max_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastss {{.*#+}} zmm1 = [-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf,-Inf]
+; X64-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vmaxps %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i16 %__M to <16 x i1>
+ %1 = select <16 x i1> %0, <16 x float> %__W, <16 x float> <float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000>
+ %shuffle1.i = shufflevector <16 x float> %1, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %1, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle4.i = shufflevector <16 x float> %2, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %2, <16 x float> %shuffle4.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle7.i = shufflevector <16 x float> %3, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %3, <16 x float> %shuffle7.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle10.i = shufflevector <16 x float> %4, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %4, <16 x float> %shuffle10.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %vecext.i = extractelement <16 x float> %5, i32 0
+ ret float %vecext.i
+}
+
+define i32 @test_mm512_mask_reduce_min_epi32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
+; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpminsd %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
+; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpminsd %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpminsd %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp slt <16 x i32> %2, %shuffle1.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle1.i
+ %shuffle4.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp slt <16 x i32> %4, %shuffle4.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle4.i
+ %shuffle7.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp slt <16 x i32> %6, %shuffle7.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle7.i
+ %shuffle10.i = shufflevector <16 x i32> %8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %9 = icmp slt <16 x i32> %8, %shuffle10.i
+ %10 = select <16 x i1> %9, <16 x i32> %8, <16 x i32> %shuffle10.i
+ %11 = bitcast <16 x i32> %10 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %11, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_epu32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vpminud %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X86-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_epu32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vpminud %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; X64-NEXT: vpminud %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__W to <16 x i32>
+ %1 = bitcast i16 %__M to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %shuffle1.i = shufflevector <16 x i32> %2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = icmp ult <16 x i32> %2, %shuffle1.i
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %shuffle1.i
+ %shuffle4.i = shufflevector <16 x i32> %4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = icmp ult <16 x i32> %4, %shuffle4.i
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> %shuffle4.i
+ %shuffle7.i = shufflevector <16 x i32> %6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = icmp ult <16 x i32> %6, %shuffle7.i
+ %8 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %shuffle7.i
+ %shuffle10.i = shufflevector <16 x i32> %8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %9 = icmp ult <16 x i32> %8, %shuffle10.i
+ %10 = select <16 x i1> %9, <16 x i32> %8, <16 x i32> %shuffle10.i
+ %11 = bitcast <16 x i32> %10 to <8 x i64>
+ %vecext.i = extractelement <8 x i64> %11, i32 0
+ %conv.i = trunc i64 %vecext.i to i32
+ ret i32 %conv.i
+}
+
+define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W) {
+; X86-LABEL: test_mm512_mask_reduce_min_ps:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vbroadcastss {{.*#+}} zmm1 = [+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf]
+; X86-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X86-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X86-NEXT: vminps %zmm0, %zmm1, %zmm0
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_reduce_min_ps:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vbroadcastss {{.*#+}} zmm1 = [+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf,+Inf]
+; X64-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; X64-NEXT: vextractf64x4 $1, %zmm1, %ymm0
+; X64-NEXT: vminps %zmm0, %zmm1, %zmm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vminps %zmm1, %zmm0, %zmm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
+entry:
+ %0 = bitcast i16 %__M to <16 x i1>
+ %1 = select <16 x i1> %0, <16 x float> %__W, <16 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000>
+ %shuffle1.i = shufflevector <16 x float> %1, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %1, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle4.i = shufflevector <16 x float> %2, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %2, <16 x float> %shuffle4.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle7.i = shufflevector <16 x float> %3, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %3, <16 x float> %shuffle7.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %shuffle10.i = shufflevector <16 x float> %4, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %4, <16 x float> %shuffle10.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
+ %vecext.i = extractelement <16 x float> %5, i32 0
+ ret float %vecext.i
+}
+
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) #9
declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) #9
declare float @llvm.fma.f32(float, float, float) #9
@@ -6325,6 +8717,10 @@ declare void @llvm.masked.compressstore.v8f64(<8 x double>, double*, <8 x i1>)
declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64*, <8 x i1>)
declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>)
declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32*, <16 x i1>)
+declare <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+declare <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+declare <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
!0 = !{i32 1}