Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-and.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-and.ll  88
1 file changed, 44 insertions(+), 44 deletions(-)
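
Summary (inferred from the diff below; the original commit message is not part of this page): the mangled names of the experimental vector reduce intrinsics drop the redundant scalar result type, since the result type is implied by the vector operand's element type. For example, @llvm.experimental.vector.reduce.and.i64.v2i64 becomes @llvm.experimental.vector.reduce.and.v2i64. All call sites and declarations in this test are updated accordingly.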
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and.ll b/llvm/test/CodeGen/X86/vector-reduce-and.ll
index 9a9f5f21af5..de102b44ae3 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and.ll
@@ -24,7 +24,7 @@ define i64 @test_v2i64(<2 x i64> %a0) {
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.and.i64.v2i64(<2 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64> %a0)
ret i64 %1
}
@@ -66,7 +66,7 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.and.i64.v4i64(<4 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %a0)
ret i64 %1
}
@@ -114,7 +114,7 @@ define i64 @test_v8i64(<8 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.and.i64.v8i64(<8 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.and.v8i64(<8 x i64> %a0)
ret i64 %1
}
@@ -171,7 +171,7 @@ define i64 @test_v16i64(<16 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.and.i64.v16i64(<16 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.and.v16i64(<16 x i64> %a0)
ret i64 %1
}
@@ -193,7 +193,7 @@ define i32 @test_v2i32(<2 x i32> %a0) {
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.and.i32.v2i32(<2 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32> %a0)
ret i32 %1
}
@@ -215,7 +215,7 @@ define i32 @test_v4i32(<4 x i32> %a0) {
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.and.i32.v4i32(<4 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %a0)
ret i32 %1
}
@@ -265,7 +265,7 @@ define i32 @test_v8i32(<8 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.and.i32.v8i32(<8 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %a0)
ret i32 %1
}
@@ -321,7 +321,7 @@ define i32 @test_v16i32(<16 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.and.v16i32(<16 x i32> %a0)
ret i32 %1
}
@@ -386,7 +386,7 @@ define i32 @test_v32i32(<32 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.and.i32.v32i32(<32 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.and.v32i32(<32 x i32> %a0)
ret i32 %1
}
@@ -410,7 +410,7 @@ define i16 @test_v2i16(<2 x i16> %a0) {
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v2i16(<2 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v2i16(<2 x i16> %a0)
ret i16 %1
}
@@ -434,7 +434,7 @@ define i16 @test_v4i16(<4 x i16> %a0) {
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v4i16(<4 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16> %a0)
ret i16 %1
}
@@ -463,7 +463,7 @@ define i16 @test_v8i16(<8 x i16> %a0) {
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v8i16(<8 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16> %a0)
ret i16 %1
}
@@ -526,7 +526,7 @@ define i16 @test_v16i16(<16 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v16i16(<16 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %a0)
ret i16 %1
}
@@ -595,7 +595,7 @@ define i16 @test_v32i16(<32 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v32i16(<32 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v32i16(<32 x i16> %a0)
ret i16 %1
}
@@ -673,7 +673,7 @@ define i16 @test_v64i16(<64 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.and.i16.v64i16(<64 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.and.v64i16(<64 x i16> %a0)
ret i16 %1
}
@@ -705,7 +705,7 @@ define i8 @test_v2i8(<2 x i8> %a0) {
; AVX-NEXT: vpextrb $0, %xmm0, %eax
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v2i8(<2 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v2i8(<2 x i8> %a0)
ret i8 %1
}
@@ -739,7 +739,7 @@ define i8 @test_v4i8(<4 x i8> %a0) {
; AVX-NEXT: vpextrb $0, %xmm0, %eax
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v4i8(<4 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v4i8(<4 x i8> %a0)
ret i8 %1
}
@@ -781,7 +781,7 @@ define i8 @test_v8i8(<8 x i8> %a0) {
; AVX-NEXT: vpextrb $0, %xmm0, %eax
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v8i8(<8 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8> %a0)
ret i8 %1
}
@@ -831,7 +831,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX-NEXT: vpextrb $0, %xmm0, %eax
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v16i8(<16 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8> %a0)
ret i8 %1
}
@@ -920,7 +920,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v32i8(<32 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %a0)
ret i8 %1
}
@@ -1017,7 +1017,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v64i8(<64 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v64i8(<64 x i8> %a0)
ret i8 %1
}
@@ -1127,32 +1127,32 @@ define i8 @test_v128i8(<128 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.and.i8.v128i8(<128 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.and.v128i8(<128 x i8> %a0)
ret i8 %1
}
-declare i64 @llvm.experimental.vector.reduce.and.i64.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.and.i64.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.and.i64.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.and.i64.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v16i64(<16 x i64>)
-declare i32 @llvm.experimental.vector.reduce.and.i32.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.i32.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.i32.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.i32.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v32i32(<32 x i32>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.i16.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v2i16(<2 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v64i16(<64 x i16>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.i8.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v2i8(<2 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v4i8(<4 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v128i8(<128 x i8>)
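
Usage note (an assumption, not stated on this page): the CHECK lines in llc tests like this one are normally autogenerated, so after an intrinsic rename the usual workflow is to rerun the update script over the test file rather than edit assertions by hand, e.g.:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-reduce-and.ll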