Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-add.ll')
 llvm/test/CodeGen/X86/vector-reduce-add.ll | 88 ++++++++++++++++----------------
 1 file changed, 44 insertions(+), 44 deletions(-)
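This diff tracks a simplification of the name mangling for the experimental vector
reduction intrinsics: the scalar result type suffix (.i64, .i32, .i16, .i8) is dropped
from the intrinsic name, which is now mangled only on the vector operand type, since the
result type is already implied by the vector's element type. A minimal sketch in LLVM IR
of the new-style call (the function name @sum_v2i64 is illustrative, not part of this test):

define i64 @sum_v2i64(<2 x i64> %v) {
  ; New mangling: only the operand type v2i64 appears in the name;
  ; the old name would have been @llvm.experimental.vector.reduce.add.i64.v2i64.
  %r = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %v)
  ret i64 %r
}
declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)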
diff --git a/llvm/test/CodeGen/X86/vector-reduce-add.ll b/llvm/test/CodeGen/X86/vector-reduce-add.ll
index 7abdf1cb037..02fb375a318 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-add.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-add.ll
@@ -32,7 +32,7 @@ define i64 @test_v2i64(<2 x i64> %a0) {
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %a0)
ret i64 %1
}
@@ -74,7 +74,7 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.add.i64.v4i64(<4 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %a0)
ret i64 %1
}
@@ -124,7 +124,7 @@ define i64 @test_v8i64(<8 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.add.i64.v8i64(<8 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %a0)
ret i64 %1
}
@@ -187,7 +187,7 @@ define i64 @test_v16i64(<16 x i64> %a0) {
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.add.i64.v16i64(<16 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %a0)
ret i64 %1
}
@@ -216,7 +216,7 @@ define i32 @test_v2i32(<2 x i32> %a0) {
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.add.i32.v2i32(<2 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.add.v2i32(<2 x i32> %a0)
ret i32 %1
}
@@ -264,7 +264,7 @@ define i32 @test_v4i32(<4 x i32> %a0) {
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %a0)
ret i32 %1
}
@@ -325,7 +325,7 @@ define i32 @test_v8i32(<8 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %a0)
ret i32 %1
}
@@ -397,7 +397,7 @@ define i32 @test_v16i32(<16 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %a0)
ret i32 %1
}
@@ -488,7 +488,7 @@ define i32 @test_v32i32(<32 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.add.i32.v32i32(<32 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.add.v32i32(<32 x i32> %a0)
ret i32 %1
}
@@ -520,7 +520,7 @@ define i16 @test_v2i16(<2 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v2i16(<2 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v2i16(<2 x i16> %a0)
ret i16 %1
}
@@ -573,7 +573,7 @@ define i16 @test_v4i16(<4 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v4i16(<4 x i16> %a0)
ret i16 %1
}
@@ -637,7 +637,7 @@ define i16 @test_v8i16(<8 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %a0)
ret i16 %1
}
@@ -714,7 +714,7 @@ define i16 @test_v16i16(<16 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %a0)
ret i16 %1
}
@@ -802,7 +802,7 @@ define i16 @test_v32i16(<32 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v32i16(<32 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v32i16(<32 x i16> %a0)
ret i16 %1
}
@@ -909,7 +909,7 @@ define i16 @test_v64i16(<64 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.add.i16.v64i16(<64 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.add.v64i16(<64 x i16> %a0)
ret i16 %1
}
@@ -949,7 +949,7 @@ define i8 @test_v2i8(<2 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v2i8(<2 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v2i8(<2 x i8> %a0)
ret i8 %1
}
@@ -1012,7 +1012,7 @@ define i8 @test_v4i8(<4 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v4i8(<4 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v4i8(<4 x i8> %a0)
ret i8 %1
}
@@ -1089,7 +1089,7 @@ define i8 @test_v8i8(<8 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8> %a0)
ret i8 %1
}
@@ -1153,7 +1153,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %a0)
ret i8 %1
}
@@ -1242,7 +1242,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8> %a0)
ret i8 %1
}
@@ -1341,7 +1341,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v64i8(<64 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8> %a0)
ret i8 %1
}
@@ -1457,32 +1457,32 @@ define i8 @test_v128i8(<128 x i8> %a0) {
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.add.i8.v128i8(<128 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8> %a0)
ret i8 %1
}
-declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.i64.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.i64.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.i64.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v32i32(<32 x i32>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v2i16(<2 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v64i16(<64 x i16>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v2i8(<2 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v4i8(<4 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8>)
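The ; AVX512-NEXT: assertion lines in this test appear to be auto-generated in the style of
utils/update_llc_test_checks.py. Assuming a standard LLVM checkout with a built llc (the
build/ directory name here is illustrative), they can be regenerated after a rename like
this one with: llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/X86/vector-reduce-add.ll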