path: root/llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll')
 llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll | 88 ++++++++++----------
 1 file changed, 44 insertions(+), 44 deletions(-)
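The change below is mechanical: every call to (and declaration of) the experimental multiply-reduction intrinsic drops the redundant scalar result type from the mangled name, leaving only the vector operand type. A minimal before/after pair, taken directly from the first hunk:

  %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64> %a0)  ; old: result type mangled into the name
  %1 = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %a0)      ; new: vector operand type only

The same rename is applied to the i32, i16, and i8 reductions at every vector width the test covers; the generated x86 assembly in the CHECK lines is unchanged.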
diff --git a/llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll b/llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll
index 62143fb1ffe..573f9c836c2 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-mul-widen.ll
@@ -86,7 +86,7 @@ define i64 @test_v2i64(<2 x i64> %a0) {
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %a0)
ret i64 %1
}
@@ -233,7 +233,7 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v4i64(<4 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %a0)
ret i64 %1
}
@@ -446,7 +446,7 @@ define i64 @test_v8i64(<8 x i64> %a0) {
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v8i64(<8 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.mul.v8i64(<8 x i64> %a0)
ret i64 %1
}
@@ -767,7 +767,7 @@ define i64 @test_v16i64(<16 x i64> %a0) {
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v16i64(<16 x i64> %a0)
+ %1 = call i64 @llvm.experimental.vector.reduce.mul.v16i64(<16 x i64> %a0)
ret i64 %1
}
@@ -803,7 +803,7 @@ define i32 @test_v2i32(<2 x i32> %a0) {
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v2i32(<2 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %a0)
ret i32 %1
}
@@ -849,7 +849,7 @@ define i32 @test_v4i32(<4 x i32> %a0) {
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v4i32(<4 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %a0)
ret i32 %1
}
@@ -920,7 +920,7 @@ define i32 @test_v8i32(<8 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v8i32(<8 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %a0)
ret i32 %1
}
@@ -1005,7 +1005,7 @@ define i32 @test_v16i32(<16 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v16i32(<16 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.mul.v16i32(<16 x i32> %a0)
ret i32 %1
}
@@ -1115,7 +1115,7 @@ define i32 @test_v32i32(<32 x i32> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v32i32(<32 x i32> %a0)
+ %1 = call i32 @llvm.experimental.vector.reduce.mul.v32i32(<32 x i32> %a0)
ret i32 %1
}
@@ -1148,7 +1148,7 @@ define i16 @test_v2i16(<2 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v2i16(<2 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v2i16(<2 x i16> %a0)
ret i16 %1
}
@@ -1183,7 +1183,7 @@ define i16 @test_v4i16(<4 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v4i16(<4 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %a0)
ret i16 %1
}
@@ -1224,7 +1224,7 @@ define i16 @test_v8i16(<8 x i16> %a0) {
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v8i16(<8 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %a0)
ret i16 %1
}
@@ -1287,7 +1287,7 @@ define i16 @test_v16i16(<16 x i16> %a0) {
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v16i16(<16 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %a0)
ret i16 %1
}
@@ -1407,7 +1407,7 @@ define i16 @test_v32i16(<32 x i16> %a0) {
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v32i16(<32 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v32i16(<32 x i16> %a0)
ret i16 %1
}
@@ -1545,7 +1545,7 @@ define i16 @test_v64i16(<64 x i16> %a0) {
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v64i16(<64 x i16> %a0)
+ %1 = call i16 @llvm.experimental.vector.reduce.mul.v64i16(<64 x i16> %a0)
ret i16 %1
}
@@ -1587,7 +1587,7 @@ define i8 @test_v2i8(<2 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v2i8(<2 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v2i8(<2 x i8> %a0)
ret i8 %1
}
@@ -1647,7 +1647,7 @@ define i8 @test_v4i8(<4 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v4i8(<4 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v4i8(<4 x i8> %a0)
ret i8 %1
}
@@ -1726,7 +1726,7 @@ define i8 @test_v8i8(<8 x i8> %a0) {
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v8i8(<8 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %a0)
ret i8 %1
}
@@ -1945,7 +1945,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v16i8(<16 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %a0)
ret i8 %1
}
@@ -2220,7 +2220,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v32i8(<32 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %a0)
ret i8 %1
}
@@ -2599,7 +2599,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v64i8(<64 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v64i8(<64 x i8> %a0)
ret i8 %1
}
@@ -3086,32 +3086,32 @@ define i8 @test_v128i8(<128 x i8> %a0) {
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v128i8(<128 x i8> %a0)
+ %1 = call i8 @llvm.experimental.vector.reduce.mul.v128i8(<128 x i8> %a0)
ret i8 %1
}
-declare i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.i64.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.i64.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.i64.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v16i64(<16 x i64>)
-declare i32 @llvm.experimental.vector.reduce.mul.i32.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.i32.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.i32.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.i32.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.i32.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v32i32(<32 x i32>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.i16.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v2i16(<2 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v64i16(<64 x i16>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.i8.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v2i8(<2 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v4i8(<4 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v128i8(<128 x i8>)