diff options
Diffstat (limited to 'llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll')
-rw-r--r-- | llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll | 58 |
1 file changed, 29 insertions, 29 deletions
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll index eb4bd8c5c7b..2f899878363 100644 --- a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll @@ -1,28 +1,28 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK -declare i1 @llvm.experimental.vector.reduce.add.i1.v1i1(<1 x i1> %a) -declare i8 @llvm.experimental.vector.reduce.add.i8.v1i8(<1 x i8> %a) -declare i16 @llvm.experimental.vector.reduce.add.i16.v1i16(<1 x i16> %a) -declare i24 @llvm.experimental.vector.reduce.add.i24.v1i24(<1 x i24> %a) -declare i32 @llvm.experimental.vector.reduce.add.i32.v1i32(<1 x i32> %a) -declare i64 @llvm.experimental.vector.reduce.add.i64.v1i64(<1 x i64> %a) -declare i128 @llvm.experimental.vector.reduce.add.i128.v1i128(<1 x i128> %a) - -declare i8 @llvm.experimental.vector.reduce.add.i8.v3i8(<3 x i8> %a) -declare i8 @llvm.experimental.vector.reduce.add.i8.v9i8(<9 x i8> %a) -declare i32 @llvm.experimental.vector.reduce.add.i32.v3i32(<3 x i32> %a) -declare i1 @llvm.experimental.vector.reduce.add.i1.v4i1(<4 x i1> %a) -declare i24 @llvm.experimental.vector.reduce.add.i24.v4i24(<4 x i24> %a) -declare i128 @llvm.experimental.vector.reduce.add.i128.v2i128(<2 x i128> %a) -declare i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %a) +declare i1 @llvm.experimental.vector.reduce.add.v1i1(<1 x i1> %a) +declare i8 @llvm.experimental.vector.reduce.add.v1i8(<1 x i8> %a) +declare i16 @llvm.experimental.vector.reduce.add.v1i16(<1 x i16> %a) +declare i24 @llvm.experimental.vector.reduce.add.v1i24(<1 x i24> %a) +declare i32 @llvm.experimental.vector.reduce.add.v1i32(<1 x i32> %a) +declare i64 @llvm.experimental.vector.reduce.add.v1i64(<1 x i64> %a) +declare i128 @llvm.experimental.vector.reduce.add.v1i128(<1 x i128> %a) + 
+declare i8 @llvm.experimental.vector.reduce.add.v3i8(<3 x i8> %a) +declare i8 @llvm.experimental.vector.reduce.add.v9i8(<9 x i8> %a) +declare i32 @llvm.experimental.vector.reduce.add.v3i32(<3 x i32> %a) +declare i1 @llvm.experimental.vector.reduce.add.v4i1(<4 x i1> %a) +declare i24 @llvm.experimental.vector.reduce.add.v4i24(<4 x i24> %a) +declare i128 @llvm.experimental.vector.reduce.add.v2i128(<2 x i128> %a) +declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %a) define i1 @test_v1i1(<1 x i1> %a) nounwind { ; CHECK-LABEL: test_v1i1: ; CHECK: // %bb.0: ; CHECK-NEXT: and w0, w0, #0x1 ; CHECK-NEXT: ret - %b = call i1 @llvm.experimental.vector.reduce.add.i1.v1i1(<1 x i1> %a) + %b = call i1 @llvm.experimental.vector.reduce.add.v1i1(<1 x i1> %a) ret i1 %b } @@ -32,7 +32,7 @@ define i8 @test_v1i8(<1 x i8> %a) nounwind { ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: umov w0, v0.b[0] ; CHECK-NEXT: ret - %b = call i8 @llvm.experimental.vector.reduce.add.i8.v1i8(<1 x i8> %a) + %b = call i8 @llvm.experimental.vector.reduce.add.v1i8(<1 x i8> %a) ret i8 %b } @@ -42,7 +42,7 @@ define i16 @test_v1i16(<1 x i16> %a) nounwind { ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: umov w0, v0.h[0] ; CHECK-NEXT: ret - %b = call i16 @llvm.experimental.vector.reduce.add.i16.v1i16(<1 x i16> %a) + %b = call i16 @llvm.experimental.vector.reduce.add.v1i16(<1 x i16> %a) ret i16 %b } @@ -50,7 +50,7 @@ define i24 @test_v1i24(<1 x i24> %a) nounwind { ; CHECK-LABEL: test_v1i24: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %b = call i24 @llvm.experimental.vector.reduce.add.i24.v1i24(<1 x i24> %a) + %b = call i24 @llvm.experimental.vector.reduce.add.v1i24(<1 x i24> %a) ret i24 %b } @@ -60,7 +60,7 @@ define i32 @test_v1i32(<1 x i32> %a) nounwind { ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %b = call i32 @llvm.experimental.vector.reduce.add.i32.v1i32(<1 x i32> %a) + %b = call i32 
@llvm.experimental.vector.reduce.add.v1i32(<1 x i32> %a) ret i32 %b } @@ -70,7 +70,7 @@ define i64 @test_v1i64(<1 x i64> %a) nounwind { ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret - %b = call i64 @llvm.experimental.vector.reduce.add.i64.v1i64(<1 x i64> %a) + %b = call i64 @llvm.experimental.vector.reduce.add.v1i64(<1 x i64> %a) ret i64 %b } @@ -78,7 +78,7 @@ define i128 @test_v1i128(<1 x i128> %a) nounwind { ; CHECK-LABEL: test_v1i128: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %b = call i128 @llvm.experimental.vector.reduce.add.i128.v1i128(<1 x i128> %a) + %b = call i128 @llvm.experimental.vector.reduce.add.v1i128(<1 x i128> %a) ret i128 %b } @@ -92,7 +92,7 @@ define i8 @test_v3i8(<3 x i8> %a) nounwind { ; CHECK-NEXT: addv h0, v0.4h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %b = call i8 @llvm.experimental.vector.reduce.add.i8.v3i8(<3 x i8> %a) + %b = call i8 @llvm.experimental.vector.reduce.add.v3i8(<3 x i8> %a) ret i8 %b } @@ -109,7 +109,7 @@ define i8 @test_v9i8(<9 x i8> %a) nounwind { ; CHECK-NEXT: addv b0, v0.16b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %b = call i8 @llvm.experimental.vector.reduce.add.i8.v9i8(<9 x i8> %a) + %b = call i8 @llvm.experimental.vector.reduce.add.v9i8(<9 x i8> %a) ret i8 %b } @@ -120,7 +120,7 @@ define i32 @test_v3i32(<3 x i32> %a) nounwind { ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %b = call i32 @llvm.experimental.vector.reduce.add.i32.v3i32(<3 x i32> %a) + %b = call i32 @llvm.experimental.vector.reduce.add.v3i32(<3 x i32> %a) ret i32 %b } @@ -131,7 +131,7 @@ define i1 @test_v4i1(<4 x i1> %a) nounwind { ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: and w0, w8, #0x1 ; CHECK-NEXT: ret - %b = call i1 @llvm.experimental.vector.reduce.add.i1.v4i1(<4 x i1> %a) + %b = call i1 @llvm.experimental.vector.reduce.add.v4i1(<4 x i1> %a) ret i1 %b } @@ -141,7 +141,7 @@ define i24 @test_v4i24(<4 x i24> %a) nounwind { ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov 
w0, s0 ; CHECK-NEXT: ret - %b = call i24 @llvm.experimental.vector.reduce.add.i24.v4i24(<4 x i24> %a) + %b = call i24 @llvm.experimental.vector.reduce.add.v4i24(<4 x i24> %a) ret i24 %b } @@ -151,7 +151,7 @@ define i128 @test_v2i128(<2 x i128> %a) nounwind { ; CHECK-NEXT: adds x0, x0, x2 ; CHECK-NEXT: adcs x1, x1, x3 ; CHECK-NEXT: ret - %b = call i128 @llvm.experimental.vector.reduce.add.i128.v2i128(<2 x i128> %a) + %b = call i128 @llvm.experimental.vector.reduce.add.v2i128(<2 x i128> %a) ret i128 %b } @@ -164,6 +164,6 @@ define i32 @test_v16i32(<16 x i32> %a) nounwind { ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %b = call i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %a) + %b = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %a) ret i32 %b } |