Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll    169
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll    198
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll    83
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll    77
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll   177
5 files changed, 704 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
new file mode 100644
index 00000000000..eb4bd8c5c7b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare i1 @llvm.experimental.vector.reduce.add.i1.v1i1(<1 x i1> %a)
+declare i8 @llvm.experimental.vector.reduce.add.i8.v1i8(<1 x i8> %a)
+declare i16 @llvm.experimental.vector.reduce.add.i16.v1i16(<1 x i16> %a)
+declare i24 @llvm.experimental.vector.reduce.add.i24.v1i24(<1 x i24> %a)
+declare i32 @llvm.experimental.vector.reduce.add.i32.v1i32(<1 x i32> %a)
+declare i64 @llvm.experimental.vector.reduce.add.i64.v1i64(<1 x i64> %a)
+declare i128 @llvm.experimental.vector.reduce.add.i128.v1i128(<1 x i128> %a)
+
+declare i8 @llvm.experimental.vector.reduce.add.i8.v3i8(<3 x i8> %a)
+declare i8 @llvm.experimental.vector.reduce.add.i8.v9i8(<9 x i8> %a)
+declare i32 @llvm.experimental.vector.reduce.add.i32.v3i32(<3 x i32> %a)
+declare i1 @llvm.experimental.vector.reduce.add.i1.v4i1(<4 x i1> %a)
+declare i24 @llvm.experimental.vector.reduce.add.i24.v4i24(<4 x i24> %a)
+declare i128 @llvm.experimental.vector.reduce.add.i128.v2i128(<2 x i128> %a)
+declare i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %a)
+
+define i1 @test_v1i1(<1 x i1> %a) nounwind {
+; CHECK-LABEL: test_v1i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w0, w0, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.add.i1.v1i1(<1 x i1> %a)
+ ret i1 %b
+}
+
+define i8 @test_v1i8(<1 x i8> %a) nounwind {
+; CHECK-LABEL: test_v1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.b[0]
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.add.i8.v1i8(<1 x i8> %a)
+ ret i8 %b
+}
+
+define i16 @test_v1i16(<1 x i16> %a) nounwind {
+; CHECK-LABEL: test_v1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: ret
+ %b = call i16 @llvm.experimental.vector.reduce.add.i16.v1i16(<1 x i16> %a)
+ ret i16 %b
+}
+
+define i24 @test_v1i24(<1 x i24> %a) nounwind {
+; CHECK-LABEL: test_v1i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.add.i24.v1i24(<1 x i24> %a)
+ ret i24 %b
+}
+
+define i32 @test_v1i32(<1 x i32> %a) nounwind {
+; CHECK-LABEL: test_v1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.add.i32.v1i32(<1 x i32> %a)
+ ret i32 %b
+}
+
+define i64 @test_v1i64(<1 x i64> %a) nounwind {
+; CHECK-LABEL: test_v1i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: ret
+ %b = call i64 @llvm.experimental.vector.reduce.add.i64.v1i64(<1 x i64> %a)
+ ret i64 %b
+}
+
+define i128 @test_v1i128(<1 x i128> %a) nounwind {
+; CHECK-LABEL: test_v1i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.add.i128.v1i128(<1 x i128> %a)
+ ret i128 %b
+}
+
+define i8 @test_v3i8(<3 x i8> %a) nounwind {
+; CHECK-LABEL: test_v3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi d0, #0000000000000000
+; CHECK-NEXT: mov v0.h[0], w0
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: addv h0, v0.4h
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.add.i8.v3i8(<3 x i8> %a)
+ ret i8 %b
+}
+
+define i8 @test_v9i8(<9 x i8> %a) nounwind {
+; CHECK-LABEL: test_v9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v0.b[9], wzr
+; CHECK-NEXT: mov v0.b[10], wzr
+; CHECK-NEXT: mov v0.b[11], wzr
+; CHECK-NEXT: mov v0.b[12], wzr
+; CHECK-NEXT: mov v0.b[13], wzr
+; CHECK-NEXT: mov v0.b[14], wzr
+; CHECK-NEXT: mov v0.b[15], wzr
+; CHECK-NEXT: addv b0, v0.16b
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.add.i8.v9i8(<9 x i8> %a)
+ ret i8 %b
+}
+
+define i32 @test_v3i32(<3 x i32> %a) nounwind {
+; CHECK-LABEL: test_v3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v0.s[3], wzr
+; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.add.i32.v3i32(<3 x i32> %a)
+ ret i32 %b
+}
+
+define i1 @test_v4i1(<4 x i1> %a) nounwind {
+; CHECK-LABEL: test_v4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: addv h0, v0.4h
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.add.i1.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i24 @test_v4i24(<4 x i24> %a) nounwind {
+; CHECK-LABEL: test_v4i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.add.i24.v4i24(<4 x i24> %a)
+ ret i24 %b
+}
+
+define i128 @test_v2i128(<2 x i128> %a) nounwind {
+; CHECK-LABEL: test_v2i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x2
+; CHECK-NEXT: adcs x1, x1, x3
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.add.i128.v2i128(<2 x i128> %a)
+ ret i128 %b
+}
+
+define i32 @test_v16i32(<16 x i32> %a) nounwind {
+; CHECK-LABEL: test_v16i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %a)
+ ret i32 %b
+}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
new file mode 100644
index 00000000000..78032521a85
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %a)
+declare i8 @llvm.experimental.vector.reduce.and.i8.v1i8(<1 x i8> %a)
+declare i16 @llvm.experimental.vector.reduce.and.i16.v1i16(<1 x i16> %a)
+declare i24 @llvm.experimental.vector.reduce.and.i24.v1i24(<1 x i24> %a)
+declare i32 @llvm.experimental.vector.reduce.and.i32.v1i32(<1 x i32> %a)
+declare i64 @llvm.experimental.vector.reduce.and.i64.v1i64(<1 x i64> %a)
+declare i128 @llvm.experimental.vector.reduce.and.i128.v1i128(<1 x i128> %a)
+
+declare i8 @llvm.experimental.vector.reduce.and.i8.v3i8(<3 x i8> %a)
+declare i8 @llvm.experimental.vector.reduce.and.i8.v9i8(<9 x i8> %a)
+declare i32 @llvm.experimental.vector.reduce.and.i32.v3i32(<3 x i32> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %a)
+declare i24 @llvm.experimental.vector.reduce.and.i24.v4i24(<4 x i24> %a)
+declare i128 @llvm.experimental.vector.reduce.and.i128.v2i128(<2 x i128> %a)
+declare i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32> %a)
+
+define i1 @test_v1i1(<1 x i1> %a) nounwind {
+; CHECK-LABEL: test_v1i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w0, w0, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %a)
+ ret i1 %b
+}
+
+define i8 @test_v1i8(<1 x i8> %a) nounwind {
+; CHECK-LABEL: test_v1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.b[0]
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.and.i8.v1i8(<1 x i8> %a)
+ ret i8 %b
+}
+
+define i16 @test_v1i16(<1 x i16> %a) nounwind {
+; CHECK-LABEL: test_v1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: ret
+ %b = call i16 @llvm.experimental.vector.reduce.and.i16.v1i16(<1 x i16> %a)
+ ret i16 %b
+}
+
+define i24 @test_v1i24(<1 x i24> %a) nounwind {
+; CHECK-LABEL: test_v1i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.and.i24.v1i24(<1 x i24> %a)
+ ret i24 %b
+}
+
+define i32 @test_v1i32(<1 x i32> %a) nounwind {
+; CHECK-LABEL: test_v1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.and.i32.v1i32(<1 x i32> %a)
+ ret i32 %b
+}
+
+define i64 @test_v1i64(<1 x i64> %a) nounwind {
+; CHECK-LABEL: test_v1i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: ret
+ %b = call i64 @llvm.experimental.vector.reduce.and.i64.v1i64(<1 x i64> %a)
+ ret i64 %b
+}
+
+define i128 @test_v1i128(<1 x i128> %a) nounwind {
+; CHECK-LABEL: test_v1i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.and.i128.v1i128(<1 x i128> %a)
+ ret i128 %b
+}
+
+define i8 @test_v3i8(<3 x i8> %a) nounwind {
+; CHECK-LABEL: test_v3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, w1
+; CHECK-NEXT: and w8, w8, w2
+; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.and.i8.v3i8(<3 x i8> %a)
+ ret i8 %b
+}
+
+define i8 @test_v9i8(<9 x i8> %a) nounwind {
+; CHECK-LABEL: test_v9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-1
+; CHECK-NEXT: mov v0.b[9], w8
+; CHECK-NEXT: mov v0.b[10], w8
+; CHECK-NEXT: mov v0.b[11], w8
+; CHECK-NEXT: mov v0.b[12], w8
+; CHECK-NEXT: mov v0.b[13], w8
+; CHECK-NEXT: mov v0.b[14], w8
+; CHECK-NEXT: mov v0.b[15], w8
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[7]
+; CHECK-NEXT: and w0, w8, w9
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.and.i8.v9i8(<9 x i8> %a)
+ ret i8 %b
+}
+
+define i32 @test_v3i32(<3 x i32> %a) nounwind {
+; CHECK-LABEL: test_v3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-1
+; CHECK-NEXT: mov v0.s[3], w8
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: and w0, w9, w8
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.and.i32.v3i32(<3 x i32> %a)
+ ret i32 %b
+}
+
+define i1 @test_v4i1(<4 x i1> %a) nounwind {
+; CHECK-LABEL: test_v4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w10, v0.h[1]
+; CHECK-NEXT: umov w11, v0.h[0]
+; CHECK-NEXT: umov w9, v0.h[2]
+; CHECK-NEXT: and w10, w11, w10
+; CHECK-NEXT: umov w8, v0.h[3]
+; CHECK-NEXT: and w9, w10, w9
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i24 @test_v4i24(<4 x i24> %a) nounwind {
+; CHECK-LABEL: test_v4i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: and w0, w9, w8
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.and.i24.v4i24(<4 x i24> %a)
+ ret i24 %b
+}
+
+define i128 @test_v2i128(<2 x i128> %a) nounwind {
+; CHECK-LABEL: test_v2i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and x0, x0, x2
+; CHECK-NEXT: and x1, x1, x3
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.and.i128.v2i128(<2 x i128> %a)
+ ret i128 %b
+}
+
+define i32 @test_v16i32(<16 x i32> %a) nounwind {
+; CHECK-LABEL: test_v16i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and v1.16b, v1.16b, v3.16b
+; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: and w0, w9, w8
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32> %a)
+ ret i32 %b
+}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll
new file mode 100644
index 00000000000..86511daadff
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare half @llvm.experimental.vector.reduce.fadd.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.fadd.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call fast nnan half @llvm.experimental.vector.reduce.fadd.f16.v1f16(half 0.0, <1 x half> %a)
+ ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: ret
+ %b = call fast nnan float @llvm.experimental.vector.reduce.fadd.f32.v1f32(float 0.0, <1 x float> %a)
+ ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call fast nnan double @llvm.experimental.vector.reduce.fadd.f64.v1f64(double 0.0, <1 x double> %a)
+ ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call fast nnan fp128 @llvm.experimental.vector.reduce.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: mov v0.s[3], v1.s[0]
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: faddp s0, v0.2s
+; CHECK-NEXT: ret
+ %b = call fast nnan float @llvm.experimental.vector.reduce.fadd.f32.v3f32(float 0.0, <3 x float> %a)
+ ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: bl __addtf3
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %b = call fast nnan fp128 @llvm.experimental.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fadd v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: fadd v0.4s, v0.4s, v2.4s
+; CHECK-NEXT: fadd v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: faddp s0, v0.2s
+; CHECK-NEXT: ret
+ %b = call fast nnan float @llvm.experimental.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a)
+ ret float %b
+}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
new file mode 100644
index 00000000000..44260d53632
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare half @llvm.experimental.vector.reduce.fmax.f16.v1f16(<1 x half> %a)
+declare float @llvm.experimental.vector.reduce.fmax.f32.v1f32(<1 x float> %a)
+declare double @llvm.experimental.vector.reduce.fmax.f64.v1f64(<1 x double> %a)
+declare fp128 @llvm.experimental.vector.reduce.fmax.f128.v1f128(<1 x fp128> %a)
+
+declare float @llvm.experimental.vector.reduce.fmax.f32.v3f32(<3 x float> %a)
+declare fp128 @llvm.experimental.vector.reduce.fmax.f128.v2f128(<2 x fp128> %a)
+declare float @llvm.experimental.vector.reduce.fmax.f32.v16f32(<16 x float> %a)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call nnan half @llvm.experimental.vector.reduce.fmax.f16.v1f16(<1 x half> %a)
+ ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: ret
+ %b = call nnan float @llvm.experimental.vector.reduce.fmax.f32.v1f32(<1 x float> %a)
+ ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call nnan double @llvm.experimental.vector.reduce.fmax.f64.v1f64(<1 x double> %a)
+ ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call nnan fp128 @llvm.experimental.vector.reduce.fmax.f128.v1f128(<1 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, wzr, #0x7f800000
+; CHECK-NEXT: fmov s1, w8
+; CHECK-NEXT: mov v0.s[3], v1.s[0]
+; CHECK-NEXT: fmaxnmv s0, v0.4s
+; CHECK-NEXT: ret
+ %b = call nnan float @llvm.experimental.vector.reduce.fmax.f32.v3f32(<3 x float> %a)
+ ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: b fmaxl
+ %b = call nnan fp128 @llvm.experimental.vector.reduce.fmax.f128.v2f128(<2 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmaxnm v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v2.4s
+; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: fmaxnmv s0, v0.4s
+; CHECK-NEXT: ret
+ %b = call nnan float @llvm.experimental.vector.reduce.fmax.f32.v16f32(<16 x float> %a)
+ ret float %b
+}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
new file mode 100644
index 00000000000..eff97260ea3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare i1 @llvm.experimental.vector.reduce.umax.i1.v1i1(<1 x i1> %a)
+declare i8 @llvm.experimental.vector.reduce.umax.i8.v1i8(<1 x i8> %a)
+declare i16 @llvm.experimental.vector.reduce.umax.i16.v1i16(<1 x i16> %a)
+declare i24 @llvm.experimental.vector.reduce.umax.i24.v1i24(<1 x i24> %a)
+declare i32 @llvm.experimental.vector.reduce.umax.i32.v1i32(<1 x i32> %a)
+declare i64 @llvm.experimental.vector.reduce.umax.i64.v1i64(<1 x i64> %a)
+declare i128 @llvm.experimental.vector.reduce.umax.i128.v1i128(<1 x i128> %a)
+
+declare i8 @llvm.experimental.vector.reduce.umax.i8.v3i8(<3 x i8> %a)
+declare i8 @llvm.experimental.vector.reduce.umax.i8.v9i8(<9 x i8> %a)
+declare i32 @llvm.experimental.vector.reduce.umax.i32.v3i32(<3 x i32> %a)
+declare i1 @llvm.experimental.vector.reduce.umax.i1.v4i1(<4 x i1> %a)
+declare i24 @llvm.experimental.vector.reduce.umax.i24.v4i24(<4 x i24> %a)
+declare i128 @llvm.experimental.vector.reduce.umax.i128.v2i128(<2 x i128> %a)
+declare i32 @llvm.experimental.vector.reduce.umax.i32.v16i32(<16 x i32> %a)
+
+define i1 @test_v1i1(<1 x i1> %a) nounwind {
+; CHECK-LABEL: test_v1i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w0, w0, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.umax.i1.v1i1(<1 x i1> %a)
+ ret i1 %b
+}
+
+define i8 @test_v1i8(<1 x i8> %a) nounwind {
+; CHECK-LABEL: test_v1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.b[0]
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.umax.i8.v1i8(<1 x i8> %a)
+ ret i8 %b
+}
+
+define i16 @test_v1i16(<1 x i16> %a) nounwind {
+; CHECK-LABEL: test_v1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: ret
+ %b = call i16 @llvm.experimental.vector.reduce.umax.i16.v1i16(<1 x i16> %a)
+ ret i16 %b
+}
+
+define i24 @test_v1i24(<1 x i24> %a) nounwind {
+; CHECK-LABEL: test_v1i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.umax.i24.v1i24(<1 x i24> %a)
+ ret i24 %b
+}
+
+define i32 @test_v1i32(<1 x i32> %a) nounwind {
+; CHECK-LABEL: test_v1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.umax.i32.v1i32(<1 x i32> %a)
+ ret i32 %b
+}
+
+define i64 @test_v1i64(<1 x i64> %a) nounwind {
+; CHECK-LABEL: test_v1i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: ret
+ %b = call i64 @llvm.experimental.vector.reduce.umax.i64.v1i64(<1 x i64> %a)
+ ret i64 %b
+}
+
+define i128 @test_v1i128(<1 x i128> %a) nounwind {
+; CHECK-LABEL: test_v1i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.umax.i128.v1i128(<1 x i128> %a)
+ ret i128 %b
+}
+
+define i8 @test_v3i8(<3 x i8> %a) nounwind {
+; CHECK-LABEL: test_v3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi d0, #0000000000000000
+; CHECK-NEXT: mov v0.h[0], w0
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-NEXT: umaxv h0, v0.4h
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.umax.i8.v3i8(<3 x i8> %a)
+ ret i8 %b
+}
+
+define i8 @test_v9i8(<9 x i8> %a) nounwind {
+; CHECK-LABEL: test_v9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v0.b[9], wzr
+; CHECK-NEXT: mov v0.b[10], wzr
+; CHECK-NEXT: mov v0.b[11], wzr
+; CHECK-NEXT: mov v0.b[12], wzr
+; CHECK-NEXT: mov v0.b[13], wzr
+; CHECK-NEXT: mov v0.b[14], wzr
+; CHECK-NEXT: mov v0.b[15], wzr
+; CHECK-NEXT: umaxv b0, v0.16b
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i8 @llvm.experimental.vector.reduce.umax.i8.v9i8(<9 x i8> %a)
+ ret i8 %b
+}
+
+define i32 @test_v3i32(<3 x i32> %a) nounwind {
+; CHECK-LABEL: test_v3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v0.s[3], wzr
+; CHECK-NEXT: umaxv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.umax.i32.v3i32(<3 x i32> %a)
+ ret i32 %b
+}
+
+define i1 @test_v4i1(<4 x i1> %a) nounwind {
+; CHECK-LABEL: test_v4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.4h, #1
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umaxv h0, v0.4h
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+ %b = call i1 @llvm.experimental.vector.reduce.umax.i1.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i24 @test_v4i24(<4 x i24> %a) nounwind {
+; CHECK-LABEL: test_v4i24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bic v0.4s, #255, lsl #24
+; CHECK-NEXT: umaxv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i24 @llvm.experimental.vector.reduce.umax.i24.v4i24(<4 x i24> %a)
+ ret i24 %b
+}
+
+define i128 @test_v2i128(<2 x i128> %a) nounwind {
+; CHECK-LABEL: test_v2i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp x0, x2
+; CHECK-NEXT: csel x8, x0, x2, hi
+; CHECK-NEXT: cmp x1, x3
+; CHECK-NEXT: csel x9, x0, x2, hi
+; CHECK-NEXT: csel x0, x8, x9, eq
+; CHECK-NEXT: csel x1, x1, x3, hi
+; CHECK-NEXT: ret
+ %b = call i128 @llvm.experimental.vector.reduce.umax.i128.v2i128(<2 x i128> %a)
+ ret i128 %b
+}
+
+define i32 @test_v16i32(<16 x i32> %a) nounwind {
+; CHECK-LABEL: test_v16i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT: umax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT: umax v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: umaxv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = call i32 @llvm.experimental.vector.reduce.umax.i32.v16i32(<16 x i32> %a)
+ ret i32 %b
+}