Diffstat (limited to 'llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll')
-rw-r--r-- llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll | 114
1 file changed, 114 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
new file mode 100644
index 00000000000..41914ca73af
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+; Same as vecreduce-fmul-legalization.ll, but without fmf.
+
+declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: fmul s0, s0, s1
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: ret
+ %b = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half 0.0, <1 x half> %a)
+ ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: fmul s0, s1, v0.s[0]
+; CHECK-NEXT: ret
+ %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float 0.0, <1 x float> %a)
+ ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d1, xzr
+; CHECK-NEXT: fmul d0, d0, d1
+; CHECK-NEXT: ret
+ %b = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double 0.0, <1 x double> %a)
+ ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: adrp x8, .LCPI3_0
+; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT: bl __multf3
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: fmul s1, s1, v0.s[0]
+; CHECK-NEXT: fmul s1, s1, v0.s[1]
+; CHECK-NEXT: fmul s0, s1, v0.s[2]
+; CHECK-NEXT: ret
+ %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float 0.0, <3 x float> %a)
+ ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: adrp x8, .LCPI5_0
+; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: bl __multf3
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: bl __multf3
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: ret
+ %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+ ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov s4, wzr
+; CHECK-NEXT: fmul s4, s4, v0.s[0]
+; CHECK-NEXT: fmul s4, s4, v0.s[1]
+; CHECK-NEXT: fmul s4, s4, v0.s[2]
+; CHECK-NEXT: fmul s0, s4, v0.s[3]
+; CHECK-NEXT: fmul s0, s0, v1.s[0]
+; CHECK-NEXT: fmul s0, s0, v1.s[1]
+; CHECK-NEXT: fmul s0, s0, v1.s[2]
+; CHECK-NEXT: fmul s0, s0, v1.s[3]
+; CHECK-NEXT: fmul s0, s0, v2.s[0]
+; CHECK-NEXT: fmul s0, s0, v2.s[1]
+; CHECK-NEXT: fmul s0, s0, v2.s[2]
+; CHECK-NEXT: fmul s0, s0, v2.s[3]
+; CHECK-NEXT: fmul s0, s0, v3.s[0]
+; CHECK-NEXT: fmul s0, s0, v3.s[1]
+; CHECK-NEXT: fmul s0, s0, v3.s[2]
+; CHECK-NEXT: fmul s0, s0, v3.s[3]
+; CHECK-NEXT: ret
+ %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float 0.0, <16 x float> %a)
+ ret float %b
+}
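For comparison, here is a minimal sketch (not part of this commit) of the fmf-carrying form that the companion test vecreduce-fmul-legalization.ll exercises. With a reassoc fast-math flag on the call, the backend is allowed to reassociate the reduction instead of lowering it to the strict in-order fmul chain checked above. The function name test_v4f32_reassoc, the v4f32 width, and the 1.0 start value are illustrative choices, not taken from the patch.

declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float, <4 x float>)

; With a fast-math flag the reduction may be reassociated; without one
; (as in the file above) it must be scalarized into an ordered chain.
define float @test_v4f32_reassoc(<4 x float> %a) nounwind {
  %b = call reassoc float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float 1.0, <4 x float> %a)
  ret float %b
}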