-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp               13
-rw-r--r--  llvm/test/CodeGen/ARM/arm32-rounding.ll               85
-rw-r--r--  llvm/test/CodeGen/ARM/tail-call-builtin.ll            23
-rw-r--r--  llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll  119
-rw-r--r--  llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll   115
-rw-r--r--  llvm/test/CodeGen/Thumb2/float-ops.ll                  5
6 files changed, 182 insertions, 178 deletions
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 85c277a5025..f7322622c03 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -488,12 +488,11 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::DEOPTIMIZE] = "__llvm_deoptimize";
}
-/// InitLibcallCallingConvs - Set default libcall CallingConvs.
-///
-static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
- for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
- CCs[i] = CallingConv::C;
- }
+/// Set default libcall CallingConvs.
+static void InitLibcallCallingConvs(CallingConv::ID *CCs, const Triple &T) {
+ bool IsARM = T.getArch() == Triple::arm || T.getArch() == Triple::thumb;
+ for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
+ CCs[LC] = IsARM ? CallingConv::ARM_AAPCS : CallingConv::C;
}
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
@@ -835,7 +834,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
InitCmpLibcallCCs(CmpLibcallCCs);
- InitLibcallCallingConvs(LibcallCallingConvs);
+ InitLibcallCallingConvs(LibcallCallingConvs, TM.getTargetTriple());
}
void TargetLoweringBase::initActions() {
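For context: after this hunk, every entry in the runtime-library call table reports the AAPCS calling convention on ARM and Thumb triples, and the generic C convention everywhere else. A minimal sketch of how that is observable through the existing TargetLoweringBase accessors is shown below; it is illustrative only, the TLI reference is assumed to come from an already-constructed ARM subtarget, and the header path may differ between LLVM versions (none of this is part of the patch itself).

#include "llvm/CodeGen/TargetLowering.h"  // older trees: llvm/Target/TargetLowering.h

using namespace llvm;

// Returns true when libcalls such as floor() are tagged with the ARM AAPCS
// convention, which is what the updated InitLibcallCallingConvs selects for
// Triple::arm and Triple::thumb; other targets keep CallingConv::C.
static bool usesAAPCSLibcalls(const TargetLoweringBase &TLI) {
  return TLI.getLibcallCallingConv(RTLIB::FLOOR_F64) ==
         CallingConv::ARM_AAPCS;
}

This is consistent with the test updates that follow: hard-float callers use the VFP variant of AAPCS while the library routines now use plain AAPCS, so several calls that previously folded into a tail call (b floor) are expected as ordinary calls (bl floor).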
diff --git a/llvm/test/CodeGen/ARM/arm32-rounding.ll b/llvm/test/CodeGen/ARM/arm32-rounding.ll
index f247648d814..d1aeb886b43 100644
--- a/llvm/test/CodeGen/ARM/arm32-rounding.ll
+++ b/llvm/test/CodeGen/ARM/arm32-rounding.ll
@@ -4,115 +4,116 @@
; CHECK-LABEL: test1
; CHECK: vrintm.f32
-define float @test1(float %a) {
+define arm_aapcs_vfpcc float @test1(float %a) {
entry:
- %call = call float @floorf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @floorf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test2
-; SP: b floor
+; SP: bl floor
; DP: vrintm.f64
-define double @test2(double %a) {
+define arm_aapcs_vfpcc double @test2(double %a) {
entry:
- %call = call double @floor(double %a) nounwind readnone
+ %call = call arm_aapcscc double @floor(double %a) nounwind readnone
ret double %call
}
; CHECK-LABEL: test3
; CHECK: vrintp.f32
-define float @test3(float %a) {
+define arm_aapcs_vfpcc float @test3(float %a) {
entry:
- %call = call float @ceilf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @ceilf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test4
-; SP: b ceil
+; SP: bl ceil
; DP: vrintp.f64
-define double @test4(double %a) {
+define arm_aapcs_vfpcc double @test4(double %a) {
entry:
- %call = call double @ceil(double %a) nounwind readnone
+ %call = call arm_aapcscc double @ceil(double %a) nounwind readnone
ret double %call
}
; CHECK-LABEL: test5
; CHECK: vrinta.f32
-define float @test5(float %a) {
+define arm_aapcs_vfpcc float @test5(float %a) {
entry:
- %call = call float @roundf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @roundf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test6
-; SP: b round
+; SP: bl round
; DP: vrinta.f64
-define double @test6(double %a) {
+define arm_aapcs_vfpcc double @test6(double %a) {
entry:
- %call = call double @round(double %a) nounwind readnone
+ %call = call arm_aapcscc double @round(double %a) nounwind readnone
ret double %call
}
; CHECK-LABEL: test7
; CHECK: vrintz.f32
-define float @test7(float %a) {
+define arm_aapcs_vfpcc float @test7(float %a) {
entry:
- %call = call float @truncf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @truncf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test8
-; SP: b trunc
+; SP: bl trunc
; DP: vrintz.f64
-define double @test8(double %a) {
+define arm_aapcs_vfpcc double @test8(double %a) {
entry:
- %call = call double @trunc(double %a) nounwind readnone
+ %call = call arm_aapcscc double @trunc(double %a) nounwind readnone
ret double %call
}
; CHECK-LABEL: test9
; CHECK: vrintr.f32
-define float @test9(float %a) {
+define arm_aapcs_vfpcc float @test9(float %a) {
entry:
- %call = call float @nearbyintf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @nearbyintf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test10
-; SP: b nearbyint
+; SP: bl nearbyint
; DP: vrintr.f64
-define double @test10(double %a) {
+define arm_aapcs_vfpcc double @test10(double %a) {
entry:
- %call = call double @nearbyint(double %a) nounwind readnone
+ %call = call arm_aapcscc double @nearbyint(double %a) nounwind readnone
ret double %call
}
; CHECK-LABEL: test11
; CHECK: vrintx.f32
-define float @test11(float %a) {
+define arm_aapcs_vfpcc float @test11(float %a) {
entry:
- %call = call float @rintf(float %a) nounwind readnone
+ %call = call arm_aapcs_vfpcc float @rintf(float %a) nounwind readnone
ret float %call
}
; CHECK-LABEL: test12
-; SP: b rint
+; SP: bl rint
; DP: vrintx.f64
-define double @test12(double %a) {
+define arm_aapcs_vfpcc double @test12(double %a) {
entry:
- %call = call double @rint(double %a) nounwind readnone
+ %call = call arm_aapcscc double @rint(double %a) nounwind readnone
ret double %call
}
-declare float @floorf(float) nounwind readnone
-declare double @floor(double) nounwind readnone
-declare float @ceilf(float) nounwind readnone
-declare double @ceil(double) nounwind readnone
-declare float @roundf(float) nounwind readnone
-declare double @round(double) nounwind readnone
-declare float @truncf(float) nounwind readnone
-declare double @trunc(double) nounwind readnone
-declare float @nearbyintf(float) nounwind readnone
-declare double @nearbyint(double) nounwind readnone
-declare float @rintf(float) nounwind readnone
-declare double @rint(double) nounwind readnone
+declare arm_aapcs_vfpcc float @floorf(float) nounwind readnone
+declare arm_aapcscc double @floor(double) nounwind readnone
+declare arm_aapcs_vfpcc float @ceilf(float) nounwind readnone
+declare arm_aapcscc double @ceil(double) nounwind readnone
+declare arm_aapcs_vfpcc float @roundf(float) nounwind readnone
+declare arm_aapcscc double @round(double) nounwind readnone
+declare arm_aapcs_vfpcc float @truncf(float) nounwind readnone
+declare arm_aapcscc double @trunc(double) nounwind readnone
+declare arm_aapcs_vfpcc float @nearbyintf(float) nounwind readnone
+declare arm_aapcscc double @nearbyint(double) nounwind readnone
+declare arm_aapcs_vfpcc float @rintf(float) nounwind readnone
+declare arm_aapcscc double @rint(double) nounwind readnone
+
diff --git a/llvm/test/CodeGen/ARM/tail-call-builtin.ll b/llvm/test/CodeGen/ARM/tail-call-builtin.ll
index c829cc52462..0ed15bc8403 100644
--- a/llvm/test/CodeGen/ARM/tail-call-builtin.ll
+++ b/llvm/test/CodeGen/ARM/tail-call-builtin.ll
@@ -1,37 +1,38 @@
; RUN: llc -mtriple=thumbv7-linux-gnueabihf %s -o - | FileCheck %s
-define i64 @test_mismatched_call(double %in) {
+define arm_aapcs_vfpcc i64 @test_mismatched_call(double %in) {
; CHECK-LABEL: test_mismatched_call:
-; CHECK: bl floor
; CHECK: vmov r0, r1, d0
+; CHECK: bl floor
- %val = tail call double @floor(double %in)
+ %val = tail call arm_aapcscc double @floor(double %in)
%res = bitcast double %val to i64
ret i64 %res
}
-define double @test_matched_call(double %in) {
+define arm_aapcs_vfpcc double @test_matched_call(double %in) {
; CHECK-LABEL: test_matched_call:
-; CHECK: b floor
+; CHECK: b _floor
- %val = tail call double @floor(double %in)
+ %val = tail call arm_aapcs_vfpcc double @_floor(double %in)
ret double %val
}
-define void @test_irrelevant_call(double %in) {
+define arm_aapcs_vfpcc void @test_irrelevant_call(double %in) {
; CHECK-LABEL: test_irrelevant_call:
; CHECK-NOT: bl floor
- %val = tail call double @floor(double %in)
+ %val = tail call arm_aapcscc double @floor(double %in)
ret void
}
-define arm_aapcscc double @test_callingconv(double %in) {
+define arm_aapcs_vfpcc double @test_callingconv(double %in) {
; CHECK: test_callingconv:
; CHECK: bl floor
- %val = tail call double @floor(double %in)
+ %val = tail call arm_aapcscc double @floor(double %in)
ret double %val
}
-declare double @floor(double) nounwind readonly
+declare arm_aapcs_vfpcc double @_floor(double) nounwind readonly
+declare arm_aapcscc double @floor(double) nounwind readonly
diff --git a/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll b/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
index 657d1b172da..19d80eb3770 100644
--- a/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
+++ b/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
@@ -5,107 +5,107 @@
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
-declare double @llvm.sqrt.f64(double %Val)
+declare arm_aapcscc double @llvm.sqrt.f64(double %Val)
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
- %1 = call double @llvm.sqrt.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.sqrt.f64(double %a)
ret double %1
}
-declare double @llvm.powi.f64(double %Val, i32 %power)
+declare arm_aapcscc double @llvm.powi.f64(double %Val, i32 %power)
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
-; HARD: b __powidf2
- %1 = call double @llvm.powi.f64(double %a, i32 %b)
+; HARD: bl __powidf2
+ %1 = call arm_aapcscc double @llvm.powi.f64(double %a, i32 %b)
ret double %1
}
-declare double @llvm.sin.f64(double %Val)
+declare arm_aapcscc double @llvm.sin.f64(double %Val)
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
-; HARD: b sin
- %1 = call double @llvm.sin.f64(double %a)
+; HARD: bl sin
+ %1 = call arm_aapcscc double @llvm.sin.f64(double %a)
ret double %1
}
-declare double @llvm.cos.f64(double %Val)
+declare arm_aapcscc double @llvm.cos.f64(double %Val)
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
-; HARD: b cos
- %1 = call double @llvm.cos.f64(double %a)
+; HARD: bl cos
+ %1 = call arm_aapcscc double @llvm.cos.f64(double %a)
ret double %1
}
-declare double @llvm.pow.f64(double %Val, double %power)
+declare arm_aapcscc double @llvm.pow.f64(double %Val, double %power)
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
-; HARD: b pow
- %1 = call double @llvm.pow.f64(double %a, double %b)
+; HARD: bl pow
+ %1 = call arm_aapcscc double @llvm.pow.f64(double %a, double %b)
ret double %1
}
-declare double @llvm.exp.f64(double %Val)
+declare arm_aapcscc double @llvm.exp.f64(double %Val)
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
-; HARD: b exp
- %1 = call double @llvm.exp.f64(double %a)
+; HARD: bl exp
+ %1 = call arm_aapcscc double @llvm.exp.f64(double %a)
ret double %1
}
-declare double @llvm.exp2.f64(double %Val)
+declare arm_aapcscc double @llvm.exp2.f64(double %Val)
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
-; HARD: b exp2
- %1 = call double @llvm.exp2.f64(double %a)
+; HARD: bl exp2
+ %1 = call arm_aapcscc double @llvm.exp2.f64(double %a)
ret double %1
}
-declare double @llvm.log.f64(double %Val)
+declare arm_aapcscc double @llvm.log.f64(double %Val)
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
-; HARD: b log
- %1 = call double @llvm.log.f64(double %a)
+; HARD: bl log
+ %1 = call arm_aapcscc double @llvm.log.f64(double %a)
ret double %1
}
-declare double @llvm.log10.f64(double %Val)
+declare arm_aapcscc double @llvm.log10.f64(double %Val)
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
-; HARD: b log10
- %1 = call double @llvm.log10.f64(double %a)
+; HARD: bl log10
+ %1 = call arm_aapcscc double @llvm.log10.f64(double %a)
ret double %1
}
-declare double @llvm.log2.f64(double %Val)
+declare arm_aapcscc double @llvm.log2.f64(double %Val)
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
-; HARD: b log2
- %1 = call double @llvm.log2.f64(double %a)
+; HARD: bl log2
+ %1 = call arm_aapcscc double @llvm.log2.f64(double %a)
ret double %1
}
-declare double @llvm.fma.f64(double %a, double %b, double %c)
+declare arm_aapcscc double @llvm.fma.f64(double %a, double %b, double %c)
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
; HARD: vfma.f64
- %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
+ %1 = call arm_aapcscc double @llvm.fma.f64(double %a, double %b, double %c)
ret double %1
}
; FIXME: the FPv4-SP version is less efficient than the no-FPU version
-declare double @llvm.fabs.f64(double %Val)
+declare arm_aapcscc double @llvm.fabs.f64(double %Val)
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
@@ -116,11 +116,11 @@ define double @abs_d(double %a) {
; SP: bfi r1, r2, #31, #1
; SP: vmov d0, r0, r1
; DP: vabs.f64 d0, d0
- %1 = call double @llvm.fabs.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.fabs.f64(double %a)
ret double %1
}
-declare double @llvm.copysign.f64(double %Mag, double %Sgn)
+declare arm_aapcscc double @llvm.copysign.f64(double %Mag, double %Sgn)
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
@@ -130,71 +130,71 @@ define double @copysign_d(double %a, double %b) {
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vshl.i64 [[REG]], [[REG]], #32
; NEON: vbsl [[REG]], d
- %1 = call double @llvm.copysign.f64(double %a, double %b)
+ %1 = call arm_aapcscc double @llvm.copysign.f64(double %a, double %b)
ret double %1
}
-declare double @llvm.floor.f64(double %Val)
+declare arm_aapcscc double @llvm.floor.f64(double %Val)
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
-; VFP4: b floor
+; VFP4: bl floor
; FP-ARMv8: vrintm.f64
- %1 = call double @llvm.floor.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.floor.f64(double %a)
ret double %1
}
-declare double @llvm.ceil.f64(double %Val)
+declare arm_aapcscc double @llvm.ceil.f64(double %Val)
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
-; VFP4: b ceil
+; VFP4: bl ceil
; FP-ARMv8: vrintp.f64
- %1 = call double @llvm.ceil.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.ceil.f64(double %a)
ret double %1
}
-declare double @llvm.trunc.f64(double %Val)
+declare arm_aapcscc double @llvm.trunc.f64(double %Val)
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
-; FFP4: b trunc
+; FFP4: bl trunc
; FP-ARMv8: vrintz.f64
- %1 = call double @llvm.trunc.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.trunc.f64(double %a)
ret double %1
}
-declare double @llvm.rint.f64(double %Val)
+declare arm_aapcscc double @llvm.rint.f64(double %Val)
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
-; VFP4: b rint
+; VFP4: bl rint
; FP-ARMv8: vrintx.f64
- %1 = call double @llvm.rint.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.rint.f64(double %a)
ret double %1
}
-declare double @llvm.nearbyint.f64(double %Val)
+declare arm_aapcscc double @llvm.nearbyint.f64(double %Val)
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
-; VFP4: b nearbyint
+; VFP4: bl nearbyint
; FP-ARMv8: vrintr.f64
- %1 = call double @llvm.nearbyint.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.nearbyint.f64(double %a)
ret double %1
}
-declare double @llvm.round.f64(double %Val)
+declare arm_aapcscc double @llvm.round.f64(double %Val)
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
-; VFP4: b round
+; VFP4: bl round
; FP-ARMv8: vrinta.f64
- %1 = call double @llvm.round.f64(double %a)
+ %1 = call arm_aapcscc double @llvm.round.f64(double %a)
ret double %1
}
-declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
+declare arm_aapcscc double @llvm.fmuladd.f64(double %a, double %b, double %c)
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
@@ -202,21 +202,21 @@ define double @fmuladd_d(double %a, double %b, double %c) {
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
- %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ %1 = call arm_aapcscc double @llvm.fmuladd.f64(double %a, double %b, double %c)
ret double %1
}
-declare i16 @llvm.convert.to.fp16.f64(double %a)
+declare arm_aapcscc i16 @llvm.convert.to.fp16.f64(double %a)
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
- %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
+ %1 = call arm_aapcscc i16 @llvm.convert.to.fp16.f64(double %a)
ret i16 %1
}
-declare double @llvm.convert.from.fp16.f64(i16 %a)
+declare arm_aapcscc double @llvm.convert.from.fp16.f64(i16 %a)
define double @h_to_d(i16 %a) {
; CHECK-LABEL: h_to_d:
; NONE: bl __aeabi_h2f
@@ -226,6 +226,7 @@ define double @h_to_d(i16 %a) {
; VFPv4: vcvt{{[bt]}}.f32.f16
; VFPv4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
- %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
+ %1 = call arm_aapcscc double @llvm.convert.from.fp16.f64(i16 %a)
ret double %1
}
+
diff --git a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
index 847aeacd2f9..99d662ff169 100644
--- a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
+++ b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
@@ -14,106 +14,106 @@ define float @sqrt_f(float %a) {
ret float %1
}
-declare float @llvm.powi.f32(float %Val, i32 %power)
+declare arm_aapcscc float @llvm.powi.f32(float %Val, i32 %power)
define float @powi_f(float %a, i32 %b) {
; CHECK-LABEL: powi_f:
; SOFT: bl __powisf2
-; HARD: b __powisf2
- %1 = call float @llvm.powi.f32(float %a, i32 %b)
+; HARD: bl __powisf2
+ %1 = call arm_aapcscc float @llvm.powi.f32(float %a, i32 %b)
ret float %1
}
-declare float @llvm.sin.f32(float %Val)
+declare arm_aapcscc float @llvm.sin.f32(float %Val)
define float @sin_f(float %a) {
; CHECK-LABEL: sin_f:
; SOFT: bl sinf
-; HARD: b sinf
- %1 = call float @llvm.sin.f32(float %a)
+; HARD: bl sinf
+ %1 = call arm_aapcscc float @llvm.sin.f32(float %a)
ret float %1
}
-declare float @llvm.cos.f32(float %Val)
+declare arm_aapcscc float @llvm.cos.f32(float %Val)
define float @cos_f(float %a) {
; CHECK-LABEL: cos_f:
; SOFT: bl cosf
-; HARD: b cosf
- %1 = call float @llvm.cos.f32(float %a)
+; HARD: bl cosf
+ %1 = call arm_aapcscc float @llvm.cos.f32(float %a)
ret float %1
}
-declare float @llvm.pow.f32(float %Val, float %power)
+declare arm_aapcscc float @llvm.pow.f32(float %Val, float %power)
define float @pow_f(float %a, float %b) {
; CHECK-LABEL: pow_f:
; SOFT: bl powf
-; HARD: b powf
- %1 = call float @llvm.pow.f32(float %a, float %b)
+; HARD: bl powf
+ %1 = call arm_aapcscc float @llvm.pow.f32(float %a, float %b)
ret float %1
}
-declare float @llvm.exp.f32(float %Val)
+declare arm_aapcscc float @llvm.exp.f32(float %Val)
define float @exp_f(float %a) {
; CHECK-LABEL: exp_f:
; SOFT: bl expf
-; HARD: b expf
- %1 = call float @llvm.exp.f32(float %a)
+; HARD: bl expf
+ %1 = call arm_aapcscc float @llvm.exp.f32(float %a)
ret float %1
}
-declare float @llvm.exp2.f32(float %Val)
+declare arm_aapcscc float @llvm.exp2.f32(float %Val)
define float @exp2_f(float %a) {
; CHECK-LABEL: exp2_f:
; SOFT: bl exp2f
-; HARD: b exp2f
- %1 = call float @llvm.exp2.f32(float %a)
+; HARD: bl exp2f
+ %1 = call arm_aapcscc float @llvm.exp2.f32(float %a)
ret float %1
}
-declare float @llvm.log.f32(float %Val)
+declare arm_aapcscc float @llvm.log.f32(float %Val)
define float @log_f(float %a) {
; CHECK-LABEL: log_f:
; SOFT: bl logf
-; HARD: b logf
- %1 = call float @llvm.log.f32(float %a)
+; HARD: bl logf
+ %1 = call arm_aapcscc float @llvm.log.f32(float %a)
ret float %1
}
-declare float @llvm.log10.f32(float %Val)
+declare arm_aapcscc float @llvm.log10.f32(float %Val)
define float @log10_f(float %a) {
; CHECK-LABEL: log10_f:
; SOFT: bl log10f
-; HARD: b log10f
- %1 = call float @llvm.log10.f32(float %a)
+; HARD: bl log10f
+ %1 = call arm_aapcscc float @llvm.log10.f32(float %a)
ret float %1
}
-declare float @llvm.log2.f32(float %Val)
+declare arm_aapcscc float @llvm.log2.f32(float %Val)
define float @log2_f(float %a) {
; CHECK-LABEL: log2_f:
; SOFT: bl log2f
-; HARD: b log2f
- %1 = call float @llvm.log2.f32(float %a)
+; HARD: bl log2f
+ %1 = call arm_aapcscc float @llvm.log2.f32(float %a)
ret float %1
}
-declare float @llvm.fma.f32(float %a, float %b, float %c)
+declare arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
define float @fma_f(float %a, float %b, float %c) {
; CHECK-LABEL: fma_f:
; SOFT: bl fmaf
; HARD: vfma.f32
- %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
+ %1 = call arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
ret float %1
}
-declare float @llvm.fabs.f32(float %Val)
+declare arm_aapcscc float @llvm.fabs.f32(float %Val)
define float @abs_f(float %a) {
; CHECK-LABEL: abs_f:
; SOFT: bic r0, r0, #-2147483648
; HARD: vabs.f32
- %1 = call float @llvm.fabs.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.fabs.f32(float %a)
ret float %1
}
-declare float @llvm.copysign.f32(float %Mag, float %Sgn)
+declare arm_aapcscc float @llvm.copysign.f32(float %Mag, float %Sgn)
define float @copysign_f(float %a, float %b) {
; CHECK-LABEL: copysign_f:
; NONE: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
@@ -124,73 +124,73 @@ define float @copysign_f(float %a, float %b) {
; VFP: bfi r{{[0-9]+}}, [[REG]], #31, #1
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vbsl [[REG]], d
- %1 = call float @llvm.copysign.f32(float %a, float %b)
+ %1 = call arm_aapcscc float @llvm.copysign.f32(float %a, float %b)
ret float %1
}
-declare float @llvm.floor.f32(float %Val)
+declare arm_aapcscc float @llvm.floor.f32(float %Val)
define float @floor_f(float %a) {
; CHECK-LABEL: floor_f:
; SOFT: bl floorf
-; VFP4: b floorf
+; VFP4: bl floorf
; FP-ARMv8: vrintm.f32
- %1 = call float @llvm.floor.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.floor.f32(float %a)
ret float %1
}
-declare float @llvm.ceil.f32(float %Val)
+declare arm_aapcscc float @llvm.ceil.f32(float %Val)
define float @ceil_f(float %a) {
; CHECK-LABEL: ceil_f:
; SOFT: bl ceilf
-; VFP4: b ceilf
+; VFP4: bl ceilf
; FP-ARMv8: vrintp.f32
- %1 = call float @llvm.ceil.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.ceil.f32(float %a)
ret float %1
}
-declare float @llvm.trunc.f32(float %Val)
+declare arm_aapcscc float @llvm.trunc.f32(float %Val)
define float @trunc_f(float %a) {
; CHECK-LABEL: trunc_f:
; SOFT: bl truncf
-; VFP4: b truncf
+; VFP4: bl truncf
; FP-ARMv8: vrintz.f32
- %1 = call float @llvm.trunc.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.trunc.f32(float %a)
ret float %1
}
-declare float @llvm.rint.f32(float %Val)
+declare arm_aapcscc float @llvm.rint.f32(float %Val)
define float @rint_f(float %a) {
; CHECK-LABEL: rint_f:
; SOFT: bl rintf
-; VFP4: b rintf
+; VFP4: bl rintf
; FP-ARMv8: vrintx.f32
- %1 = call float @llvm.rint.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.rint.f32(float %a)
ret float %1
}
-declare float @llvm.nearbyint.f32(float %Val)
+declare arm_aapcscc float @llvm.nearbyint.f32(float %Val)
define float @nearbyint_f(float %a) {
; CHECK-LABEL: nearbyint_f:
; SOFT: bl nearbyintf
-; VFP4: b nearbyintf
+; VFP4: bl nearbyintf
; FP-ARMv8: vrintr.f32
- %1 = call float @llvm.nearbyint.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.nearbyint.f32(float %a)
ret float %1
}
-declare float @llvm.round.f32(float %Val)
+declare arm_aapcscc float @llvm.round.f32(float %Val)
define float @round_f(float %a) {
; CHECK-LABEL: round_f:
; SOFT: bl roundf
-; VFP4: b roundf
+; VFP4: bl roundf
; FP-ARMv8: vrinta.f32
- %1 = call float @llvm.round.f32(float %a)
+ %1 = call arm_aapcscc float @llvm.round.f32(float %a)
ret float %1
}
; FIXME: why does cortex-m4 use vmla, while cortex-a7 uses vmul+vadd?
; (these should be equivalent, even the rounding is the same)
-declare float @llvm.fmuladd.f32(float %a, float %b, float %c)
+declare arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
define float @fmuladd_f(float %a, float %b, float %c) {
; CHECK-LABEL: fmuladd_f:
; SOFT: bl __aeabi_fmul
@@ -198,24 +198,25 @@ define float @fmuladd_f(float %a, float %b, float %c) {
; VMLA: vmla.f32
; NO-VMLA: vmul.f32
; NO-VMLA: vadd.f32
- %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ %1 = call arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
ret float %1
}
-declare i16 @llvm.convert.to.fp16.f32(float %a)
+declare arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
define i16 @f_to_h(float %a) {
; CHECK-LABEL: f_to_h:
; SOFT: bl __aeabi_f2h
; HARD: vcvt{{[bt]}}.f16.f32
- %1 = call i16 @llvm.convert.to.fp16.f32(float %a)
+ %1 = call arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
ret i16 %1
}
-declare float @llvm.convert.from.fp16.f32(i16 %a)
+declare arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
define float @h_to_f(i16 %a) {
; CHECK-LABEL: h_to_f:
; SOFT: bl __aeabi_h2f
; HARD: vcvt{{[bt]}}.f32.f16
- %1 = call float @llvm.convert.from.fp16.f32(i16 %a)
+ %1 = call arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
ret float %1
}
+
diff --git a/llvm/test/CodeGen/Thumb2/float-ops.ll b/llvm/test/CodeGen/Thumb2/float-ops.ll
index c9f93f2d613..4101984e3f7 100644
--- a/llvm/test/CodeGen/Thumb2/float-ops.ll
+++ b/llvm/test/CodeGen/Thumb2/float-ops.ll
@@ -4,6 +4,7 @@
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP4-ALL -check-prefix=VFP4-DP
define float @add_f(float %a, float %b) {
+
entry:
; CHECK-LABEL: add_f:
; NONE: bl __aeabi_fadd
@@ -83,7 +84,7 @@ define float @rem_f(float %a, float %b) {
entry:
; CHECK-LABEL: rem_f:
; NONE: bl fmodf
-; HARD: b fmodf
+; HARD: bl fmodf
%0 = frem float %a, %b
ret float %0
}
@@ -92,7 +93,7 @@ define double @rem_d(double %a, double %b) {
entry:
; CHECK-LABEL: rem_d:
; NONE: bl fmod
-; HARD: b fmod
+; HARD: bl fmod
%0 = frem double %a, %b
ret double %0
}