author     Chad Rosier <mcrosier@codeaurora.org>  2013-11-15 21:28:10 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>  2013-11-15 21:28:10 +0000
commit     0c57c3402e93c23d349325d6b1391b84bb103afd (patch)
tree       cc72dd3e19a14798e04eb619f5165406f8357f07
parent     e448f9e418ffcec6fdadfc51af213f0d11645296 (diff)
[AArch64] Fix the scalar NEON ACLE functions so that they return float/double
rather than the vector equivalent.

llvm-svn: 194853
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td     16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrNEON.td    8
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-scalar-cvt.ll  40
3 files changed, 28 insertions(+), 36 deletions(-)
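For context, a minimal LLVM IR sketch of what this change means for callers
(the function name @example_scvtf is hypothetical; the intrinsic signature
matches the updated declarations in the test diff below): the scalar
conversion intrinsics now return float/double directly, so the extra
extractelement on the result goes away.

; Sketch only: @example_scvtf is a hypothetical caller, not part of this commit.
declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)

define float @example_scvtf(i32 %a) {
entry:
  %v = insertelement <1 x i32> undef, i32 %a, i32 0
  ; The intrinsic now yields a scalar float; no extractelement is needed.
  %r = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %v)
  ret float %r
}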
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 42b00aad4ff..29026f66ab6 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -233,15 +233,15 @@ def int_aarch64_neon_vpfminnmq :
// Scalar Signed Integer Convert To Floating-point
def int_aarch64_neon_vcvtf32_s32 :
- Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_s64 :
- Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;
// Scalar Unsigned Integer Convert To Floating-point
def int_aarch64_neon_vcvtf32_u32 :
- Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_u64 :
- Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;
// Scalar Floating-point Reciprocal Exponent
def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
@@ -330,15 +330,15 @@ def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
def int_aarch64_neon_vcvtf32_n_s32 :
- Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_n_s64 :
- Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
def int_aarch64_neon_vcvtf32_n_u32 :
- Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_n_u64 :
- Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
def int_aarch64_neon_vcvts_n_s32_f32 :
diff --git a/llvm/lib/Target/AArch64/AArch64InstrNEON.td b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
index 63790cbdac3..4b8bb8e1efc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
@@ -3853,9 +3853,9 @@ multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
SDPatternOperator Dopnode,
Instruction INSTS,
Instruction INSTD> {
- def : Pat<(v1f32 (Sopnode (v1i32 FPR32:$Rn))),
+ def : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn))),
(INSTS FPR32:$Rn)>;
- def : Pat<(v1f64 (Dopnode (v1i64 FPR64:$Rn))),
+ def : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn))),
(INSTD FPR64:$Rn)>;
}
@@ -4135,9 +4135,9 @@ multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
SDPatternOperator Dopnode,
Instruction INSTS,
Instruction INSTD> {
- def ssi : Pat<(v1f32 (Sopnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
+ def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
(INSTS FPR32:$Rn, imm:$Imm)>;
- def ddi : Pat<(v1f64 (Dopnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
+ def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
(INSTD FPR64:$Rn, imm:$Imm)>;
}
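Similarly, a minimal sketch of the fixed-point (immediate) variant, mirroring
the updated tests that follow (@example_scvtf_n is a hypothetical name): the
second operand is the fractional-bits immediate and must be a compile-time
constant, which is why the patterns above match (i32 imm:$Imm).

; Sketch only: @example_scvtf_n is a hypothetical caller, not part of this commit.
declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)

define float @example_scvtf_n(i32 %a) {
entry:
  %v = insertelement <1 x i32> undef, i32 %a, i32 0
  ; Convert with 1 fractional bit; the immediate operand must be constant.
  %r = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %v, i32 1)
  ret float %r
}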
diff --git a/llvm/test/CodeGen/AArch64/neon-scalar-cvt.ll b/llvm/test/CodeGen/AArch64/neon-scalar-cvt.ll
index a7f0ac09658..2fe25b8c19e 100644
--- a/llvm/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/llvm/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -5,96 +5,88 @@ define float @test_vcvts_f32_s32(i32 %a) {
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
- %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
define double @test_vcvtd_f64_s64(i64 %a) {
; CHECK: test_vcvtd_f64_s64
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
- %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
define float @test_vcvts_f32_u32(i32 %a) {
; CHECK: test_vcvts_f32_u32
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
- %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
define double @test_vcvtd_f64_u64(i64 %a) {
; CHECK: test_vcvtd_f64_u64
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
- %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
define float @test_vcvts_n_f32_s32(i32 %a) {
; CHECK: test_vcvts_n_f32_s32
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
- %0 = extractelement <1 x float> %vcvtf1, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
define double @test_vcvtd_n_f64_s64(i64 %a) {
; CHECK: test_vcvtd_n_f64_s64
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
- %0 = extractelement <1 x double> %vcvtf1, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
define float @test_vcvts_n_f32_u32(i32 %a) {
; CHECK: test_vcvts_n_f32_u32
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
- %0 = extractelement <1 x float> %vcvtf1, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
define double @test_vcvtd_n_f64_u64(i64 %a) {
; CHECK: test_vcvtd_n_f64_u64
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
- %0 = extractelement <1 x double> %vcvtf1, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
define i32 @test_vcvts_n_s32_f32(float %a) {
; CHECK: test_vcvts_n_s32_f32