Diffstat (limited to 'clang/lib')
 clang/lib/Basic/Targets/AArch64.cpp |   2
 clang/lib/CodeGen/CGBuiltin.cpp     | 217
 clang/lib/Headers/CMakeLists.txt    |  16
 clang/lib/Headers/module.modulemap  |   1
 clang/lib/Sema/SemaChecking.cpp     |   2
 5 files changed, 236 insertions(+), 2 deletions(-)
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 4d3cd121f70..5eccf6a2cda 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -183,6 +183,8 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if ((FPU & NeonMode) && HasFullFP16)
Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
+ if (HasFullFP16)
+ Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
switch (ArchKind) {
default:
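With this macro defined, user code can detect scalar FP16 support at compile time before pulling in the new header. A minimal sketch, assuming a target built with the fullfp16 extension (e.g. -march=armv8.2-a+fp16); the helper name scale_h is illustrative:

    #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
    #include <arm_fp16.h>
    /* Multiply a half-precision value by a half-precision scale factor. */
    static inline float16_t scale_h(float16_t x, float16_t s) {
      return vmulh_f16(x, s);
    }
    #endif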
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 51a3cea7d0e..c29ef1f8c11 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4101,6 +4101,54 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
+ // FP16 scalar intrinsics go here.
+ NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vabsh_f16, aarch64_neon_abs, Add1ArgType),
+ NEONMAP1(vcageh_f16, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcagth_f16, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcaleh_f16, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcalth_f16, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_s16_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u16_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s16, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u16, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s16_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u16_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s16_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u16_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s16_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u16_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s16_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u16_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
#undef NEONMAP0
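Each NEONMAP1 entry above binds a scalar FP16 builtin directly to an AArch64 intrinsic. A usage sketch, assuming -march=armv8.2-a+fp16; recip_h is an illustrative helper that combines the mapped estimate intrinsic with the reciprocal step handled further down in this patch:

    #include <arm_fp16.h>
    /* One Newton-Raphson refinement of 1/a. */
    float16_t recip_h(float16_t a) {
      float16_t x = vrecpeh_f16(a);            /* aarch64_neon_frecpe estimate     */
      return vmulh_f16(x, vrecpsh_f16(a, x));  /* reciprocal step computes 2 - a*x */
    }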
@@ -6125,6 +6173,58 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
+ case NEON::BI__builtin_neon_vcvth_f16_u16:
+ case NEON::BI__builtin_neon_vcvth_f16_u32:
+ case NEON::BI__builtin_neon_vcvth_f16_u64:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_f16_s16:
+ case NEON::BI__builtin_neon_vcvth_f16_s32:
+ case NEON::BI__builtin_neon_vcvth_f16_s64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ llvm::Type *FTy = HalfTy;
+ llvm::Type *InTy;
+ if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
+ InTy = Int64Ty;
+ else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
+ InTy = Int32Ty;
+ else
+ InTy = Int16Ty;
+ Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+ if (usgn)
+ return Builder.CreateUIToFP(Ops[0], FTy);
+ return Builder.CreateSIToFP(Ops[0], FTy);
+ }
+ case NEON::BI__builtin_neon_vcvth_u16_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s16_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int16Ty);
+ return Builder.CreateFPToSI(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_u32_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s32_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int32Ty);
+ return Builder.CreateFPToSI(Ops[0], Int32Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_u64_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s64_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int64Ty);
+ return Builder.CreateFPToSI(Ops[0], Int64Ty);
+ }
case NEON::BI__builtin_neon_vpaddd_s64: {
llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
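The integer/half conversions above are emitted as plain sitofp/uitofp and fptosi/fptoui casts rather than target intrinsics. A usage sketch, assuming -march=armv8.2-a+fp16; the helper names are illustrative:

    #include <arm_fp16.h>
    #include <stdint.h>
    float16_t from_int(int32_t i)  { return vcvth_f16_s32(i); }  /* sitofp i32 -> half */
    uint16_t  to_uint(float16_t h) { return vcvth_u16_f16(h); }  /* fptoui half -> i16 */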
@@ -6166,6 +6266,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vceqzd_s64:
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
+ case NEON::BI__builtin_neon_vceqzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6173,6 +6274,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgezd_s64:
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
+ case NEON::BI__builtin_neon_vcgezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6180,6 +6282,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vclezd_s64:
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
+ case NEON::BI__builtin_neon_vclezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6187,6 +6290,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgtzd_s64:
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
+ case NEON::BI__builtin_neon_vcgtzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6194,6 +6298,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcltzd_s64:
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
+ case NEON::BI__builtin_neon_vcltzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6246,6 +6351,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
+ case NEON::BI__builtin_neon_vceqh_f16:
+ case NEON::BI__builtin_neon_vcleh_f16:
+ case NEON::BI__builtin_neon_vclth_f16:
+ case NEON::BI__builtin_neon_vcgeh_f16:
+ case NEON::BI__builtin_neon_vcgth_f16: {
+ llvm::CmpInst::Predicate P;
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
+ case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
+ case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
+ case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
+ case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
+ }
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
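Like the other scalar compares, the FP16 forms produce an all-ones or all-zero result, sign-extended here to 16 bits. A usage sketch, assuming -march=armv8.2-a+fp16; is_greater is illustrative:

    #include <arm_fp16.h>
    #include <stdint.h>
    uint16_t is_greater(float16_t a, float16_t b) {
      return vcgth_f16(a, b);   /* 0xFFFF if a > b (ordered), 0 otherwise */
    }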
@@ -6383,6 +6508,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::VectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
+ case NEON::BI__builtin_neon_vaddh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
+ case NEON::BI__builtin_neon_vsubh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
+ case NEON::BI__builtin_neon_vmulh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
+ case NEON::BI__builtin_neon_vdivh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
+ case NEON::BI__builtin_neon_vfmah_f16: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ // NEON intrinsic puts accumulator first, unlike the LLVM fma.
+ return Builder.CreateCall(F,
+ {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vfmsh_f16: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
+ Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+ // NEON intrinsic puts accumulator first, unlike the LLVM fma.
+ return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ }
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
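Note the operand order for the fused forms: the ACLE intrinsics take the accumulator first, so vfmah_f16(a, b, c) computes b * c + a and vfmsh_f16(a, b, c) computes a - b * c. A sketch, assuming -march=armv8.2-a+fp16; the helper names are illustrative:

    #include <arm_fp16.h>
    float16_t mul_add(float16_t acc, float16_t x, float16_t y) {
      return vfmah_f16(acc, x, y);   /* llvm.fma(x, y, acc)  */
    }
    float16_t mul_sub(float16_t acc, float16_t x, float16_t y) {
      return vfmsh_f16(acc, x, y);   /* llvm.fma(-x, y, acc) */
    }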
@@ -6657,12 +6807,22 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+ case NEON::BI__builtin_neon_vmaxh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmax;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
+ }
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+ case NEON::BI__builtin_neon_vminh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmin;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
+ }
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
@@ -6701,20 +6861,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmq_v:
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
+ case NEON::BI__builtin_neon_vminnmh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fminnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
+ case NEON::BI__builtin_neon_vmaxnmh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmaxnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
Ops, "vrecps");
}
- case NEON::BI__builtin_neon_vrecpsd_f64: {
+ case NEON::BI__builtin_neon_vrecpsd_f64:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
Ops, "vrecps");
- }
+ case NEON::BI__builtin_neon_vrecpsh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
+ Ops, "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
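The max/min family keeps the distinction between the plain forms and the maxNum/minNum forms, which prefer the numeric operand when the other input is a NaN. A sketch, assuming -march=armv8.2-a+fp16; clamp01 is illustrative:

    #include <arm_fp16.h>
    float16_t clamp01(float16_t x) {
      float16_t lo = (float16_t)0.0f;
      float16_t hi = (float16_t)1.0f;
      return vminnmh_f16(vmaxnmh_f16(x, lo), hi);  /* lowers via fmaxnm/fminnm */
    }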
@@ -6730,36 +6901,71 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
+ case NEON::BI__builtin_neon_vrndah_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::round;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
+ }
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
Int = Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
+ case NEON::BI__builtin_neon_vrndih_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::nearbyint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
+ }
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v: {
Int = Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
}
+ case NEON::BI__builtin_neon_vrndmh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::floor;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
+ }
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
Int = Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
+ case NEON::BI__builtin_neon_vrndnh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frintn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
+ }
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
+ case NEON::BI__builtin_neon_vrndph_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::ceil;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
+ }
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
Int = Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
+ case NEON::BI__builtin_neon_vrndxh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::rint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
+ }
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
Int = Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
+ case NEON::BI__builtin_neon_vrndh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::trunc;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
+ }
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Intrinsic::trunc;
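Each scalar rounding builtin selects a different rounding behaviour, mirroring the existing vector forms: vrndah_f16 rounds to nearest with ties away (llvm.round), vrndmh_f16 toward minus infinity (llvm.floor), vrndph_f16 toward plus infinity (llvm.ceil), vrndnh_f16 to nearest even (frintn), vrndxh_f16 and vrndih_f16 use the current rounding mode (llvm.rint and llvm.nearbyint), and vrndh_f16 truncates toward zero (llvm.trunc). A sketch, assuming -march=armv8.2-a+fp16; ceil_minus_floor is illustrative:

    #include <arm_fp16.h>
    float16_t ceil_minus_floor(float16_t x) {
      return vsubh_f16(vrndph_f16(x), vrndmh_f16(x));  /* 0 if x is integral, else 1 */
    }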
@@ -6908,6 +7114,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vnegd_s64:
return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
+ case NEON::BI__builtin_neon_vnegh_f16:
+ return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
Int = Intrinsic::aarch64_neon_fmaxnmp;
@@ -6918,6 +7126,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
+ case NEON::BI__builtin_neon_vsqrth_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::sqrt;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
+ }
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
Int = Intrinsic::sqrt;
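vnegh_f16 and vsqrth_f16 complete the unary operations, lowering to a floating-point negation and llvm.sqrt on half. A sketch, assuming -march=armv8.2-a+fp16; neg_sqrt is illustrative:

    #include <arm_fp16.h>
    float16_t neg_sqrt(float16_t x) {
      return vnegh_f16(vsqrth_f16(x));  /* -sqrt(x) */
    }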
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 97ba3edea1c..84bf7ddb9ad 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -116,7 +116,12 @@ set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
# Generate arm_neon.h
clang_tablegen(arm_neon.h -gen-arm-neon
+ -I ${CLANG_SOURCE_DIR}/include/clang/Basic/
SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td)
+# Generate arm_fp16.h
+clang_tablegen(arm_fp16.h -gen-arm-fp16
+ -I ${CLANG_SOURCE_DIR}/include/clang/Basic/
+ SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_fp16.td)
set(out_files)
foreach( f ${files} ${cuda_wrapper_files} )
@@ -134,6 +139,11 @@ add_custom_command(OUTPUT ${output_dir}/arm_neon.h
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h ${output_dir}/arm_neon.h
COMMENT "Copying clang's arm_neon.h...")
list(APPEND out_files ${output_dir}/arm_neon.h)
+add_custom_command(OUTPUT ${output_dir}/arm_fp16.h
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h ${output_dir}/arm_fp16.h
+ COMMENT "Copying clang's arm_fp16.h...")
+list(APPEND out_files ${output_dir}/arm_fp16.h)
add_custom_target(clang-headers ALL DEPENDS ${out_files})
set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
@@ -145,6 +155,12 @@ install(
DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
install(
+ FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h
+ COMPONENT clang-headers
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
+
+install(
FILES ${cuda_wrapper_files}
COMPONENT clang-headers
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap
index 95d26cefa6f..961d8dde6eb 100644
--- a/clang/lib/Headers/module.modulemap
+++ b/clang/lib/Headers/module.modulemap
@@ -38,6 +38,7 @@ module _Builtin_intrinsics [system] [extern_c] {
explicit module neon {
requires neon
header "arm_neon.h"
+ header "arm_fp16.h"
export *
}
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index c7f1b94ba50..3a6988c6026 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -1353,6 +1353,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
+#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
}
@@ -1404,6 +1405,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
+#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
}
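Pulling arm_fp16.inc into these switches lets Sema run the same overload and immediate-range checks over the new builtins; in particular, the fixed-point conversions take a fractional-bits argument that must be an integer constant. A sketch, assuming -march=armv8.2-a+fp16; fixed_to_half is illustrative and 8 is simply an in-range example value:

    #include <arm_fp16.h>
    #include <stdint.h>
    float16_t fixed_to_half(int32_t q) {
      return vcvth_n_f16_s32(q, 8);   /* 8 fractional bits; must be a compile-time constant */
    }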