| author    | Tim Northover <tnorthover@apple.com> | 2014-05-24 12:52:07 +0000 |
| committer | Tim Northover <tnorthover@apple.com> | 2014-05-24 12:52:07 +0000 |
| commit    | 573cbee543154bc17648f9428cb235886e61bc21 (patch) |
| tree      | 3adfb560b9e14c6acedc0e6b63a589362b8239c6 /clang/lib |
| parent    | 25e8a6754e3f4c447ddfe5b742c01c16cb050b67 (diff) |
AArch64/ARM64: rename ARM64 components to AArch64
This keeps Clang consistent with backend naming conventions.
llvm-svn: 209579
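The rename is internal to Clang and LLVM: source-level NEON intrinsics keep their names, and only the IR intrinsics they are lowered to move from the `arm64` namespace to `aarch64`, as the intrinsic-map hunks below show. A minimal sketch of the (non-)effect on user code, assuming the usual `llvm.<prefix>.neon.*` spelling of these intrinsics in emitted IR — this example is illustrative and not part of the patch:

```cpp
// Illustrative only -- user code is unchanged by this rename; only the
// IR-level intrinsic names Clang emits differ.
#include <arm_neon.h>

int32x4_t saturating_add(int32x4_t a, int32x4_t b) {
  // Before this commit: lowered via llvm.arm64.neon.sqadd.v4i32
  // After this commit:  lowered via llvm.aarch64.neon.sqadd.v4i32
  // (per the vqaddq_v -> aarch64_neon_sqadd mapping in CGBuiltin.cpp below)
  return vqaddq_s32(a, b);
}
```

Compiling such a file with `clang -target aarch64-linux-gnu -O2 -S -emit-llvm` before and after the patch should show the same source lowering to the same operations, with only the intrinsic name prefix changing.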
Diffstat (limited to 'clang/lib')
clang/lib/Basic/Targets.cpp             |  68
clang/lib/CodeGen/CGBuiltin.cpp         | 902
clang/lib/CodeGen/CGObjCMac.cpp         |   3
clang/lib/CodeGen/CodeGenFunction.h     |   4
clang/lib/CodeGen/TargetInfo.cpp        |  40
clang/lib/Driver/ToolChains.cpp         |  14
clang/lib/Driver/Tools.cpp              |  34
clang/lib/Frontend/InitHeaderSearch.cpp |   1
clang/lib/Sema/SemaChecking.cpp         |  14
9 files changed, 548 insertions, 532 deletions
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index 82d79f7a153..16c3ef9104f 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -4233,7 +4233,7 @@ public: namespace { -class ARM64TargetInfo : public TargetInfo { +class AArch64TargetInfo : public TargetInfo { virtual void setDescriptionString() = 0; static const TargetInfo::GCCRegAlias GCCRegAliases[]; static const char *const GCCRegNames[]; @@ -4252,7 +4252,7 @@ class ARM64TargetInfo : public TargetInfo { std::string ABI; public: - ARM64TargetInfo(const llvm::Triple &Triple) + AArch64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), ABI("aapcs") { if (getTriple().getOS() == llvm::Triple::NetBSD) { @@ -4283,7 +4283,7 @@ public: // specifiers. NoAsmVariants = true; - // ARM64 targets default to using the ARM C++ ABI. + // AArch64 targets default to using the ARM C++ ABI. TheCXXABI.set(TargetCXXABI::GenericAArch64); } @@ -4364,7 +4364,7 @@ public: virtual void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const { Records = BuiltinInfo; - NumRecords = clang::ARM64::LastTSBuiltin - Builtin::FirstTSBuiltin; + NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin; } virtual bool hasFeature(StringRef Feature) const { @@ -4453,7 +4453,7 @@ public: } }; -const char *const ARM64TargetInfo::GCCRegNames[] = { +const char *const AArch64TargetInfo::GCCRegNames[] = { // 32-bit Integer registers "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", @@ -4480,13 +4480,13 @@ const char *const ARM64TargetInfo::GCCRegNames[] = { "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" }; -void ARM64TargetInfo::getGCCRegNames(const char *const *&Names, +void AArch64TargetInfo::getGCCRegNames(const char *const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } -const TargetInfo::GCCRegAlias ARM64TargetInfo::GCCRegAliases[] = { +const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = { { { "w31" }, "wsp" }, { { "x29" }, "fp" }, { { "x30" }, "lr" }, @@ -4495,23 +4495,23 @@ const TargetInfo::GCCRegAlias ARM64TargetInfo::GCCRegAliases[] = { // don't want to substitute one of these for a different-sized one. 
}; -void ARM64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, +void AArch64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } -const Builtin::Info ARM64TargetInfo::BuiltinInfo[] = { +const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsNEON.def" #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, -#include "clang/Basic/BuiltinsARM64.def" +#include "clang/Basic/BuiltinsAArch64.def" }; -class ARM64leTargetInfo : public ARM64TargetInfo { +class AArch64leTargetInfo : public AArch64TargetInfo { void setDescriptionString() override { if (getTriple().isOSBinFormatMachO()) DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128"; @@ -4520,38 +4520,38 @@ class ARM64leTargetInfo : public ARM64TargetInfo { } public: - ARM64leTargetInfo(const llvm::Triple &Triple) - : ARM64TargetInfo(Triple) { + AArch64leTargetInfo(const llvm::Triple &Triple) + : AArch64TargetInfo(Triple) { BigEndian = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__AARCH64EL__"); - ARM64TargetInfo::getTargetDefines(Opts, Builder); + AArch64TargetInfo::getTargetDefines(Opts, Builder); } }; -class ARM64beTargetInfo : public ARM64TargetInfo { +class AArch64beTargetInfo : public AArch64TargetInfo { void setDescriptionString() override { assert(!getTriple().isOSBinFormatMachO()); DescriptionString = "E-m:e-i64:64-i128:128-n32:64-S128"; } public: - ARM64beTargetInfo(const llvm::Triple &Triple) - : ARM64TargetInfo(Triple) { } + AArch64beTargetInfo(const llvm::Triple &Triple) + : AArch64TargetInfo(Triple) { } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__AARCH64EB__"); Builder.defineMacro("__AARCH_BIG_ENDIAN"); Builder.defineMacro("__ARM_BIG_ENDIAN"); - ARM64TargetInfo::getTargetDefines(Opts, Builder); + AArch64TargetInfo::getTargetDefines(Opts, Builder); } }; } // end anonymous namespace. 
namespace { -class DarwinARM64TargetInfo : public DarwinTargetInfo<ARM64leTargetInfo> { +class DarwinAArch64TargetInfo : public DarwinTargetInfo<AArch64leTargetInfo> { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { @@ -4567,8 +4567,8 @@ protected: } public: - DarwinARM64TargetInfo(const llvm::Triple &Triple) - : DarwinTargetInfo<ARM64leTargetInfo>(Triple) { + DarwinAArch64TargetInfo(const llvm::Triple &Triple) + : DarwinTargetInfo<AArch64leTargetInfo>(Triple) { Int64Type = SignedLongLong; WCharType = SignedInt; UseSignedCharForObjCBool = false; @@ -5917,25 +5917,25 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { case llvm::Triple::arm64: if (Triple.isOSDarwin()) - return new DarwinARM64TargetInfo(Triple); + return new DarwinAArch64TargetInfo(Triple); switch (os) { case llvm::Triple::Linux: - return new LinuxTargetInfo<ARM64leTargetInfo>(Triple); + return new LinuxTargetInfo<AArch64leTargetInfo>(Triple); case llvm::Triple::NetBSD: - return new NetBSDTargetInfo<ARM64leTargetInfo>(Triple); + return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple); default: - return new ARM64leTargetInfo(Triple); + return new AArch64leTargetInfo(Triple); } case llvm::Triple::arm64_be: switch (os) { case llvm::Triple::Linux: - return new LinuxTargetInfo<ARM64beTargetInfo>(Triple); + return new LinuxTargetInfo<AArch64beTargetInfo>(Triple); case llvm::Triple::NetBSD: - return new NetBSDTargetInfo<ARM64beTargetInfo>(Triple); + return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple); default: - return new ARM64beTargetInfo(Triple); + return new AArch64beTargetInfo(Triple); } case llvm::Triple::xcore: @@ -5947,21 +5947,21 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { case llvm::Triple::aarch64: switch (os) { case llvm::Triple::Linux: - return new LinuxTargetInfo<ARM64leTargetInfo>(Triple); + return new LinuxTargetInfo<AArch64leTargetInfo>(Triple); case llvm::Triple::NetBSD: - return new NetBSDTargetInfo<ARM64leTargetInfo>(Triple); + return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple); default: - return new ARM64leTargetInfo(Triple); + return new AArch64leTargetInfo(Triple); } case llvm::Triple::aarch64_be: switch (os) { case llvm::Triple::Linux: - return new LinuxTargetInfo<ARM64beTargetInfo>(Triple); + return new LinuxTargetInfo<AArch64beTargetInfo>(Triple); case llvm::Triple::NetBSD: - return new NetBSDTargetInfo<ARM64beTargetInfo>(Triple); + return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple); default: - return new ARM64beTargetInfo(Triple); + return new AArch64beTargetInfo(Triple); } case llvm::Triple::arm: diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 585db1778bf..c6ac3cc9445 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -1646,7 +1646,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, case llvm::Triple::aarch64_be: case llvm::Triple::arm64: case llvm::Triple::arm64_be: - return EmitARM64BuiltinExpr(BuiltinID, E); + return EmitAArch64BuiltinExpr(BuiltinID, E); case llvm::Triple::x86: case llvm::Triple::x86_64: return EmitX86BuiltinExpr(BuiltinID, E); @@ -2079,109 +2079,109 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = { NEONMAP0(vzipq_v) }; -static NeonIntrinsicInfo ARM64SIMDIntrinsicMap[] = { - NEONMAP1(vabs_v, arm64_neon_abs, 0), - NEONMAP1(vabsq_v, arm64_neon_abs, 0), +static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = { + NEONMAP1(vabs_v, aarch64_neon_abs, 0), + 
NEONMAP1(vabsq_v, aarch64_neon_abs, 0), NEONMAP0(vaddhn_v), - NEONMAP1(vaesdq_v, arm64_crypto_aesd, 0), - NEONMAP1(vaeseq_v, arm64_crypto_aese, 0), - NEONMAP1(vaesimcq_v, arm64_crypto_aesimc, 0), - NEONMAP1(vaesmcq_v, arm64_crypto_aesmc, 0), - NEONMAP1(vcage_v, arm64_neon_facge, 0), - NEONMAP1(vcageq_v, arm64_neon_facge, 0), - NEONMAP1(vcagt_v, arm64_neon_facgt, 0), - NEONMAP1(vcagtq_v, arm64_neon_facgt, 0), - NEONMAP1(vcale_v, arm64_neon_facge, 0), - NEONMAP1(vcaleq_v, arm64_neon_facge, 0), - NEONMAP1(vcalt_v, arm64_neon_facgt, 0), - NEONMAP1(vcaltq_v, arm64_neon_facgt, 0), - NEONMAP1(vcls_v, arm64_neon_cls, Add1ArgType), - NEONMAP1(vclsq_v, arm64_neon_cls, Add1ArgType), + NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), + NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), + NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), + NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), + NEONMAP1(vcage_v, aarch64_neon_facge, 0), + NEONMAP1(vcageq_v, aarch64_neon_facge, 0), + NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), + NEONMAP1(vcale_v, aarch64_neon_facge, 0), + NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), + NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), + NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), + NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), NEONMAP1(vclz_v, ctlz, Add1ArgType), NEONMAP1(vclzq_v, ctlz, Add1ArgType), NEONMAP1(vcnt_v, ctpop, Add1ArgType), NEONMAP1(vcntq_v, ctpop, Add1ArgType), - NEONMAP1(vcvt_f16_v, arm64_neon_vcvtfp2hf, 0), - NEONMAP1(vcvt_f32_f16, arm64_neon_vcvthf2fp, 0), + NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0), + NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), NEONMAP0(vcvt_f32_v), - NEONMAP2(vcvt_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvt_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvt_n_s32_v, arm64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_s64_v, arm64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_u32_v, arm64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvt_n_u64_v, arm64_neon_vcvtfp2fxu, 0), + NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), NEONMAP0(vcvtq_f32_v), - NEONMAP2(vcvtq_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvtq_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvtq_n_s32_v, arm64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_s64_v, arm64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_u32_v, arm64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtq_n_u64_v, arm64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtx_f32_v, arm64_neon_fcvtxn, AddRetType | Add1ArgType), + NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), NEONMAP0(vext_v), NEONMAP0(vextq_v), NEONMAP0(vfma_v), NEONMAP0(vfmaq_v), - NEONMAP2(vhadd_v, arm64_neon_uhadd, arm64_neon_shadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vhaddq_v, arm64_neon_uhadd, 
arm64_neon_shadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsub_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsubq_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts), + NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), + NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), NEONMAP0(vmovl_v), NEONMAP0(vmovn_v), - NEONMAP1(vmul_v, arm64_neon_pmul, Add1ArgType), - NEONMAP1(vmulq_v, arm64_neon_pmul, Add1ArgType), - NEONMAP1(vpadd_v, arm64_neon_addp, Add1ArgType), - NEONMAP2(vpaddl_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts), - NEONMAP2(vpaddlq_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts), - NEONMAP1(vpaddq_v, arm64_neon_addp, Add1ArgType), - NEONMAP1(vqabs_v, arm64_neon_sqabs, Add1ArgType), - NEONMAP1(vqabsq_v, arm64_neon_sqabs, Add1ArgType), - NEONMAP2(vqadd_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vqaddq_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vqdmlal_v, arm64_neon_sqdmull, arm64_neon_sqadd, 0), - NEONMAP2(vqdmlsl_v, arm64_neon_sqdmull, arm64_neon_sqsub, 0), - NEONMAP1(vqdmulh_v, arm64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmulhq_v, arm64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmull_v, arm64_neon_sqdmull, Add1ArgType), - NEONMAP2(vqmovn_v, arm64_neon_uqxtn, arm64_neon_sqxtn, Add1ArgType | UnsignedAlts), - NEONMAP1(vqmovun_v, arm64_neon_sqxtun, Add1ArgType), - NEONMAP1(vqneg_v, arm64_neon_sqneg, Add1ArgType), - NEONMAP1(vqnegq_v, arm64_neon_sqneg, Add1ArgType), - NEONMAP1(vqrdmulh_v, arm64_neon_sqrdmulh, Add1ArgType), - NEONMAP1(vqrdmulhq_v, arm64_neon_sqrdmulh, Add1ArgType), - NEONMAP2(vqrshl_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqrshlq_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqshl_n_v, arm64_neon_uqshl, arm64_neon_sqshl, UnsignedAlts), - NEONMAP2(vqshl_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqshlq_n_v, arm64_neon_uqshl, arm64_neon_sqshl,UnsignedAlts), - NEONMAP2(vqshlq_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqsub_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts), - NEONMAP2(vqsubq_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts), - NEONMAP1(vraddhn_v, arm64_neon_raddhn, Add1ArgType), - NEONMAP2(vrecpe_v, arm64_neon_frecpe, arm64_neon_urecpe, 0), - NEONMAP2(vrecpeq_v, arm64_neon_frecpe, arm64_neon_urecpe, 0), - NEONMAP1(vrecps_v, arm64_neon_frecps, Add1ArgType), - NEONMAP1(vrecpsq_v, arm64_neon_frecps, Add1ArgType), - NEONMAP2(vrhadd_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vrhaddq_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshl_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshlq_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vrsqrte_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0), - NEONMAP2(vrsqrteq_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0), - NEONMAP1(vrsqrts_v, arm64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsqrtsq_v, arm64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsubhn_v, arm64_neon_rsubhn, Add1ArgType), - NEONMAP1(vsha1su0q_v, arm64_crypto_sha1su0, 
0), - NEONMAP1(vsha1su1q_v, arm64_crypto_sha1su1, 0), - NEONMAP1(vsha256h2q_v, arm64_crypto_sha256h2, 0), - NEONMAP1(vsha256hq_v, arm64_crypto_sha256h, 0), - NEONMAP1(vsha256su0q_v, arm64_crypto_sha256su0, 0), - NEONMAP1(vsha256su1q_v, arm64_crypto_sha256su1, 0), + NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), + NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), + NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), + NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), + NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), + NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), + NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), + NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), + NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts), + NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), + NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), + NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), + NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), + NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), + NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), + NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), + NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), + NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), + NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 
0), + NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), NEONMAP0(vshl_n_v), - NEONMAP2(vshl_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), NEONMAP0(vshll_n_v), NEONMAP0(vshlq_n_v), - NEONMAP2(vshlq_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts), + NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), NEONMAP0(vshr_n_v), NEONMAP0(vshrn_n_v), NEONMAP0(vshrq_n_v), @@ -2190,199 +2190,199 @@ static NeonIntrinsicInfo ARM64SIMDIntrinsicMap[] = { NEONMAP0(vtstq_v), }; -static NeonIntrinsicInfo ARM64SISDIntrinsicMap[] = { - NEONMAP1(vabdd_f64, arm64_sisd_fabd, Add1ArgType), - NEONMAP1(vabds_f32, arm64_sisd_fabd, Add1ArgType), - NEONMAP1(vabsd_s64, arm64_neon_abs, Add1ArgType), - NEONMAP1(vaddlv_s32, arm64_neon_saddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlv_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlvq_s32, arm64_neon_saddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlvq_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_f32, arm64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_s32, arm64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_u32, arm64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_f32, arm64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_f64, arm64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_s32, arm64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_s64, arm64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_u32, arm64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_u64, arm64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vcaged_f64, arm64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcages_f32, arm64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcagtd_f64, arm64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcagts_f32, arm64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcaled_f64, arm64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcales_f32, arm64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcaltd_f64, arm64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcalts_f32, arm64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcvtad_s64_f64, arm64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtad_u64_f64, arm64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvtas_s32_f32, arm64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtas_u32_f32, arm64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_f64_s64, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_f64_u64, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_s64_f64, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_u64_f64, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvtmd_s64_f64, arm64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtmd_u64_f64, arm64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtms_s32_f32, arm64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtms_u32_f32, arm64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtnd_s64_f64, arm64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtnd_u64_f64, arm64_neon_fcvtnu, AddRetType | Add1ArgType), - NEONMAP1(vcvtns_s32_f32, arm64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtns_u32_f32, arm64_neon_fcvtnu, AddRetType | Add1ArgType), - NEONMAP1(vcvtpd_s64_f64, arm64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtpd_u64_f64, 
arm64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vcvtps_s32_f32, arm64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtps_u32_f32, arm64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_f32_s32, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_f32_u32, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_s32_f32, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_u32_f32, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvtxd_f32_f64, arm64_sisd_fcvtxn, 0), - NEONMAP1(vmaxnmv_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxnmvq_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxnmvq_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_s32, arm64_neon_smaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_u32, arm64_neon_umaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_s32, arm64_neon_smaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_u32, arm64_neon_umaxv, AddRetType | Add1ArgType), - NEONMAP1(vminnmv_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminnmvq_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminnmvq_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminv_f32, arm64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminv_s32, arm64_neon_sminv, AddRetType | Add1ArgType), - NEONMAP1(vminv_u32, arm64_neon_uminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_f32, arm64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_f64, arm64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_s32, arm64_neon_sminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_u32, arm64_neon_uminv, AddRetType | Add1ArgType), - NEONMAP1(vmull_p64, arm64_neon_pmull64, 0), - NEONMAP1(vmulxd_f64, arm64_neon_fmulx, Add1ArgType), - NEONMAP1(vmulxs_f32, arm64_neon_fmulx, Add1ArgType), - NEONMAP1(vpaddd_s64, arm64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vpaddd_u64, arm64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxnmqd_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxnms_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxqd_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxs_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vpminnmqd_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vpminnms_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vpminqd_f64, arm64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vpmins_f32, arm64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vqabsb_s8, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqabsd_s64, arm64_neon_sqabs, Add1ArgType), - NEONMAP1(vqabsh_s16, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqabss_s32, arm64_neon_sqabs, Add1ArgType), - NEONMAP1(vqaddb_s8, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddb_u8, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddd_s64, arm64_neon_sqadd, Add1ArgType), - NEONMAP1(vqaddd_u64, arm64_neon_uqadd, Add1ArgType), - NEONMAP1(vqaddh_s16, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddh_u16, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqadds_s32, arm64_neon_sqadd, Add1ArgType), - NEONMAP1(vqadds_u32, 
arm64_neon_uqadd, Add1ArgType), - NEONMAP1(vqdmulhh_s16, arm64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqdmulhs_s32, arm64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmullh_s16, arm64_neon_sqdmull, VectorRet | Use128BitVectors), - NEONMAP1(vqdmulls_s32, arm64_neon_sqdmulls_scalar, 0), - NEONMAP1(vqmovnd_s64, arm64_neon_scalar_sqxtn, AddRetType | Add1ArgType), - NEONMAP1(vqmovnd_u64, arm64_neon_scalar_uqxtn, AddRetType | Add1ArgType), - NEONMAP1(vqmovnh_s16, arm64_neon_sqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovnh_u16, arm64_neon_uqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovns_s32, arm64_neon_sqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovns_u32, arm64_neon_uqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovund_s64, arm64_neon_scalar_sqxtun, AddRetType | Add1ArgType), - NEONMAP1(vqmovunh_s16, arm64_neon_sqxtun, VectorRet | Use64BitVectors), - NEONMAP1(vqmovuns_s32, arm64_neon_sqxtun, VectorRet | Use64BitVectors), - NEONMAP1(vqnegb_s8, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqnegd_s64, arm64_neon_sqneg, Add1ArgType), - NEONMAP1(vqnegh_s16, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqnegs_s32, arm64_neon_sqneg, Add1ArgType), - NEONMAP1(vqrdmulhh_s16, arm64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrdmulhs_s32, arm64_neon_sqrdmulh, Add1ArgType), - NEONMAP1(vqrshlb_s8, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshlb_u8, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshld_s64, arm64_neon_sqrshl, Add1ArgType), - NEONMAP1(vqrshld_u64, arm64_neon_uqrshl, Add1ArgType), - NEONMAP1(vqrshlh_s16, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshlh_u16, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshls_s32, arm64_neon_sqrshl, Add1ArgType), - NEONMAP1(vqrshls_u32, arm64_neon_uqrshl, Add1ArgType), - NEONMAP1(vqrshrnd_n_s64, arm64_neon_sqrshrn, AddRetType), - NEONMAP1(vqrshrnd_n_u64, arm64_neon_uqrshrn, AddRetType), - NEONMAP1(vqrshrnh_n_s16, arm64_neon_sqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrnh_n_u16, arm64_neon_uqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrns_n_s32, arm64_neon_sqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrns_n_u32, arm64_neon_uqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrund_n_s64, arm64_neon_sqrshrun, AddRetType), - NEONMAP1(vqrshrunh_n_s16, arm64_neon_sqrshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqrshruns_n_s32, arm64_neon_sqrshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqshlb_n_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_n_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshld_s64, arm64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshld_u64, arm64_neon_uqshl, Add1ArgType), - NEONMAP1(vqshlh_n_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_n_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshls_n_s32, arm64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshls_n_u32, arm64_neon_uqshl, Add1ArgType), - NEONMAP1(vqshls_s32, arm64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshls_u32, arm64_neon_uqshl, Add1ArgType), 
- NEONMAP1(vqshlub_n_s8, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshluh_n_s16, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlus_n_s32, arm64_neon_sqshlu, Add1ArgType), - NEONMAP1(vqshrnd_n_s64, arm64_neon_sqshrn, AddRetType), - NEONMAP1(vqshrnd_n_u64, arm64_neon_uqshrn, AddRetType), - NEONMAP1(vqshrnh_n_s16, arm64_neon_sqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrnh_n_u16, arm64_neon_uqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrns_n_s32, arm64_neon_sqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrns_n_u32, arm64_neon_uqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrund_n_s64, arm64_neon_sqshrun, AddRetType), - NEONMAP1(vqshrunh_n_s16, arm64_neon_sqshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqshruns_n_s32, arm64_neon_sqshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqsubb_s8, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubb_u8, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubd_s64, arm64_neon_sqsub, Add1ArgType), - NEONMAP1(vqsubd_u64, arm64_neon_uqsub, Add1ArgType), - NEONMAP1(vqsubh_s16, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubh_u16, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubs_s32, arm64_neon_sqsub, Add1ArgType), - NEONMAP1(vqsubs_u32, arm64_neon_uqsub, Add1ArgType), - NEONMAP1(vrecped_f64, arm64_neon_frecpe, Add1ArgType), - NEONMAP1(vrecpes_f32, arm64_neon_frecpe, Add1ArgType), - NEONMAP1(vrecpxd_f64, arm64_neon_frecpx, Add1ArgType), - NEONMAP1(vrecpxs_f32, arm64_neon_frecpx, Add1ArgType), - NEONMAP1(vrshld_s64, arm64_neon_srshl, Add1ArgType), - NEONMAP1(vrshld_u64, arm64_neon_urshl, Add1ArgType), - NEONMAP1(vrsqrted_f64, arm64_neon_frsqrte, Add1ArgType), - NEONMAP1(vrsqrtes_f32, arm64_neon_frsqrte, Add1ArgType), - NEONMAP1(vrsqrtsd_f64, arm64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsqrtss_f32, arm64_neon_frsqrts, Add1ArgType), - NEONMAP1(vsha1cq_u32, arm64_crypto_sha1c, 0), - NEONMAP1(vsha1h_u32, arm64_crypto_sha1h, 0), - NEONMAP1(vsha1mq_u32, arm64_crypto_sha1m, 0), - NEONMAP1(vsha1pq_u32, arm64_crypto_sha1p, 0), - NEONMAP1(vshld_s64, arm64_neon_sshl, Add1ArgType), - NEONMAP1(vshld_u64, arm64_neon_ushl, Add1ArgType), - NEONMAP1(vslid_n_s64, arm64_neon_vsli, Vectorize1ArgType), - NEONMAP1(vslid_n_u64, arm64_neon_vsli, Vectorize1ArgType), - NEONMAP1(vsqaddb_u8, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vsqaddd_u64, arm64_neon_usqadd, Add1ArgType), - NEONMAP1(vsqaddh_u16, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vsqadds_u32, arm64_neon_usqadd, Add1ArgType), - NEONMAP1(vsrid_n_s64, arm64_neon_vsri, Vectorize1ArgType), - NEONMAP1(vsrid_n_u64, arm64_neon_vsri, Vectorize1ArgType), - NEONMAP1(vuqaddb_s8, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vuqaddd_s64, arm64_neon_suqadd, Add1ArgType), - NEONMAP1(vuqaddh_s16, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vuqadds_s32, arm64_neon_suqadd, Add1ArgType), +static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = { + NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), + NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | 
Add1ArgType), + NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), + NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), + NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), + NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f32, 
aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), + NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), + NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), + NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), + NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | 
Use64BitVectors), + NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), + NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), + NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), + NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), + NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), + NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), + NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), + NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), + NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + 
NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), + NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), + NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), + NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), + NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), + NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), + NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), + NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), + NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), + NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), + NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), + NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), }; #undef NEONMAP0 @@ -2391,8 +2391,8 @@ static NeonIntrinsicInfo ARM64SISDIntrinsicMap[] = { static bool NEONSIMDIntrinsicsProvenSorted = false; -static bool ARM64SIMDIntrinsicsProvenSorted = false; -static bool ARM64SISDIntrinsicsProvenSorted = false; +static bool AArch64SIMDIntrinsicsProvenSorted = false; +static bool AArch64SISDIntrinsicsProvenSorted = false; static const NeonIntrinsicInfo * @@ -3534,7 +3534,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, } } -static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, +static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops) { unsigned int Int = 0; @@ -3597,20 +3597,20 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, case NEON::BI__builtin_neon_vtbl1_v: { TblOps.push_back(Ops[0]); return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty, - Intrinsic::arm64_neon_tbl1, "vtbl1"); + Intrinsic::aarch64_neon_tbl1, "vtbl1"); } case NEON::BI__builtin_neon_vtbl2_v: { TblOps.push_back(Ops[0]); TblOps.push_back(Ops[1]); return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty, - 
Intrinsic::arm64_neon_tbl1, "vtbl1"); + Intrinsic::aarch64_neon_tbl1, "vtbl1"); } case NEON::BI__builtin_neon_vtbl3_v: { TblOps.push_back(Ops[0]); TblOps.push_back(Ops[1]); TblOps.push_back(Ops[2]); return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty, - Intrinsic::arm64_neon_tbl2, "vtbl2"); + Intrinsic::aarch64_neon_tbl2, "vtbl2"); } case NEON::BI__builtin_neon_vtbl4_v: { TblOps.push_back(Ops[0]); @@ -3618,12 +3618,12 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, TblOps.push_back(Ops[2]); TblOps.push_back(Ops[3]); return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty, - Intrinsic::arm64_neon_tbl2, "vtbl2"); + Intrinsic::aarch64_neon_tbl2, "vtbl2"); } case NEON::BI__builtin_neon_vtbx1_v: { TblOps.push_back(Ops[1]); Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty, - Intrinsic::arm64_neon_tbl1, "vtbl1"); + Intrinsic::aarch64_neon_tbl1, "vtbl1"); llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8); Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight); @@ -3638,14 +3638,14 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, TblOps.push_back(Ops[1]); TblOps.push_back(Ops[2]); return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty, - Intrinsic::arm64_neon_tbx1, "vtbx1"); + Intrinsic::aarch64_neon_tbx1, "vtbx1"); } case NEON::BI__builtin_neon_vtbx3_v: { TblOps.push_back(Ops[1]); TblOps.push_back(Ops[2]); TblOps.push_back(Ops[3]); Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty, - Intrinsic::arm64_neon_tbl2, "vtbl2"); + Intrinsic::aarch64_neon_tbl2, "vtbl2"); llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24); Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour); @@ -3663,32 +3663,32 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, TblOps.push_back(Ops[3]); TblOps.push_back(Ops[4]); return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty, - Intrinsic::arm64_neon_tbx2, "vtbx2"); + Intrinsic::aarch64_neon_tbx2, "vtbx2"); } case NEON::BI__builtin_neon_vqtbl1_v: case NEON::BI__builtin_neon_vqtbl1q_v: - Int = Intrinsic::arm64_neon_tbl1; s = "vtbl1"; break; + Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; case NEON::BI__builtin_neon_vqtbl2_v: case NEON::BI__builtin_neon_vqtbl2q_v: { - Int = Intrinsic::arm64_neon_tbl2; s = "vtbl2"; break; + Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; case NEON::BI__builtin_neon_vqtbl3_v: case NEON::BI__builtin_neon_vqtbl3q_v: - Int = Intrinsic::arm64_neon_tbl3; s = "vtbl3"; break; + Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; case NEON::BI__builtin_neon_vqtbl4_v: case NEON::BI__builtin_neon_vqtbl4q_v: - Int = Intrinsic::arm64_neon_tbl4; s = "vtbl4"; break; + Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; case NEON::BI__builtin_neon_vqtbx1_v: case NEON::BI__builtin_neon_vqtbx1q_v: - Int = Intrinsic::arm64_neon_tbx1; s = "vtbx1"; break; + Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; case NEON::BI__builtin_neon_vqtbx2_v: case NEON::BI__builtin_neon_vqtbx2q_v: - Int = Intrinsic::arm64_neon_tbx2; s = "vtbx2"; break; + Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; case NEON::BI__builtin_neon_vqtbx3_v: case NEON::BI__builtin_neon_vqtbx3q_v: - Int = Intrinsic::arm64_neon_tbx3; s = "vtbx3"; break; + Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; case NEON::BI__builtin_neon_vqtbx4_v: case NEON::BI__builtin_neon_vqtbx4q_v: - Int = Intrinsic::arm64_neon_tbx4; s = "vtbx4"; break; + Int = 
Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; } } @@ -3720,7 +3720,7 @@ Value *CodeGenFunction::vectorWrapScalar8(Value *Op) { Value *CodeGenFunction:: emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops, const char *Name) { - // i8 is not a legal types for ARM64, so we can't just use + // i8 is not a legal types for AArch64, so we can't just use // a normal overloaed intrinsic call for these scalar types. Instead // we'll build 64-bit vectors w/ lane zero being our input values and // perform the operation on that. The back end can pattern match directly @@ -3736,7 +3736,7 @@ emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops, Value *CodeGenFunction:: emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops, const char *Name) { - // i16 is not a legal types for ARM64, so we can't just use + // i16 is not a legal types for AArch64, so we can't just use // a normal overloaed intrinsic call for these scalar types. Instead // we'll build 64-bit vectors w/ lane zero being our input values and // perform the operation on that. The back end can pattern match directly @@ -3749,9 +3749,9 @@ emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops, return Builder.CreateExtractElement(V, CI, "lane0"); } -Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { - if (BuiltinID == ARM64::BI__clear_cache) { +Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + if (BuiltinID == AArch64::BI__clear_cache) { assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); const FunctionDecl *FD = E->getDirectCallee(); SmallVector<Value*, 2> Ops; @@ -3763,9 +3763,9 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); } - if (BuiltinID == ARM64::BI__builtin_arm_ldrex && + if (BuiltinID == AArch64::BI__builtin_arm_ldrex && getContext().getTypeSize(E->getType()) == 128) { - Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxp); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ldxp); Value *LdPtr = EmitScalarExpr(E->getArg(0)); Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), @@ -3781,7 +3781,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); Val = Builder.CreateOr(Val, Val1); return Builder.CreateBitCast(Val, ConvertType(E->getType())); - } else if (BuiltinID == ARM64::BI__builtin_arm_ldrex) { + } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex) { Value *LoadAddr = EmitScalarExpr(E->getArg(0)); QualType Ty = E->getType(); @@ -3790,7 +3790,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, getContext().getTypeSize(Ty)); LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo()); - Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxr, LoadAddr->getType()); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ldxr, LoadAddr->getType()); Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); if (RealResTy->isPointerTy()) @@ -3800,9 +3800,9 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return Builder.CreateBitCast(Val, RealResTy); } - if (BuiltinID == ARM64::BI__builtin_arm_strex && + if (BuiltinID == AArch64::BI__builtin_arm_strex && getContext().getTypeSize(E->getArg(0)->getType()) == 128) { - Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxp); + Function *F = 
CGM.getIntrinsic(Intrinsic::aarch64_stxp); llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL); Value *One = llvm::ConstantInt::get(Int32Ty, 1); @@ -3819,7 +3819,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp"); - } else if (BuiltinID == ARM64::BI__builtin_arm_strex) { + } else if (BuiltinID == AArch64::BI__builtin_arm_strex) { Value *StoreVal = EmitScalarExpr(E->getArg(0)); Value *StoreAddr = EmitScalarExpr(E->getArg(1)); @@ -3835,34 +3835,34 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); } - Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxr, StoreAddr->getType()); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_stxr, StoreAddr->getType()); return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr"); } - if (BuiltinID == ARM64::BI__builtin_arm_clrex) { - Function *F = CGM.getIntrinsic(Intrinsic::arm64_clrex); + if (BuiltinID == AArch64::BI__builtin_arm_clrex) { + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); return Builder.CreateCall(F); } // CRC32 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; switch (BuiltinID) { - case ARM64::BI__builtin_arm_crc32b: - CRCIntrinsicID = Intrinsic::arm64_crc32b; break; - case ARM64::BI__builtin_arm_crc32cb: - CRCIntrinsicID = Intrinsic::arm64_crc32cb; break; - case ARM64::BI__builtin_arm_crc32h: - CRCIntrinsicID = Intrinsic::arm64_crc32h; break; - case ARM64::BI__builtin_arm_crc32ch: - CRCIntrinsicID = Intrinsic::arm64_crc32ch; break; - case ARM64::BI__builtin_arm_crc32w: - CRCIntrinsicID = Intrinsic::arm64_crc32w; break; - case ARM64::BI__builtin_arm_crc32cw: - CRCIntrinsicID = Intrinsic::arm64_crc32cw; break; - case ARM64::BI__builtin_arm_crc32d: - CRCIntrinsicID = Intrinsic::arm64_crc32x; break; - case ARM64::BI__builtin_arm_crc32cd: - CRCIntrinsicID = Intrinsic::arm64_crc32cx; break; + case AArch64::BI__builtin_arm_crc32b: + CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; + case AArch64::BI__builtin_arm_crc32cb: + CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; + case AArch64::BI__builtin_arm_crc32h: + CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; + case AArch64::BI__builtin_arm_crc32ch: + CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; + case AArch64::BI__builtin_arm_crc32w: + CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; + case AArch64::BI__builtin_arm_crc32cw: + CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; + case AArch64::BI__builtin_arm_crc32d: + CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; + case AArch64::BI__builtin_arm_crc32cd: + CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; } if (CRCIntrinsicID != Intrinsic::not_intrinsic) { @@ -3880,9 +3880,9 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) Ops.push_back(EmitScalarExpr(E->getArg(i))); - llvm::ArrayRef<NeonIntrinsicInfo> SISDMap(ARM64SISDIntrinsicMap); + llvm::ArrayRef<NeonIntrinsicInfo> SISDMap(AArch64SISDIntrinsicMap); const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap( - SISDMap, BuiltinID, ARM64SISDIntrinsicsProvenSorted); + SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); if (Builtin) { Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); @@ -4226,27 +4226,27 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, ProductOps.push_back(vectorWrapScalar16(Ops[1])); 
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); - Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy), + Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), ProductOps, "vqdmlXl"); Constant *CI = ConstantInt::get(Int32Ty, 0); Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 - ? Intrinsic::arm64_neon_sqadd - : Intrinsic::arm64_neon_sqsub; + ? Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); } case NEON::BI__builtin_neon_vqshlud_n_s64: { Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqshlu, Int64Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), Ops, "vqshlu_n"); } case NEON::BI__builtin_neon_vqshld_n_u64: case NEON::BI__builtin_neon_vqshld_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 - ? Intrinsic::arm64_neon_uqshl - : Intrinsic::arm64_neon_sqshl; + ? Intrinsic::aarch64_neon_uqshl + : Intrinsic::aarch64_neon_sqshl; Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); @@ -4254,8 +4254,8 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vrshrd_n_u64: case NEON::BI__builtin_neon_vrshrd_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 - ? Intrinsic::arm64_neon_urshl - : Intrinsic::arm64_neon_srshl; + ? Intrinsic::aarch64_neon_urshl + : Intrinsic::aarch64_neon_srshl; Ops.push_back(EmitScalarExpr(E->getArg(1))); int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); Ops[1] = ConstantInt::get(Int64Ty, -SV); @@ -4264,8 +4264,8 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vrsrad_n_u64: case NEON::BI__builtin_neon_vrsrad_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 - ? Intrinsic::arm64_neon_urshl - : Intrinsic::arm64_neon_srshl; + ? Intrinsic::aarch64_neon_urshl + : Intrinsic::aarch64_neon_srshl; Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1], @@ -4323,7 +4323,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, ProductOps.push_back(vectorWrapScalar16(Ops[1])); ProductOps.push_back(vectorWrapScalar16(Ops[2])); llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); - Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy), + Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), ProductOps, "vqdmlXl"); Constant *CI = ConstantInt::get(Int32Ty, 0); Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); @@ -4331,8 +4331,8 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) - ? Intrinsic::arm64_neon_sqadd - : Intrinsic::arm64_neon_sqsub; + ? 
Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); } case NEON::BI__builtin_neon_vqdmlals_s32: @@ -4341,12 +4341,12 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, ProductOps.push_back(Ops[1]); ProductOps.push_back(EmitScalarExpr(E->getArg(2))); Ops[1] = - EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar), + EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), ProductOps, "vqdmlXl"); unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 - ? Intrinsic::arm64_neon_sqadd - : Intrinsic::arm64_neon_sqsub; + ? Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); } case NEON::BI__builtin_neon_vqdmlals_lane_s32: @@ -4359,14 +4359,14 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, ProductOps.push_back(Ops[1]); ProductOps.push_back(Ops[2]); Ops[1] = - EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar), + EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), ProductOps, "vqdmlXl"); Ops.pop_back(); unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) - ? Intrinsic::arm64_neon_sqadd - : Intrinsic::arm64_neon_sqsub; + ? Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); } } @@ -4376,17 +4376,17 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, if (!Ty) return nullptr; - // Not all intrinsics handled by the common case work for ARM64 yet, so only + // Not all intrinsics handled by the common case work for AArch64 yet, so only // defer to common code if it's been added to our special map. - Builtin = findNeonIntrinsicInMap(ARM64SIMDIntrinsicMap, BuiltinID, - ARM64SIMDIntrinsicsProvenSorted); + Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, + AArch64SIMDIntrinsicsProvenSorted); if (Builtin) return EmitCommonNeonBuiltinExpr( Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr); - if (Value *V = EmitARM64TblBuiltinExpr(*this, BuiltinID, E, Ops)) + if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops)) return V; unsigned Int; @@ -4492,26 +4492,26 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, } case NEON::BI__builtin_neon_vmull_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_umull : Intrinsic::arm64_neon_smull; - if (Type.isPoly()) Int = Intrinsic::arm64_neon_pmull; + Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; + if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_umax : Intrinsic::arm64_neon_smax; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmax; + Int = usgn ? 
Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; + if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); case NEON::BI__builtin_neon_vmin_v: case NEON::BI__builtin_neon_vminq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_umin : Intrinsic::arm64_neon_smin; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmin; + Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; + if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); case NEON::BI__builtin_neon_vabd_v: case NEON::BI__builtin_neon_vabdq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_uabd : Intrinsic::arm64_neon_sabd; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fabd; + Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; + if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { @@ -4521,7 +4521,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::Type *ArgTy = llvm::VectorType::get( llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts); llvm::Type* Tys[2] = { VTy, ArgTy }; - Int = usgn ? Intrinsic::arm64_neon_uaddlp : Intrinsic::arm64_neon_saddlp; + Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; SmallVector<llvm::Value*, 1> TmpOps; TmpOps.push_back(Ops[1]); Function *F = CGM.getIntrinsic(Int, Tys); @@ -4532,33 +4532,33 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vpmin_v: case NEON::BI__builtin_neon_vpminq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_uminp : Intrinsic::arm64_neon_sminp; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fminp; + Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; + if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); case NEON::BI__builtin_neon_vpmax_v: case NEON::BI__builtin_neon_vpmaxq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. - Int = usgn ? Intrinsic::arm64_neon_umaxp : Intrinsic::arm64_neon_smaxp; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmaxp; + Int = usgn ? 
Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; + if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); case NEON::BI__builtin_neon_vminnm_v: case NEON::BI__builtin_neon_vminnmq_v: - Int = Intrinsic::arm64_neon_fminnm; + Int = Intrinsic::aarch64_neon_fminnm; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); case NEON::BI__builtin_neon_vmaxnm_v: case NEON::BI__builtin_neon_vmaxnmq_v: - Int = Intrinsic::arm64_neon_fmaxnm; + Int = Intrinsic::aarch64_neon_fmaxnm; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); case NEON::BI__builtin_neon_vrecpss_f32: { llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext()); Ops.push_back(EmitScalarExpr(E->getArg(1))); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f32Type), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type), Ops, "vrecps"); } case NEON::BI__builtin_neon_vrecpsd_f64: { llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext()); Ops.push_back(EmitScalarExpr(E->getArg(1))); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f64Type), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type), Ops, "vrecps"); } case NEON::BI__builtin_neon_vrshr_n_v: @@ -4566,34 +4566,34 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the // moment. After the final merge it should be added to // EmitCommonNeonBuiltinExpr. - Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl; + Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true); case NEON::BI__builtin_neon_vqshlu_n_v: case NEON::BI__builtin_neon_vqshluq_n_v: // FIXME: AArch64 and ARM use different intrinsics for this, but are // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after // the final merge. - Int = Intrinsic::arm64_neon_sqshlu; + Int = Intrinsic::aarch64_neon_sqshlu; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false); case NEON::BI__builtin_neon_vqshrun_n_v: // FIXME: as above - Int = Intrinsic::arm64_neon_sqshrun; + Int = Intrinsic::aarch64_neon_sqshrun; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); case NEON::BI__builtin_neon_vqrshrun_n_v: // FIXME: and again. - Int = Intrinsic::arm64_neon_sqrshrun; + Int = Intrinsic::aarch64_neon_sqrshrun; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); case NEON::BI__builtin_neon_vqshrn_n_v: // FIXME: guess - Int = usgn ? Intrinsic::arm64_neon_uqshrn : Intrinsic::arm64_neon_sqshrn; + Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); case NEON::BI__builtin_neon_vrshrn_n_v: // FIXME: there might be a pattern here. - Int = Intrinsic::arm64_neon_rshrn; + Int = Intrinsic::aarch64_neon_rshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); case NEON::BI__builtin_neon_vqrshrn_n_v: // FIXME: another one - Int = usgn ? Intrinsic::arm64_neon_uqrshrn : Intrinsic::arm64_neon_sqrshrn; + Int = usgn ? 
Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { @@ -4612,7 +4612,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, } case NEON::BI__builtin_neon_vrndn_v: case NEON::BI__builtin_neon_vrndnq_v: { - Int = Intrinsic::arm64_neon_frintn; + Int = Intrinsic::aarch64_neon_frintn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); } case NEON::BI__builtin_neon_vrndp_v: @@ -4699,7 +4699,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vcvtaq_s64_v: case NEON::BI__builtin_neon_vcvta_u64_v: case NEON::BI__builtin_neon_vcvtaq_u64_v: { - Int = usgn ? Intrinsic::arm64_neon_fcvtau : Intrinsic::arm64_neon_fcvtas; + Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; bool Double = (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64); llvm::Type *InTy = @@ -4717,7 +4717,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vcvtmq_s64_v: case NEON::BI__builtin_neon_vcvtm_u64_v: case NEON::BI__builtin_neon_vcvtmq_u64_v: { - Int = usgn ? Intrinsic::arm64_neon_fcvtmu : Intrinsic::arm64_neon_fcvtms; + Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; bool Double = (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64); llvm::Type *InTy = @@ -4735,7 +4735,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vcvtnq_s64_v: case NEON::BI__builtin_neon_vcvtn_u64_v: case NEON::BI__builtin_neon_vcvtnq_u64_v: { - Int = usgn ? Intrinsic::arm64_neon_fcvtnu : Intrinsic::arm64_neon_fcvtns; + Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; bool Double = (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64); llvm::Type *InTy = @@ -4753,7 +4753,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vcvtpq_s64_v: case NEON::BI__builtin_neon_vcvtp_u64_v: case NEON::BI__builtin_neon_vcvtpq_u64_v: { - Int = usgn ? Intrinsic::arm64_neon_fcvtpu : Intrinsic::arm64_neon_fcvtps; + Int = usgn ? 
Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; bool Double = (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64); llvm::Type *InTy = @@ -4765,7 +4765,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, } case NEON::BI__builtin_neon_vmulx_v: case NEON::BI__builtin_neon_vmulxq_v: { - Int = Intrinsic::arm64_neon_fmulx; + Int = Intrinsic::aarch64_neon_fmulx; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); } case NEON::BI__builtin_neon_vmul_lane_v: @@ -4786,12 +4786,12 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); case NEON::BI__builtin_neon_vpmaxnm_v: case NEON::BI__builtin_neon_vpmaxnmq_v: { - Int = Intrinsic::arm64_neon_fmaxnmp; + Int = Intrinsic::aarch64_neon_fmaxnmp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); } case NEON::BI__builtin_neon_vpminnm_v: case NEON::BI__builtin_neon_vpminnmq_v: { - Int = Intrinsic::arm64_neon_fminnmp; + Int = Intrinsic::aarch64_neon_fminnmp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); } case NEON::BI__builtin_neon_vsqrt_v: @@ -4802,7 +4802,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, } case NEON::BI__builtin_neon_vrbit_v: case NEON::BI__builtin_neon_vrbitq_v: { - Int = Intrinsic::arm64_neon_rbit; + Int = Intrinsic::aarch64_neon_rbit; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); } case NEON::BI__builtin_neon_vaddv_u8: @@ -4810,7 +4810,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, usgn = true; // FALLTHROUGH case NEON::BI__builtin_neon_vaddv_s8: { - Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv; + Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -4824,7 +4824,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, usgn = true; // FALLTHROUGH case NEON::BI__builtin_neon_vaddv_s16: { - Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv; + Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -4838,7 +4838,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, usgn = true; // FALLTHROUGH case NEON::BI__builtin_neon_vaddvq_s8: { - Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv; + Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -4852,7 +4852,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, usgn = true; // FALLTHROUGH case NEON::BI__builtin_neon_vaddvq_s16: { - Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv; + Int = usgn ? 
Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -4863,7 +4863,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vmaxv_u8: { - Int = Intrinsic::arm64_neon_umaxv; + Int = Intrinsic::aarch64_neon_umaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -4874,7 +4874,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vmaxv_u16: { - Int = Intrinsic::arm64_neon_umaxv; + Int = Intrinsic::aarch64_neon_umaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -4885,7 +4885,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vmaxvq_u8: { - Int = Intrinsic::arm64_neon_umaxv; + Int = Intrinsic::aarch64_neon_umaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -4896,7 +4896,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vmaxvq_u16: { - Int = Intrinsic::arm64_neon_umaxv; + Int = Intrinsic::aarch64_neon_umaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -4907,7 +4907,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vmaxv_s8: { - Int = Intrinsic::arm64_neon_smaxv; + Int = Intrinsic::aarch64_neon_smaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -4918,7 +4918,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vmaxv_s16: { - Int = Intrinsic::arm64_neon_smaxv; + Int = Intrinsic::aarch64_neon_smaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -4929,7 +4929,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vmaxvq_s8: { - Int = Intrinsic::arm64_neon_smaxv; + Int = Intrinsic::aarch64_neon_smaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -4940,7 +4940,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vmaxvq_s16: { - Int = Intrinsic::arm64_neon_smaxv; + Int = Intrinsic::aarch64_neon_smaxv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -4951,7 +4951,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vminv_u8: { - Int = Intrinsic::arm64_neon_uminv; + Int = Intrinsic::aarch64_neon_uminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = 
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -4962,7 +4962,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vminv_u16: { - Int = Intrinsic::arm64_neon_uminv; + Int = Intrinsic::aarch64_neon_uminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -4973,7 +4973,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vminvq_u8: { - Int = Intrinsic::arm64_neon_uminv; + Int = Intrinsic::aarch64_neon_uminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -4984,7 +4984,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vminvq_u16: { - Int = Intrinsic::arm64_neon_uminv; + Int = Intrinsic::aarch64_neon_uminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -4995,7 +4995,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vminv_s8: { - Int = Intrinsic::arm64_neon_sminv; + Int = Intrinsic::aarch64_neon_sminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -5006,7 +5006,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vminv_s16: { - Int = Intrinsic::arm64_neon_sminv; + Int = Intrinsic::aarch64_neon_sminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -5017,7 +5017,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vminvq_s8: { - Int = Intrinsic::arm64_neon_sminv; + Int = Intrinsic::aarch64_neon_sminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -5028,7 +5028,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 8)); } case NEON::BI__builtin_neon_vminvq_s16: { - Int = Intrinsic::arm64_neon_sminv; + Int = Intrinsic::aarch64_neon_sminv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -5044,7 +5044,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return Builder.CreateFMul(Ops[0], RHS); } case NEON::BI__builtin_neon_vaddlv_u8: { - Int = Intrinsic::arm64_neon_uaddlv; + Int = Intrinsic::aarch64_neon_uaddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -5055,7 +5055,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vaddlv_u16: { - Int = Intrinsic::arm64_neon_uaddlv; + Int = Intrinsic::aarch64_neon_uaddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -5064,7 +5064,7 @@ Value 
*CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } case NEON::BI__builtin_neon_vaddlvq_u8: { - Int = Intrinsic::arm64_neon_uaddlv; + Int = Intrinsic::aarch64_neon_uaddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -5075,7 +5075,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vaddlvq_u16: { - Int = Intrinsic::arm64_neon_uaddlv; + Int = Intrinsic::aarch64_neon_uaddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -5084,7 +5084,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } case NEON::BI__builtin_neon_vaddlv_s8: { - Int = Intrinsic::arm64_neon_saddlv; + Int = Intrinsic::aarch64_neon_saddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8); @@ -5095,7 +5095,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vaddlv_s16: { - Int = Intrinsic::arm64_neon_saddlv; + Int = Intrinsic::aarch64_neon_saddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4); @@ -5104,7 +5104,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } case NEON::BI__builtin_neon_vaddlvq_s8: { - Int = Intrinsic::arm64_neon_saddlv; + Int = Intrinsic::aarch64_neon_saddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16); @@ -5115,7 +5115,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::IntegerType::get(getLLVMContext(), 16)); } case NEON::BI__builtin_neon_vaddlvq_s16: { - Int = Intrinsic::arm64_neon_saddlv; + Int = Intrinsic::aarch64_neon_saddlv; Ty = llvm::IntegerType::get(getLLVMContext(), 32); VTy = llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8); @@ -5125,13 +5125,13 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, } case NEON::BI__builtin_neon_vsri_n_v: case NEON::BI__builtin_neon_vsriq_n_v: { - Int = Intrinsic::arm64_neon_vsri; + Int = Intrinsic::aarch64_neon_vsri; llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); return EmitNeonCall(Intrin, Ops, "vsri_n"); } case NEON::BI__builtin_neon_vsli_n_v: case NEON::BI__builtin_neon_vsliq_n_v: { - Int = Intrinsic::arm64_neon_vsli; + Int = Intrinsic::aarch64_neon_vsli; llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); return EmitNeonCall(Intrin, Ops, "vsli_n"); } @@ -5142,7 +5142,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return Builder.CreateAdd(Ops[0], Ops[1]); case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: { - Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl; + Int = usgn ? 
Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; SmallVector<llvm::Value*,2> TmpOps; TmpOps.push_back(Ops[1]); TmpOps.push_back(Ops[2]); @@ -5166,15 +5166,15 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, switch (BuiltinID) { case NEON::BI__builtin_neon_vld1_x2_v: case NEON::BI__builtin_neon_vld1q_x2_v: - Int = Intrinsic::arm64_neon_ld1x2; + Int = Intrinsic::aarch64_neon_ld1x2; break; case NEON::BI__builtin_neon_vld1_x3_v: case NEON::BI__builtin_neon_vld1q_x3_v: - Int = Intrinsic::arm64_neon_ld1x3; + Int = Intrinsic::aarch64_neon_ld1x3; break; case NEON::BI__builtin_neon_vld1_x4_v: case NEON::BI__builtin_neon_vld1q_x4_v: - Int = Intrinsic::arm64_neon_ld1x4; + Int = Intrinsic::aarch64_neon_ld1x4; break; } Function *F = CGM.getIntrinsic(Int, Tys); @@ -5195,15 +5195,15 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, switch (BuiltinID) { case NEON::BI__builtin_neon_vst1_x2_v: case NEON::BI__builtin_neon_vst1q_x2_v: - Int = Intrinsic::arm64_neon_st1x2; + Int = Intrinsic::aarch64_neon_st1x2; break; case NEON::BI__builtin_neon_vst1_x3_v: case NEON::BI__builtin_neon_vst1q_x3_v: - Int = Intrinsic::arm64_neon_st1x3; + Int = Intrinsic::aarch64_neon_st1x3; break; case NEON::BI__builtin_neon_vst1_x4_v: case NEON::BI__builtin_neon_vst1q_x4_v: - Int = Intrinsic::arm64_neon_st1x4; + Int = Intrinsic::aarch64_neon_st1x4; break; } SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end()); @@ -5247,7 +5247,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5258,7 +5258,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5269,7 +5269,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5281,7 +5281,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2r, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5293,7 +5293,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::PointerType::getUnqual(VTy->getElementType()); 
Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3r, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5305,7 +5305,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); llvm::Type *Tys[2] = { VTy, PTy }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4r, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); @@ -5314,7 +5314,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vld2_lane_v: case NEON::BI__builtin_neon_vld2q_lane_v: { llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2lane, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); Ops.push_back(Ops[1]); Ops.erase(Ops.begin()+1); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -5330,7 +5330,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vld3_lane_v: case NEON::BI__builtin_neon_vld3q_lane_v: { llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3lane, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); Ops.push_back(Ops[1]); Ops.erase(Ops.begin()+1); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -5347,7 +5347,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vld4_lane_v: case NEON::BI__builtin_neon_vld4q_lane_v: { llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; - Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4lane, Tys); + Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); Ops.push_back(Ops[1]); Ops.erase(Ops.begin()+1); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -5367,7 +5367,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops.push_back(Ops[0]); Ops.erase(Ops.begin()); llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst2_lane_v: @@ -5377,7 +5377,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops[2] = Builder.CreateZExt(Ops[2], llvm::IntegerType::get(getLLVMContext(), 64)); llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2lane, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst3_v: @@ -5385,7 +5385,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops.push_back(Ops[0]); Ops.erase(Ops.begin()); llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst3_lane_v: @@ -5395,7 +5395,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops[3] = Builder.CreateZExt(Ops[3], 
llvm::IntegerType::get(getLLVMContext(), 64)); llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3lane, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst4_v: @@ -5403,7 +5403,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops.push_back(Ops[0]); Ops.erase(Ops.begin()); llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst4_lane_v: @@ -5413,7 +5413,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, Ops[4] = Builder.CreateZExt(Ops[4], llvm::IntegerType::get(getLLVMContext(), 64)); llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4lane, Tys), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), Ops, ""); } case NEON::BI__builtin_neon_vtrn_v: @@ -5476,45 +5476,45 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID, return SV; } case NEON::BI__builtin_neon_vqtbl1q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl1, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), Ops, "vtbl1"); } case NEON::BI__builtin_neon_vqtbl2q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl2, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), Ops, "vtbl2"); } case NEON::BI__builtin_neon_vqtbl3q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl3, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), Ops, "vtbl3"); } case NEON::BI__builtin_neon_vqtbl4q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl4, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), Ops, "vtbl4"); } case NEON::BI__builtin_neon_vqtbx1q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx1, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), Ops, "vtbx1"); } case NEON::BI__builtin_neon_vqtbx2q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx2, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), Ops, "vtbx2"); } case NEON::BI__builtin_neon_vqtbx3q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx3, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), Ops, "vtbx3"); } case NEON::BI__builtin_neon_vqtbx4q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx4, Ty), + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), Ops, "vtbx4"); } case NEON::BI__builtin_neon_vsqadd_v: case NEON::BI__builtin_neon_vsqaddq_v: { - Int = Intrinsic::arm64_neon_usqadd; + Int = Intrinsic::aarch64_neon_usqadd; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); } case NEON::BI__builtin_neon_vuqadd_v: case NEON::BI__builtin_neon_vuqaddq_v: { - Int = Intrinsic::arm64_neon_suqadd; + Int = Intrinsic::aarch64_neon_suqadd; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); } } diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 7259d455949..95503cc7ef6 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -5034,7 +5034,8 @@ 
ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) // arm64 targets use "int" ivar offset variables. All others, // including OS X x86_64 and Windows x86_64, use "long" ivar offsets. - if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64) + if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64 || + CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64) IvarOffsetVarTy = IntTy; else IvarOffsetVarTy = LongTy; diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 74433392e15..750bec8b07f 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -2226,14 +2226,14 @@ public: llvm::Value *EmitConcatVectors(llvm::Value *Lo, llvm::Value *Hi, llvm::Type *ArgTy); llvm::Value *EmitExtractHigh(llvm::Value *In, llvm::Type *ResTy); - // Helper functions for EmitARM64BuiltinExpr. + // Helper functions for EmitAArch64BuiltinExpr. llvm::Value *vectorWrapScalar8(llvm::Value *Op); llvm::Value *vectorWrapScalar16(llvm::Value *Op); llvm::Value *emitVectorWrappedScalar8Intrinsic( unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name); llvm::Value *emitVectorWrappedScalar16Intrinsic( unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name); - llvm::Value *EmitARM64BuiltinExpr(unsigned BuiltinID, const CallExpr *E); + llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitNeon64Call(llvm::Function *F, llvm::SmallVectorImpl<llvm::Value *> &O, const char *name); diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp index 88c4d96c1d3..c72d17281ed 100644 --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -3104,12 +3104,12 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, } //===----------------------------------------------------------------------===// -// ARM64 ABI Implementation +// AArch64 ABI Implementation //===----------------------------------------------------------------------===// namespace { -class ARM64ABIInfo : public ABIInfo { +class AArch64ABIInfo : public ABIInfo { public: enum ABIKind { AAPCS = 0, @@ -3120,7 +3120,7 @@ private: ABIKind Kind; public: - ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} + AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} private: ABIKind getABIKind() const { return Kind; } @@ -3212,10 +3212,10 @@ private: } }; -class ARM64TargetCodeGenInfo : public TargetCodeGenInfo { +class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { public: - ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind) - : TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {} + AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) + : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} StringRef getARCRetainAutoreleasedReturnValueMarker() const { return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; @@ -3231,12 +3231,12 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, ASTContext &Context, uint64_t *HAMembers = nullptr); -ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty, - unsigned &AllocatedVFP, - bool &IsHA, - unsigned &AllocatedGPR, - bool &IsSmallAggr, - bool IsNamedArg) const { +ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, + unsigned &AllocatedVFP, + bool &IsHA, + unsigned &AllocatedGPR, + bool &IsSmallAggr, + bool IsNamedArg) const { // Handle illegal vector types 
here. if (isIllegalVectorType(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); @@ -3346,7 +3346,7 @@ ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty, return ABIArgInfo::getIndirect(0, /*ByVal=*/false); } -ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const { +ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); @@ -3382,8 +3382,8 @@ ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const { return ABIArgInfo::getIndirect(0); } -/// isIllegalVectorType - check whether the vector type is legal for ARM64. -bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const { +/// isIllegalVectorType - check whether the vector type is legal for AArch64. +bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { if (const VectorType *VT = Ty->getAs<VectorType>()) { // Check whether VT is legal. unsigned NumElements = VT->getNumElements(); @@ -3624,7 +3624,7 @@ static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty, return ResAddr; } -llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, +llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { unsigned AllocatedGPR = 0, AllocatedVFP = 0; @@ -3636,7 +3636,7 @@ llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, AI.isIndirect(), CGF); } -llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, +llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // We do not support va_arg for aggregates or illegal vector types. // Lower VAArg here for these cases and use the LLVM va_arg instruction for @@ -6473,11 +6473,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { case llvm::Triple::aarch64_be: case llvm::Triple::arm64: case llvm::Triple::arm64_be: { - ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS; + AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; if (strcmp(getTarget().getABI(), "darwinpcs") == 0) - Kind = ARM64ABIInfo::DarwinPCS; + Kind = AArch64ABIInfo::DarwinPCS; - return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind)); + return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind)); } case llvm::Triple::arm: diff --git a/clang/lib/Driver/ToolChains.cpp b/clang/lib/Driver/ToolChains.cpp index 5f9b19c3ff7..8d8e7c7c206 100644 --- a/clang/lib/Driver/ToolChains.cpp +++ b/clang/lib/Driver/ToolChains.cpp @@ -400,7 +400,8 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args, // it never went into the SDK. // Linking against libgcc_s.1 isn't needed for iOS 5.0+ if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() && - getTriple().getArch() != llvm::Triple::arm64) + (getTriple().getArch() != llvm::Triple::arm64 && + getTriple().getArch() != llvm::Triple::aarch64)) CmdArgs.push_back("-lgcc_s.1"); // We currently always need a static runtime library for iOS. @@ -520,6 +521,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const { if (!OSXTarget.empty() && !iOSTarget.empty()) { if (getTriple().getArch() == llvm::Triple::arm || getTriple().getArch() == llvm::Triple::arm64 || + getTriple().getArch() == llvm::Triple::aarch64 || getTriple().getArch() == llvm::Triple::thumb) OSXTarget = ""; else @@ -656,6 +658,7 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args, // Use the newer cc_kext for iOS ARM after 6.0. 
if (!isTargetIPhoneOS() || isTargetIOSSimulator() || getTriple().getArch() == llvm::Triple::arm64 || + getTriple().getArch() == llvm::Triple::aarch64 || !isIPhoneOSVersionLT(6, 0)) { llvm::sys::path::append(P, "libclang_rt.cc_kext.a"); } else { @@ -926,7 +929,8 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args, // but we can't check the deployment target in the translation code until // it is set here. if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0) && - getTriple().getArch() != llvm::Triple::arm64) { + getTriple().getArch() != llvm::Triple::arm64 && + getTriple().getArch() != llvm::Triple::aarch64) { for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) { Arg *A = *it; ++it; @@ -993,7 +997,8 @@ bool MachO::isPIEDefault() const { bool MachO::isPICDefaultForced() const { return (getArch() == llvm::Triple::x86_64 || - getArch() == llvm::Triple::arm64); + getArch() == llvm::Triple::arm64 || + getArch() == llvm::Triple::aarch64); } bool MachO::SupportsProfiling() const { @@ -1082,7 +1087,8 @@ void Darwin::addStartObjectFileArgs(const llvm::opt::ArgList &Args, if (isTargetIOSSimulator()) { ; // iOS simulator does not need crt1.o. } else if (isTargetIPhoneOS()) { - if (getArch() == llvm::Triple::arm64) + if (getArch() == llvm::Triple::arm64 || + getArch() == llvm::Triple::aarch64) ; // iOS does not need any crt1 files for arm64 else if (isIPhoneOSVersionLT(3, 1)) CmdArgs.push_back("-lcrt1.o"); diff --git a/clang/lib/Driver/Tools.cpp b/clang/lib/Driver/Tools.cpp index 4c097c5120a..76b7962cd55 100644 --- a/clang/lib/Driver/Tools.cpp +++ b/clang/lib/Driver/Tools.cpp @@ -841,8 +841,9 @@ void Clang::AddARMTargetArgs(const ArgList &Args, } } -/// getARM64TargetCPU - Get the (LLVM) name of the ARM64 cpu we are targeting. -static std::string getARM64TargetCPU(const ArgList &Args) { +/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are +/// targeting. +static std::string getAArch64TargetCPU(const ArgList &Args) { // If we have -mcpu=, use that. if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { StringRef MCPU = A->getValue(); @@ -864,8 +865,8 @@ static std::string getARM64TargetCPU(const ArgList &Args) { return "generic"; } -void Clang::AddARM64TargetArgs(const ArgList &Args, - ArgStringList &CmdArgs) const { +void Clang::AddAArch64TargetArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args); llvm::Triple Triple(TripleStr); @@ -890,11 +891,11 @@ void Clang::AddARM64TargetArgs(const ArgList &Args, CmdArgs.push_back(ABIName); CmdArgs.push_back("-target-cpu"); - CmdArgs.push_back(Args.MakeArgString(getARM64TargetCPU(Args))); + CmdArgs.push_back(Args.MakeArgString(getAArch64TargetCPU(Args))); if (Args.hasArg(options::OPT_mstrict_align)) { CmdArgs.push_back("-backend-option"); - CmdArgs.push_back("-arm64-strict-align"); + CmdArgs.push_back("-aarch64-strict-align"); } } @@ -1327,7 +1328,7 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) { case llvm::Triple::aarch64_be: case llvm::Triple::arm64: case llvm::Triple::arm64_be: - return getARM64TargetCPU(Args); + return getAArch64TargetCPU(Args); case llvm::Triple::arm: case llvm::Triple::armeb: @@ -2455,7 +2456,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // PIC or PIE options above, if these show up, PIC is disabled. 
llvm::Triple Triple(TripleStr); if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6) || - Triple.getArch() == llvm::Triple::arm64)) + Triple.getArch() == llvm::Triple::arm64 || + Triple.getArch() == llvm::Triple::aarch64)) PIC = PIE = false; if (Args.hasArg(options::OPT_static)) PIC = PIE = false; @@ -2782,9 +2784,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, AddARMTargetArgs(Args, CmdArgs, KernelOrKext); break; + case llvm::Triple::aarch64: + case llvm::Triple::aarch64_be: case llvm::Triple::arm64: case llvm::Triple::arm64_be: - AddARM64TargetArgs(Args, CmdArgs); + AddAArch64TargetArgs(Args, CmdArgs); break; case llvm::Triple::mips: @@ -3408,16 +3412,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, options::OPT_munaligned_access)) { if (A->getOption().matches(options::OPT_mno_unaligned_access)) { CmdArgs.push_back("-backend-option"); - if (getToolChain().getTriple().getArch() == llvm::Triple::arm64 || + if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 || + getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be || + getToolChain().getTriple().getArch() == llvm::Triple::arm64 || getToolChain().getTriple().getArch() == llvm::Triple::arm64_be) - CmdArgs.push_back("-arm64-strict-align"); + CmdArgs.push_back("-aarch64-strict-align"); else CmdArgs.push_back("-arm-strict-align"); } else { CmdArgs.push_back("-backend-option"); - if (getToolChain().getTriple().getArch() == llvm::Triple::arm64 || + if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 || + getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be || + getToolChain().getTriple().getArch() == llvm::Triple::arm64 || getToolChain().getTriple().getArch() == llvm::Triple::arm64_be) - CmdArgs.push_back("-arm64-no-strict-align"); + CmdArgs.push_back("-aarch64-no-strict-align"); else CmdArgs.push_back("-arm-no-strict-align"); } diff --git a/clang/lib/Frontend/InitHeaderSearch.cpp b/clang/lib/Frontend/InitHeaderSearch.cpp index 34c01b45b81..d2890f0857c 100644 --- a/clang/lib/Frontend/InitHeaderSearch.cpp +++ b/clang/lib/Frontend/InitHeaderSearch.cpp @@ -379,6 +379,7 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp "arm-apple-darwin10", "v6", "", triple); break; + case llvm::Triple::aarch64: case llvm::Triple::arm64: AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", "arm64-apple-darwin10", "", "", triple); diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 78ba66bfb9b..592de529b59 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -313,7 +313,7 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { case llvm::Triple::aarch64_be: case llvm::Triple::arm64: case llvm::Triple::arm64_be: - if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall)) + if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall)) return ExprError(); break; case llvm::Triple::mips: @@ -473,11 +473,11 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth) { assert((BuiltinID == ARM::BI__builtin_arm_ldrex || BuiltinID == ARM::BI__builtin_arm_strex || - BuiltinID == ARM64::BI__builtin_arm_ldrex || - BuiltinID == ARM64::BI__builtin_arm_strex) && + BuiltinID == AArch64::BI__builtin_arm_ldrex || + BuiltinID == AArch64::BI__builtin_arm_strex) && "unexpected ARM builtin"); bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || - BuiltinID == ARM64::BI__builtin_arm_ldrex; + BuiltinID == 
AArch64::BI__builtin_arm_ldrex; DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); @@ -608,12 +608,12 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); } -bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID, +bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { llvm::APSInt Result; - if (BuiltinID == ARM64::BI__builtin_arm_ldrex || - BuiltinID == ARM64::BI__builtin_arm_strex) { + if (BuiltinID == AArch64::BI__builtin_arm_ldrex || + BuiltinID == AArch64::BI__builtin_arm_strex) { return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); }
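
For context, the source-level builtins handled by the hunks above are not changed by this rename; only the Clang-internal names and the LLVM intrinsics they lower to move from llvm.arm64.* to llvm.aarch64.*. The snippet below is a minimal illustration, not part of the patch: the helper names, file name and compile line are assumptions. Built for an aarch64/arm64 target with the CRC extension available (e.g. clang -target aarch64-linux-gnu -O2 -S -emit-llvm demo.c), the emitted IR should now reference llvm.aarch64.* intrinsics (ldxr/stxr, crc32b, neon.saddv) rather than the old llvm.arm64.* spellings.

#include <stdint.h>
#include <arm_neon.h>

/* Exclusive-monitor retry loop: __builtin_arm_ldrex / __builtin_arm_strex
   are handled by EmitAArch64BuiltinExpr above (ldxr/stxr, or ldxp/stxp for
   128-bit types). __builtin_arm_strex returns 0 when the store succeeds. */
static inline uint32_t atomic_add_exclusive(volatile uint32_t *p, uint32_t n) {
  uint32_t old;
  do {
    old = __builtin_arm_ldrex(p);
  } while (__builtin_arm_strex(old + n, p) != 0);
  return old;
}

/* CRC32 builtin: routed through the switch above to llvm.aarch64.crc32b
   (needs the target's CRC feature). */
static inline uint32_t crc32_byte(uint32_t crc, uint8_t b) {
  return __builtin_arm_crc32b(crc, b);
}

/* NEON across-vector add: vaddvq_s16 reaches the
   BI__builtin_neon_vaddvq_s16 case, i.e. llvm.aarch64.neon.saddv. */
static inline int16_t sum_lanes(int16x8_t v) {
  return vaddvq_s16(v);
}

Either way, user code and arm_neon.h stay as they were; the patch only renames the Clang-side handling (EmitAArch64BuiltinExpr, AArch64ABIInfo, the -aarch64-strict-align backend option, and so on) to match the backend's AArch64 naming.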