Diffstat (limited to 'clang/lib/Sema/SemaChecking.cpp')
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp  52
1 file changed, 40 insertions, 12 deletions
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 32bf6e01c9f..41c7613c74a 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -309,6 +309,10 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
     if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
       return ExprError();
     break;
+  case llvm::Triple::arm64:
+    if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall))
+      return ExprError();
+    break;
   case llvm::Triple::aarch64:
   case llvm::Triple::aarch64_be:
     if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
@@ -369,7 +373,7 @@ static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
 /// the vector type specified by the NeonTypeFlags. This is used to check
 /// the pointer arguments for Neon load/store intrinsics.
 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
-                               bool IsAArch64) {
+                               bool IsPolyUnsigned, bool IsInt64Long) {
   switch (Flags.getEltType()) {
   case NeonTypeFlags::Int8:
     return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
@@ -378,15 +382,15 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
   case NeonTypeFlags::Int32:
     return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
   case NeonTypeFlags::Int64:
-    if (IsAArch64)
+    if (IsInt64Long)
       return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
     else
       return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                 : Context.LongLongTy;
   case NeonTypeFlags::Poly8:
-    return IsAArch64 ? Context.UnsignedCharTy : Context.SignedCharTy;
+    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
   case NeonTypeFlags::Poly16:
-    return IsAArch64 ? Context.UnsignedShortTy : Context.ShortTy;
+    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
   case NeonTypeFlags::Poly64:
     return Context.UnsignedLongTy;
   case NeonTypeFlags::Poly128:
@@ -434,9 +438,13 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
     ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
     QualType RHSTy = RHS.get()->getType();

-    bool IsAArch64 =
-        Context.getTargetInfo().getTriple().getArch() == llvm::Triple::aarch64;
-    QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context, IsAArch64);
+    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+    bool IsPolyUnsigned =
+        Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::arm64;
+    bool IsInt64Long =
+        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
+    QualType EltTy =
+        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
     if (HasConstPtr)
       EltTy = EltTy.withConst();
     QualType LHSTy = Context.getPointerType(EltTy);
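
For illustration only (not part of the patch), a minimal sketch of what the two new flags mean for user code: the pointer argument of a Neon load intrinsic must match the target's element-type conventions, so the same arm_neon.h intrinsic expects differently typed pointers on 32-bit ARM versus aarch64/arm64.

#include <arm_neon.h>

void neon_pointer_args(const poly8_t *p8, const int64_t *p64) {
  // poly8_t is unsigned char on aarch64/arm64 (the IsPolyUnsigned case)
  // but signed char on 32-bit ARM, so the checked pointer type differs.
  poly8x8_t pv = vld1_p8(p8);
  // int64_t is 'long' when the target's Int64Type is SignedLong (LP64
  // AArch64) and 'long long' otherwise -- the IsInt64Long case above.
  int64x1_t iv = vld1_s64(p64);
  (void)pv;
  (void)iv;
}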
@@ -487,11 +495,15 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
   return false;
 }

-bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
+                                        unsigned MaxWidth) {
   assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
-          BuiltinID == ARM::BI__builtin_arm_strex) &&
+          BuiltinID == ARM::BI__builtin_arm_strex ||
+          BuiltinID == ARM64::BI__builtin_arm_ldrex ||
+          BuiltinID == ARM64::BI__builtin_arm_strex) &&
          "unexpected ARM builtin");
-  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex;
+  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
+                 BuiltinID == ARM64::BI__builtin_arm_ldrex;

   DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
@@ -552,7 +564,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
   }

   // But ARM doesn't have instructions to deal with 128-bit versions.
-  if (Context.getTypeSize(ValType) > 64) {
+  if (Context.getTypeSize(ValType) > MaxWidth) {
+    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
     Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
       << PointerArg->getType() << PointerArg->getSourceRange();
     return true;
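
For illustration only (not from the patch): threading MaxWidth through lets the arm64 path accept exclusive accesses on types up to 128 bits wide, while the 32-bit ARM path keeps the 64-bit cap that triggers err_atomic_exclusive_builtin_pointer_size.

int try_update(__int128 *p) {
  // Accepted for arm64 targets once MaxWidth is 128; anything wider than
  // 64 bits stays rejected on 32-bit ARM, where MaxWidth remains 64.
  __int128 old = __builtin_arm_ldrex(p);
  return __builtin_arm_strex(old + 1, p); // 0 on success
}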
@@ -598,7 +611,7 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {

   if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
       BuiltinID == ARM::BI__builtin_arm_strex) {
-    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall);
+    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
   }

   if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
@@ -636,6 +649,21 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   return false;
 }

+bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID,
+                                         CallExpr *TheCall) {
+  llvm::APSInt Result;
+
+  if (BuiltinID == ARM64::BI__builtin_arm_ldrex ||
+      BuiltinID == ARM64::BI__builtin_arm_strex) {
+    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+  }
+
+  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+    return true;
+
+  return false;
+}
+
 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   unsigned i = 0, l = 0, u = 0;
   switch (BuiltinID) {
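
The matching declarations belong in Sema.h, which this file-limited diff does not show; the lines below are only inferred from the definitions above, not the actual header change.

// Inferred sketch of the Sema.h declarations (assumption, not part of this diff):
bool CheckARM64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                  unsigned MaxWidth);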