author     Yaxun Liu <Yaxun.Liu@amd.com>   2017-08-04 18:16:31 +0000
committer  Yaxun Liu <Yaxun.Liu@amd.com>   2017-08-04 18:16:31 +0000
commit     39195062c20c5fd7d01678ff87c9c2851644a669 (patch)
tree       91440b8bb2b2bd18fe5a3fa684ae01cca7179ce8 /clang/lib
parent     0afcef27a12db3730941b257d9535f7e32479fdb (diff)
Add OpenCL 2.0 atomic builtin functions as Clang builtin
OpenCL 2.0 atomic builtin functions have a scope argument, which is ideally represented as a synchronization scope argument on LLVM atomic instructions.

Clang supports translating Clang atomic builtin functions to LLVM atomic instructions. However, it currently does not support the synchronization scope of LLVM atomic instructions. Without this, users have to resort to LLVM assembly code to implement the OpenCL atomic builtin functions.

This patch adds the OpenCL 2.0 atomic builtin functions as Clang builtin functions, which support generating LLVM atomic instructions with a synchronization scope operand.

Currently only a constant memory scope argument is supported. Support for a non-constant memory scope argument will be added later.

Differential Revision: https://reviews.llvm.org/D28691

llvm-svn: 310082
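For context, a minimal usage sketch (not part of the commit) showing the shape of the new builtins; the kernel and variable names are illustrative, and the macros are the ones this patch defines in InitPreprocessor.cpp:

    // OpenCL 2.0 kernel: atomically increment a device-visible counter.
    // The memory order and synchronization scope are the trailing
    // constant arguments.
    kernel void count_hits(volatile global atomic_int *counter) {
      __opencl_atomic_fetch_add(counter, 1, __ATOMIC_RELAXED,
                                __OPENCL_MEMORY_SCOPE_DEVICE);
    }

Unlike the __c11_atomic_* builtins, each __opencl_atomic_* builtin takes one extra trailing argument for the synchronization scope, which for now must be a compile-time constant.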
Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ASTContext.cpp             |   9
-rw-r--r--  clang/lib/AST/Expr.cpp                   |  31
-rw-r--r--  clang/lib/AST/StmtPrinter.cpp            |   6
-rw-r--r--  clang/lib/Basic/Targets/AMDGPU.cpp       |   2
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp           | 186
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp             |   2
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp         |  27
-rw-r--r--  clang/lib/CodeGen/TargetInfo.h           |   5
-rw-r--r--  clang/lib/Frontend/InitPreprocessor.cpp  |  13
-rw-r--r--  clang/lib/Headers/opencl-c.h             |  25
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp          |  94
11 files changed, 327 insertions(+), 73 deletions(-)
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index c60373c5a90..34a78c6a0c6 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1182,7 +1182,14 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
ObjCSuperType = QualType();
// void * type
- VoidPtrTy = getPointerType(VoidTy);
+ if (LangOpts.OpenCLVersion >= 200) {
+ auto Q = VoidTy.getQualifiers();
+ Q.setAddressSpace(LangAS::opencl_generic);
+ VoidPtrTy = getPointerType(getCanonicalType(
+ getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
+ } else {
+ VoidPtrTy = getPointerType(VoidTy);
+ }
// nullptr type (C++0x 2.14.7)
InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
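With this hunk, the built-in void* type lives in the generic address space for OpenCL 2.0, which is what lets pointers in named address spaces flow through the void*-typed builtin and libcall interfaces. A hedged sketch (illustrative kernel, not from the patch):

    // A local-memory pointer is fine here: it is converted to the
    // generic address space expected by the builtin's void* machinery
    // (see the CastToGenericAddrSpace lambda in CGAtomic.cpp below).
    kernel void from_local(volatile local atomic_int *p) {
      __opencl_atomic_fetch_or(p, 1, __ATOMIC_RELAXED,
                               __OPENCL_MEMORY_SCOPE_WORK_GROUP);
    }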
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index afc7fa8ea09..8cb9f76d965 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -3938,12 +3938,17 @@ AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
switch (Op) {
case AO__c11_atomic_init:
+ case AO__opencl_atomic_init:
+ return 2;
case AO__c11_atomic_load:
+ case AO__opencl_atomic_load:
case AO__atomic_load_n:
- return 2;
+ return 3;
case AO__c11_atomic_store:
case AO__c11_atomic_exchange:
+ case AO__opencl_atomic_store:
+ case AO__opencl_atomic_exchange:
case AO__atomic_load:
case AO__atomic_store:
case AO__atomic_store_n:
@@ -3953,6 +3958,13 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__c11_atomic_fetch_and:
case AO__c11_atomic_fetch_or:
case AO__c11_atomic_fetch_xor:
+ case AO__opencl_atomic_fetch_add:
+ case AO__opencl_atomic_fetch_sub:
+ case AO__opencl_atomic_fetch_and:
+ case AO__opencl_atomic_fetch_or:
+ case AO__opencl_atomic_fetch_xor:
+ case AO__opencl_atomic_fetch_min:
+ case AO__opencl_atomic_fetch_max:
case AO__atomic_fetch_add:
case AO__atomic_fetch_sub:
case AO__atomic_fetch_and:
@@ -3965,22 +3977,31 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_or_fetch:
case AO__atomic_xor_fetch:
case AO__atomic_nand_fetch:
- return 3;
+ return 4;
case AO__atomic_exchange:
- return 4;
+ return 5;
case AO__c11_atomic_compare_exchange_strong:
case AO__c11_atomic_compare_exchange_weak:
- return 5;
+ case AO__opencl_atomic_compare_exchange_strong:
+ case AO__opencl_atomic_compare_exchange_weak:
+ return 6;
case AO__atomic_compare_exchange:
case AO__atomic_compare_exchange_n:
- return 6;
+ return 7;
}
llvm_unreachable("unknown atomic op");
}
+QualType AtomicExpr::getValueType() const {
+ auto T = getPtr()->getType()->castAs<PointerType>()->getPointeeType();
+ if (auto AT = T->getAs<AtomicType>())
+ return AT->getValueType();
+ return T;
+}
+
QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) {
unsigned ArraySectionCount = 0;
while (auto *OASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParens())) {
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index f4418c92406..e3b96284bd1 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -1891,7 +1891,8 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
// AtomicExpr stores its subexpressions in a permuted order.
PrintExpr(Node->getPtr());
if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
- Node->getOp() != AtomicExpr::AO__atomic_load_n) {
+ Node->getOp() != AtomicExpr::AO__atomic_load_n &&
+ Node->getOp() != AtomicExpr::AO__opencl_atomic_load) {
OS << ", ";
PrintExpr(Node->getVal1());
}
@@ -1905,7 +1906,8 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
OS << ", ";
PrintExpr(Node->getWeak());
}
- if (Node->getOp() != AtomicExpr::AO__c11_atomic_init) {
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init &&
+ Node->getOp() != AtomicExpr::AO__opencl_atomic_init) {
OS << ", ";
PrintExpr(Node->getOrder());
}
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 8c1b863229c..395dbf822f8 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -328,6 +328,8 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
}
+
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
void AMDGPUTargetInfo::adjust(LangOptions &Opts) {
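Setting MaxAtomicPromoteWidth and MaxAtomicInlineWidth to 64 tells Clang that amdgcn handles atomics up to 64 bits wide inline, so the new builtins lower to LLVM atomic instructions rather than __opencl_atomic_* library calls. A sketch, assuming a device that provides the 64-bit atomic extensions:

    #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
    #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
    // Up to 64 bits this lowers to an inline atomicrmw on amdgcn,
    // not a runtime library call.
    kernel void add64(volatile global atomic_long *c) {
      __opencl_atomic_fetch_add(c, 1L, __ATOMIC_RELAXED,
                                __OPENCL_MEMORY_SCOPE_DEVICE);
    }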
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index a6e6fec206d..5e6633fff16 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -15,6 +15,7 @@
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
@@ -359,13 +360,15 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
Address Val1, Address Val2,
uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
- llvm::AtomicOrdering FailureOrder) {
+ llvm::AtomicOrdering FailureOrder,
+ llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
+ Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
+ Scope);
Pair->setVolatile(E->isVolatile());
Pair->setWeak(IsWeak);
@@ -407,7 +410,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
Address Val1, Address Val2,
llvm::Value *FailureOrderVal,
uint64_t Size,
- llvm::AtomicOrdering SuccessOrder) {
+ llvm::AtomicOrdering SuccessOrder,
+ llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -435,7 +439,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
}
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- FailureOrder);
+ FailureOrder, Scope);
return;
}
@@ -460,13 +464,13 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// doesn't fold to a constant for the ordering.
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
+ Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
if (AcquireBB) {
CGF.Builder.SetInsertPoint(AcquireBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
+ Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -476,7 +480,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
if (SeqCstBB) {
CGF.Builder.SetInsertPoint(SeqCstBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent);
+ llvm::AtomicOrdering::SequentiallyConsistent, Scope);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
@@ -488,27 +492,31 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order) {
+ uint64_t Size, llvm::AtomicOrdering Order,
+ llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order);
+ FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order);
+ FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Order);
+ Val1, Val2, FailureOrder, Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -522,12 +530,12 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order);
+ FailureOrder, Size, Order, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order);
+ FailureOrder, Size, Order, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -535,26 +543,29 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
return;
}
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
- Load->setAtomic(Order);
+ Load->setAtomic(Order, Scope);
Load->setVolatile(E->isVolatile());
CGF.Builder.CreateStore(Load, Dest);
return;
}
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n: {
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
- Store->setAtomic(Order);
+ Store->setAtomic(Order, Scope);
Store->setVolatile(E->isVolatile());
return;
}
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
Op = llvm::AtomicRMWInst::Xchg;
@@ -564,6 +575,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Add;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
Op = llvm::AtomicRMWInst::Add;
break;
@@ -572,14 +584,26 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Sub;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
Op = llvm::AtomicRMWInst::Sub;
break;
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
+ : llvm::AtomicRMWInst::UMin;
+ break;
+
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
+ : llvm::AtomicRMWInst::UMax;
+ break;
+
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
Op = llvm::AtomicRMWInst::And;
break;
@@ -588,6 +612,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Or;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
Op = llvm::AtomicRMWInst::Or;
break;
@@ -596,6 +621,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Xor;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
Op = llvm::AtomicRMWInst::Xor;
break;
@@ -610,7 +636,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::AtomicRMWInst *RMWI =
- CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
+ CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
RMWI->setVolatile(E->isVolatile());
// For __atomic_*_fetch operations, perform the operation again to
@@ -677,19 +703,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
Address Dest = Address::invalid();
Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
- if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
+ E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
EmitAtomicInit(E->getVal1(), lvalue);
return RValue::get(nullptr);
}
llvm::Value *Order = EmitScalarExpr(E->getOrder());
+ llvm::Value *Scope = EmitScalarExpr(E->getScope());
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load_n:
break;
@@ -708,6 +738,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange:
Val1 = EmitPointerWithAlignment(E->getVal1());
@@ -716,12 +748,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- if (E->getNumSubExprs() == 6)
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
+ E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
IsWeak = EmitScalarExpr(E->getWeak());
break;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
if (MemTy->isPointerType()) {
// For pointer arithmetic, we're required to do a bit of math:
// adding 1 to an int* is not the same as adding 1 to a uintptr_t.
@@ -744,11 +779,18 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
@@ -784,18 +826,26 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
bool UseOptimizedLibcall = false;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_and_fetch:
@@ -812,6 +862,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_store_n:
@@ -833,7 +888,24 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
getContext().getSizeType());
}
// Atomic address is the first or second parameter
- Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
+ // The OpenCL atomic library functions only accept pointer arguments to
+ // generic address space.
+ auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
+ if (!E->isOpenCL())
+ return V;
+ auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
+ if (AS == LangAS::opencl_generic)
+ return V;
+ auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
+ auto T = V->getType();
+ auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
+
+ return getTargetHooks().performAddrSpaceCast(
+ *this, V, AS, LangAS::opencl_generic, DestType, false);
+ };
+
+ Args.add(RValue::get(CastToGenericAddrSpace(
+ EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
getContext().VoidPtrTy);
std::string LibCallName;
@@ -844,6 +916,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled!");
// There is only one libcall for compare an exchange, because there is no
@@ -855,13 +928,17 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// int success, int failure)
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
- getContext().VoidPtrTy);
+ Args.add(
+ RValue::get(CastToGenericAddrSpace(
+ EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
+ getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
MemTy, E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
@@ -871,6 +948,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// int order)
// T __atomic_exchange_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
@@ -880,6 +958,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
LibCallName = "__atomic_store";
@@ -891,6 +970,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
@@ -901,6 +981,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
PostOp = llvm::Instruction::Add;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@@ -912,6 +993,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
PostOp = llvm::Instruction::And;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@@ -923,6 +1005,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
PostOp = llvm::Instruction::Or;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@@ -934,6 +1017,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
PostOp = llvm::Instruction::Sub;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@@ -945,11 +1029,26 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
PostOp = llvm::Instruction::Xor;
// Fall through.
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), sizeChars);
break;
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ LibCallName = E->getValueType()->isSignedIntegerType()
+ ? "__atomic_fetch_min"
+ : "__atomic_fetch_umin";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
+ break;
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ LibCallName = E->getValueType()->isSignedIntegerType()
+ ? "__atomic_fetch_max"
+ : "__atomic_fetch_umax";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
+ break;
// T __atomic_nand_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_nand_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_nand_fetch:
@@ -962,6 +1061,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
break;
}
+ if (E->isOpenCL()) {
+ LibCallName = std::string("__opencl") +
+ StringRef(LibCallName).drop_front(1).str();
+
+ }
// Optimized functions have the size in their name.
if (UseOptimizedLibcall)
LibCallName += "_" + llvm::utostr(Size);
@@ -982,6 +1086,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
+ if (E->isOpenCL())
+ Args.add(RValue::get(Scope), getContext().IntTy);
// PostOp is only needed for the atomic_*_fetch operations, and
// thus is only needed for and implemented in the
@@ -1018,12 +1124,20 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store_n;
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
+ assert(isa<llvm::ConstantInt>(Scope) &&
+ "Non-constant synchronization scope not supported");
+ auto SCID = getTargetHooks().getLLVMSyncScopeID(
+ static_cast<SyncScope>(cast<llvm::ConstantInt>(Scope)->getZExtValue()),
+ getLLVMContext());
+
if (isa<llvm::ConstantInt>(Order)) {
auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
// We should not ever get to a case where the ordering isn't a valid C ABI
@@ -1032,30 +1146,30 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic);
+ llvm::AtomicOrdering::Monotonic, SCID);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire);
+ llvm::AtomicOrdering::Acquire, SCID);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release);
+ llvm::AtomicOrdering::Release, SCID);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease);
+ llvm::AtomicOrdering::AcquireRelease, SCID);
break;
case llvm::AtomicOrderingCABI::seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::SequentiallyConsistent);
+ llvm::AtomicOrdering::SequentiallyConsistent, SCID);
break;
}
if (RValTy->isVoidType())
@@ -1091,13 +1205,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, llvm::AtomicOrdering::Monotonic);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ llvm::AtomicOrdering::Monotonic, SCID);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, llvm::AtomicOrdering::Acquire);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ llvm::AtomicOrdering::Acquire, SCID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1106,23 +1220,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, llvm::AtomicOrdering::Release);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ llvm::AtomicOrdering::Release, SCID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, llvm::AtomicOrdering::AcquireRelease);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ llvm::AtomicOrdering::AcquireRelease, SCID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, llvm::AtomicOrdering::SequentiallyConsistent);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ llvm::AtomicOrdering::SequentiallyConsistent, SCID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
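One detail from EmitAtomicOp worth calling out: __opencl_atomic_fetch_min and __opencl_atomic_fetch_max select the signed or unsigned LLVM atomicrmw operation (Min/UMin, Max/UMax) from the element type. An illustrative sketch:

    kernel void clamp_floor(volatile global atomic_int *si,
                            volatile global atomic_uint *ui) {
      // signed element type -> atomicrmw min
      __opencl_atomic_fetch_min(si, 0, __ATOMIC_RELAXED,
                                __OPENCL_MEMORY_SCOPE_WORK_GROUP);
      // unsigned element type -> atomicrmw umin
      __opencl_atomic_fetch_min(ui, 0u, __ATOMIC_RELAXED,
                                __OPENCL_MEMORY_SCOPE_WORK_GROUP);
    }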
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index ec48b2cc86e..cd7911a6046 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -48,7 +48,7 @@ using namespace CodeGen;
llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
unsigned addressSpace =
-    cast<llvm::PointerType>(value->getType())->getAddressSpace();
+      cast<llvm::PointerType>(value->getType())->getAddressSpace();
llvm::PointerType *destType = Int8PtrTy;
if (addressSpace)
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 12341431ac0..d2fcbeadbc1 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -444,6 +444,11 @@ TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}
+llvm::SyncScope::ID
+TargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const {
+ return C.getOrInsertSyncScopeID(""); /* default sync scope */
+}
+
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff a the field is "empty", that is it
@@ -7430,6 +7435,8 @@ public:
}
unsigned getGlobalVarAddressSpace(CodeGenModule &CGM,
const VarDecl *D) const override;
+ llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
+ llvm::LLVMContext &C) const override;
};
}
@@ -7539,6 +7546,26 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
return DefaultGlobalAS;
}
+llvm::SyncScope::ID
+AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
+ llvm::LLVMContext &C) const {
+ StringRef Name;
+ switch (S) {
+ case SyncScope::OpenCLWorkGroup:
+ Name = "workgroup";
+ break;
+ case SyncScope::OpenCLDevice:
+ Name = "agent";
+ break;
+ case SyncScope::OpenCLAllSVMDevices:
+ Name = "";
+ break;
+ case SyncScope::OpenCLSubGroup:
+ Name = "subgroup";
+ }
+ return C.getOrInsertSyncScopeID(Name);
+}
+
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
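The mapping above is what ends up as the syncscope on the emitted instruction for amdgcn: work_group -> "workgroup", device -> "agent", all_svm_devices -> "" (the default scope), sub_group -> "subgroup". For example (the IR in the comment is approximate):

    kernel void publish(volatile global atomic_int *flag) {
      // On amdgcn this emits, roughly:
      //   store atomic i32 1, i32 addrspace(1)* %flag
      //         syncscope("workgroup") release, align 4
      __opencl_atomic_store(flag, 1, __ATOMIC_RELEASE,
                            __OPENCL_MEMORY_SCOPE_WORK_GROUP);
    }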
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 7149dec1dad..c26d7c710c1 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -19,6 +19,7 @@
#include "CGValue.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SyncScope.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
@@ -260,6 +261,10 @@ public:
virtual llvm::Constant *
performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *V, unsigned SrcAddr,
unsigned DestAddr, llvm::Type *DestTy) const;
+
+ /// Get the syncscope used in LLVM IR.
+ virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
+ llvm::LLVMContext &C) const;
};
} // namespace CodeGen
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 64128dfdf53..493144fb953 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -14,6 +14,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -575,6 +576,18 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+ // Define macros for the OpenCL memory scope.
+ // The values should match clang SyncScope enum.
+ assert(static_cast<unsigned>(SyncScope::OpenCLWorkGroup) == 1 &&
+ static_cast<unsigned>(SyncScope::OpenCLDevice) == 2 &&
+ static_cast<unsigned>(SyncScope::OpenCLAllSVMDevices) == 3 &&
+ static_cast<unsigned>(SyncScope::OpenCLSubGroup) == 4);
+ Builder.defineMacro("__OPENCL_MEMORY_SCOPE_WORK_ITEM", "0");
+ Builder.defineMacro("__OPENCL_MEMORY_SCOPE_WORK_GROUP", "1");
+ Builder.defineMacro("__OPENCL_MEMORY_SCOPE_DEVICE", "2");
+ Builder.defineMacro("__OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES", "3");
+ Builder.defineMacro("__OPENCL_MEMORY_SCOPE_SUB_GROUP", "4");
+
// Support for #pragma redefine_extname (Sun compatibility)
Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index 58c8daf3a53..9657e45a2f4 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -13141,13 +13141,14 @@ void __ovld __conv barrier(cl_mem_fence_flags flags);
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-typedef enum memory_scope
-{
- memory_scope_work_item,
- memory_scope_work_group,
- memory_scope_device,
- memory_scope_all_svm_devices,
- memory_scope_sub_group
+typedef enum memory_scope {
+ memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
+ memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
+ memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
+ memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+ memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
+#endif
} memory_scope;
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
@@ -13952,11 +13953,11 @@ unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long v
// enum values aligned with what clang uses in EmitAtomicExpr()
typedef enum memory_order
{
- memory_order_relaxed,
- memory_order_acquire,
- memory_order_release,
- memory_order_acq_rel,
- memory_order_seq_cst
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
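Because the enumerators now carry the same numeric values as the predefined macros, memory_order and memory_scope constants can be forwarded straight to the new builtins. A sketch of a hypothetical wrapper (the function name is illustrative, not part of the header):

    int load_acquire_device(volatile global atomic_int *p) {
      // Valid because memory_scope_device == __OPENCL_MEMORY_SCOPE_DEVICE
      // and memory_order_acquire == __ATOMIC_ACQUIRE.
      return __opencl_atomic_load(p, memory_order_acquire,
                                  memory_scope_device);
    }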
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 81dd36cf671..578dbf0aadc 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -25,6 +25,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
@@ -2787,15 +2788,18 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("There is no ordering argument for an init");
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load:
return OrderingCABI != llvm::AtomicOrderingCABI::release &&
OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
@@ -2812,7 +2816,9 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- // All these operations take one of the following forms:
+ // All the non-OpenCL operations take one of the following forms.
+ // The OpenCL operations take the __c11 forms with one extra argument for
+ // synchronization scope.
enum {
// C __c11_atomic_init(A *, C)
Init,
@@ -2833,6 +2839,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
GNUCmpXchg
} Form = Init;
+ const unsigned NumForm = GNUCmpXchg + 1;
const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
// where:
@@ -2842,12 +2849,18 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// M is C if C is an integer, and ptrdiff_t if C is a pointer, and
// the int parameters are for orderings.
+ static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
+ && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
+ "need to update code for modified forms");
static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
AtomicExpr::AO__atomic_load,
"need to update code for modified C11 atomics");
- bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
- Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+ bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
+ Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
+ bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
+ Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
+ IsOpenCL;
bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
Op == AtomicExpr::AO__atomic_store_n ||
Op == AtomicExpr::AO__atomic_exchange_n ||
@@ -2856,10 +2869,12 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
+ case AtomicExpr::AO__opencl_atomic_init:
Form = Init;
break;
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load_n:
Form = Load;
break;
@@ -2869,6 +2884,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
break;
case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
Form = Copy;
@@ -2876,6 +2892,10 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
@@ -2885,6 +2905,9 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
@@ -2897,6 +2920,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
break;
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
Form = Xchg;
break;
@@ -2907,6 +2931,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
Form = C11CmpXchg;
break;
@@ -2916,16 +2942,19 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
break;
}
+ unsigned AdjustedNumArgs = NumArgs[Form];
+ if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
+ ++AdjustedNumArgs;
// Check we have the right number of arguments.
- if (TheCall->getNumArgs() < NumArgs[Form]) {
+ if (TheCall->getNumArgs() < AdjustedNumArgs) {
Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << 0 << AdjustedNumArgs << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
- } else if (TheCall->getNumArgs() > NumArgs[Form]) {
- Diag(TheCall->getArg(NumArgs[Form])->getLocStart(),
+ } else if (TheCall->getNumArgs() > AdjustedNumArgs) {
+ Diag(TheCall->getArg(AdjustedNumArgs)->getLocStart(),
diag::err_typecheck_call_too_many_args)
- << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << 0 << AdjustedNumArgs << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
}
@@ -2953,9 +2982,11 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (AtomTy.isConstQualified()) {
+ if (AtomTy.isConstQualified() ||
+ AtomTy.getAddressSpace() == LangAS::opencl_constant) {
Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
- << Ptr->getType() << Ptr->getSourceRange();
+ << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
+ << Ptr->getSourceRange();
return ExprError();
}
ValType = AtomTy->getAs<AtomicType>()->getValueType();
@@ -3024,7 +3055,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
ValType.removeLocalVolatile();
ValType.removeLocalConst();
QualType ResultType = ValType;
- if (Form == Copy || Form == LoadCopy || Form == GNUXchg || Form == Init)
+ if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
+ Form == Init)
ResultType = Context.VoidTy;
else if (Form == C11CmpXchg || Form == GNUCmpXchg)
ResultType = Context.BoolTy;
@@ -3038,7 +3070,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// The first argument --- the pointer --- has a fixed type; we
// deduce the types of the rest of the arguments accordingly. Walk
// the remaining arguments, converting them to the deduced value type.
- for (unsigned i = 1; i != NumArgs[Form]; ++i) {
+ for (unsigned i = 1; i != TheCall->getNumArgs(); ++i) {
QualType Ty;
if (i < NumVals[Form] + 1) {
switch (i) {
@@ -3080,7 +3112,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
break;
}
} else {
- // The order(s) are always converted to int.
+ // The order(s) and scope are always converted to int.
Ty = Context.IntTy;
}
@@ -3093,6 +3125,27 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
TheCall->setArg(i, Arg.get());
}
+ Expr *Scope;
+ if (Form != Init) {
+ if (IsOpenCL) {
+ Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+ llvm::APSInt Result(32);
+ if (!Scope->isIntegerConstantExpr(Result, Context))
+ Diag(Scope->getLocStart(),
+ diag::err_atomic_op_has_non_constant_synch_scope)
+ << Scope->getSourceRange();
+ else if (!isValidSyncScopeValue(Result.getZExtValue()))
+ Diag(Scope->getLocStart(), diag::err_atomic_op_has_invalid_synch_scope)
+ << Scope->getSourceRange();
+ } else {
+ Scope = IntegerLiteral::Create(
+ Context,
+ llvm::APInt(Context.getTypeSize(Context.IntTy),
+ static_cast<unsigned>(SyncScope::OpenCLAllSVMDevices)),
+ Context.IntTy, SourceLocation());
+ }
+ }
+
// Permute the arguments into a 'consistent' order.
SmallVector<Expr*, 5> SubExprs;
SubExprs.push_back(Ptr);
@@ -3103,28 +3156,33 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
break;
case Load:
SubExprs.push_back(TheCall->getArg(1)); // Order
+ SubExprs.push_back(Scope); // Scope
break;
case LoadCopy:
case Copy:
case Arithmetic:
case Xchg:
SubExprs.push_back(TheCall->getArg(2)); // Order
+ SubExprs.push_back(Scope); // Scope
SubExprs.push_back(TheCall->getArg(1)); // Val1
break;
case GNUXchg:
// Note, AtomicExpr::getVal2() has a special case for this atomic.
SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(Scope); // Scope
SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(2)); // Val2
break;
case C11CmpXchg:
SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(Scope); // Scope
SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(4)); // OrderFail
SubExprs.push_back(TheCall->getArg(2)); // Val2
break;
case GNUCmpXchg:
SubExprs.push_back(TheCall->getArg(4)); // Order
+ SubExprs.push_back(Scope); // Scope
SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(5)); // OrderFail
SubExprs.push_back(TheCall->getArg(2)); // Val2
@@ -3146,10 +3204,14 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
TheCall->getRParenLoc());
if ((Op == AtomicExpr::AO__c11_atomic_load ||
- (Op == AtomicExpr::AO__c11_atomic_store)) &&
+ Op == AtomicExpr::AO__c11_atomic_store ||
+ Op == AtomicExpr::AO__opencl_atomic_load ||
+ Op == AtomicExpr::AO__opencl_atomic_store ) &&
Context.AtomicUsesUnsupportedLibcall(AE))
- Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib) <<
- ((Op == AtomicExpr::AO__c11_atomic_load) ? 0 : 1);
+ Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib)
+ << ((Op == AtomicExpr::AO__c11_atomic_load ||
+ Op == AtomicExpr::AO__opencl_atomic_load)
+ ? 0 : 1);
return AE;
}
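The net effect of the Sema changes: every __opencl_atomic_* call except init takes one extra trailing scope argument, which must be an integer constant expression carrying a valid scope value. Two illustrative rejects, matching the diagnostics referenced above:

    kernel void bad(volatile global atomic_int *p, int s) {
      // rejected: err_atomic_op_has_non_constant_synch_scope
      // ('s' is not an integer constant expression)
      __opencl_atomic_load(p, __ATOMIC_ACQUIRE, s);
      // rejected: err_atomic_op_has_invalid_synch_scope
      // (42 is not a value of the SyncScope enum)
      __opencl_atomic_load(p, __ATOMIC_ACQUIRE, 42);
    }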