author     James Y Knight <jyknight@google.com>    2015-08-05 16:57:36 +0000
committer  James Y Knight <jyknight@google.com>    2015-08-05 16:57:36 +0000
commit     81167fb799a8dd8378c2824f6fb65264403976e7 (patch)
tree       4d6b8d1f67515d042297f74844dacbcea4ce80de /clang
parent     95f0606e6210ba034001b65f496e718274d20094 (diff)
Add missing atomic libcall support.
Support for emitting libcalls for __atomic_fetch_nand and
__atomic_{add,sub,and,or,xor,nand}_fetch was missing; add it, and some
test cases.

Differential Revision: http://reviews.llvm.org/D10847

llvm-svn: 244063
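For orientation, here is a minimal C sketch of the builtins this change adds libcall lowering for; whether Clang emits a native atomic instruction or one of the __atomic_* library calls depends on whether the target has lock-free atomics at the operand's size. The function name and values are illustrative, not taken from the patch.

    /* fetch_op returns the value prior to the update, op_fetch the updated value. */
    int demo_atomic_builtins(int *p) {
      int old = __atomic_fetch_nand(p, 0x0f, __ATOMIC_SEQ_CST); /* prior *p */
      int now = __atomic_add_fetch(p, 1, __ATOMIC_ACQ_REL);     /* updated *p */
      return old + now;
    }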
Diffstat (limited to 'clang')
-rw-r--r--  clang/docs/LanguageExtensions.rst        |  3
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp           | 74
-rw-r--r--  clang/test/CodeGen/atomic-ops-libcall.c  | 72
3 files changed, 146 insertions(+), 3 deletions(-)
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 6a4dd5ccbf3..88e42e95ba5 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -1715,6 +1715,9 @@ The macros ``__ATOMIC_RELAXED``, ``__ATOMIC_CONSUME``, ``__ATOMIC_ACQUIRE``,
provided, with values corresponding to the enumerators of C11's
``memory_order`` enumeration.
+(Note that Clang additionally provides GCC-compatible ``__atomic_*``
+builtins)
+
Low-level ARM exclusive memory builtins
---------------------------------------
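To illustrate the documentation note added above, a small C sketch (not part of the patch) showing that the predefined __ATOMIC_* macros line up with C11's memory_order enumerators, so either spelling can be passed to the GCC-compatible __atomic_* builtins:

    #include <stdatomic.h>

    /* The __ATOMIC_* macros carry the same values as C11's memory_order
       enumerators, so either form is a valid ordering argument. */
    _Static_assert(__ATOMIC_SEQ_CST == memory_order_seq_cst, "macro/enum mismatch");

    int load_relaxed(int *p) {
      return __atomic_load_n(p, memory_order_relaxed); /* same as __ATOMIC_RELAXED */
    }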
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 9839617c0e4..fc4b66bbbbc 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -699,7 +699,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
- llvm_unreachable("Already handled!");
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__atomic_load_n:
@@ -785,20 +785,43 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (UseLibcall) {
bool UseOptimizedLibcall = false;
switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
+
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
// For these, only library calls for certain sizes exist.
UseOptimizedLibcall = true;
break;
- default:
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
// Only use optimized library calls for sizes for which they exist.
if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
UseOptimizedLibcall = true;
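The size check above is what separates the size-suffixed ("optimized") libcalls from the generic ones for the load/store/exchange/compare-exchange builtins. A brief C sketch of the distinction, under the assumption that both calls end up as libcalls on the target (names are illustrative):

    struct S3 { char c[3]; };  /* 3 bytes: no size-suffixed libcall exists */

    void exchange_struct(struct S3 *p, struct S3 *val, struct S3 *old) {
      /* Presumably lowered to the generic __atomic_exchange libcall. */
      __atomic_exchange(p, val, old, __ATOMIC_SEQ_CST);
    }

    int exchange_int(int *p, int val) {
      /* 4 bytes: eligible for the optimized __atomic_exchange_4 libcall. */
      return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
    }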
@@ -820,6 +843,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType RetTy;
bool HaveRetTy = false;
switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
// There is only one libcall for compare an exchange, because there is no
// optimisation benefit possible from a libcall version of a weak compare
// and exchange.
@@ -903,7 +929,49 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
E->getExprLoc(), sizeChars);
break;
- default: return EmitUnsupportedRValue(E, "atomic library call");
+ // T __atomic_fetch_nand_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_fetch_nand:
+ LibCallName = "__atomic_fetch_nand";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+
+ // T __atomic_add_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_add_fetch:
+ LibCallName = "__atomic_add_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_and_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_and_fetch:
+ LibCallName = "__atomic_and_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_or_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_or_fetch:
+ LibCallName = "__atomic_or_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_sub_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_sub_fetch:
+ LibCallName = "__atomic_sub_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_xor_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_xor_fetch:
+ LibCallName = "__atomic_xor_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_nand_fetch_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_nand_fetch:
+ LibCallName = "__atomic_nand_fetch";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc(), sizeChars);
+ break;
}
// Optimized functions have the size in their name.
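Each new case above selects the generic library function name; per the context line just above, the optimized variant appends the operand size, e.g. __atomic_nand_fetch_4 for a 4-byte operand. As a semantic reference for the two builtin families (a non-atomic sketch for illustration only, not code from the patch): fetch_op returns the prior value, op_fetch returns the updated one, and nand computes ~(old & val).

    /* Non-atomic reference semantics; the real libcalls perform the
       read-modify-write atomically. */
    unsigned fetch_nand_ref(unsigned *mem, unsigned val) {
      unsigned old = *mem;
      *mem = ~(old & val);  /* nand */
      return old;           /* __atomic_fetch_nand returns the prior value */
    }

    unsigned nand_fetch_ref(unsigned *mem, unsigned val) {
      unsigned now = ~(*mem & val);
      *mem = now;
      return now;           /* __atomic_nand_fetch returns the updated value */
    }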
diff --git a/clang/test/CodeGen/atomic-ops-libcall.c b/clang/test/CodeGen/atomic-ops-libcall.c
index e55a1bd1eae..5b9ba467d81 100644
--- a/clang/test/CodeGen/atomic-ops-libcall.c
+++ b/clang/test/CodeGen/atomic-ops-libcall.c
@@ -35,3 +35,75 @@ int *fp2a(int **p) {
// Note, the GNU builtins do not multiply by sizeof(T)!
return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
+
+int test_atomic_fetch_add(int *p) {
+ // CHECK: test_atomic_fetch_add
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_add(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_sub(int *p) {
+ // CHECK: test_atomic_fetch_sub
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_and(int *p) {
+ // CHECK: test_atomic_fetch_and
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_and(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_or(int *p) {
+ // CHECK: test_atomic_fetch_or
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_or(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_xor(int *p) {
+ // CHECK: test_atomic_fetch_xor
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_nand(int *p) {
+ // CHECK: test_atomic_fetch_nand
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_add_fetch(int *p) {
+ // CHECK: test_atomic_add_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_add_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_add_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_sub_fetch(int *p) {
+ // CHECK: test_atomic_sub_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_sub_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_and_fetch(int *p) {
+ // CHECK: test_atomic_and_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_and_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_and_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_or_fetch(int *p) {
+ // CHECK: test_atomic_or_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_or_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_or_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_xor_fetch(int *p) {
+ // CHECK: test_atomic_xor_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_xor_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_nand_fetch(int *p) {
+ // CHECK: test_atomic_nand_fetch
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_nand_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
+}
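The RUN line of this test file sits outside the hunk shown here. Presumably it selects a target without inline atomic support at these widths so that the builtins above are lowered to the __atomic_*_4 libcalls the CHECK lines expect; a typical invocation would look roughly like the line below, where the exact target triple is an assumption rather than a detail copied from the file.

    // RUN: %clang_cc1 -triple armv5e-none-linux-gnueabi -emit-llvm %s -o - | FileCheck %s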