summaryrefslogtreecommitdiffstats
path: root/clang/test/CodeGen/atomic-ops-libcall.c
diff options
context:
space:
mode:
authorJames Y Knight <jyknight@google.com>2015-11-12 18:37:29 +0000
committerJames Y Knight <jyknight@google.com>2015-11-12 18:37:29 +0000
commit7aefb5b62324d3b62473c0f508aadf64bb46876d (patch)
tree520b8cb41fcf93b0ba33df60177aa77c7ea434ad /clang/test/CodeGen/atomic-ops-libcall.c
parenteed0145dd2a8c2ae72553105aac8ad5381d39c03 (diff)
downloadbcm5719-llvm-7aefb5b62324d3b62473c0f508aadf64bb46876d.tar.gz
bcm5719-llvm-7aefb5b62324d3b62473c0f508aadf64bb46876d.zip
Correct atomic libcall support for __atomic_*_fetch builtins.
In r244063, I had caused these builtins to call the same-named library functions, __atomic_*_fetch_SIZE. However, this was incorrect: while those functions are in fact supported by GCC's libatomic, they're not documented by the spec (and gcc doesn't ever call them). Instead, you're /supposed/ to call the __atomic_fetch_* builtins and then redo the operation inline to return the final value. Differential Revision: http://reviews.llvm.org/D14385 llvm-svn: 252920
Diffstat (limited to 'clang/test/CodeGen/atomic-ops-libcall.c')
-rw-r--r--clang/test/CodeGen/atomic-ops-libcall.c19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/clang/test/CodeGen/atomic-ops-libcall.c b/clang/test/CodeGen/atomic-ops-libcall.c
index 5b9ba467d81..0093a8cbbbb 100644
--- a/clang/test/CodeGen/atomic-ops-libcall.c
+++ b/clang/test/CodeGen/atomic-ops-libcall.c
@@ -74,36 +74,43 @@ int test_atomic_fetch_nand(int *p) {
int test_atomic_add_fetch(int *p) {
// CHECK: test_atomic_add_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_add_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_sub_fetch(int *p) {
// CHECK: test_atomic_sub_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_sub_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_and_fetch(int *p) {
// CHECK: test_atomic_and_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_and_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_or_fetch(int *p) {
// CHECK: test_atomic_or_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_or_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_xor_fetch(int *p) {
// CHECK: test_atomic_xor_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_xor_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_nand_fetch(int *p) {
// CHECK: test_atomic_nand_fetch
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_nand_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+ // CHECK: [[OR:%[^ ]*]] = or i32 [[CALL]], -56
+ // CHECK: {{%[^ ]*}} = xor i32 [[OR]], 55
return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}
OpenPOWER on IntegriCloud