author    | Eric Christopher <echristo@gmail.com> | 2019-11-25 16:33:19 -0800
committer | Eric Christopher <echristo@gmail.com> | 2019-11-25 17:16:46 -0800
commit    | 8ff85ed905a7306977d07a5cd67ab4d5a56fafb4
tree      | a52bc11417f5a8598b2cb9dd59a08bffeeea1fb3 /clang/test/CodeGen/atomic-ops-libcall.c
parent    | 3687ddef2c8274b926907f63d7762cc98325459e
As a follow-up to my initial mail to llvm-dev, here's a first pass at the O1 pipeline described there.
This change doesn't include any move from SelectionDAG to FastISel;
that will come separately, with other numbers that should help inform that decision.
There also haven't been any real debuggability studies with this pipeline yet;
this is just the initial version, done so that people can see it and we can start
tweaking from here.
Test updates: Outside of the newpm tests, most of the updates come from
optimization passes that no longer run at this level (and for which there is no
compelling argument at the moment) and that were largely used for canonicalization in clang.
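For illustration (this snippet is not part of the commit), the typical test update below is mechanical: CHECK lines that previously matched a `tail call` to the atomic libcalls now match a plain `call`, presumably because the pass that marked these calls as tail calls is among those no longer run at O1. A minimal sketch, assuming a RUN line and target triple on which 4-byte atomic builtins lower to the __atomic_*_4 libcalls (the real test pins such a triple); the function name is made up:

// Hypothetical reduced test, not taken from atomic-ops-libcall.c.
// Assumes a target where 4-byte atomics become __atomic_* libcalls.
int fetch_add_one(int *p) {
  // Old expectation (before this change):
  //   // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(
  // New expectation (after this change):
  //   // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(
  return __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);
}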
Original post:
http://lists.llvm.org/pipermail/llvm-dev/2019-April/131494.html
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65410
Diffstat (limited to 'clang/test/CodeGen/atomic-ops-libcall.c')
-rw-r--r-- | clang/test/CodeGen/atomic-ops-libcall.c | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/clang/test/CodeGen/atomic-ops-libcall.c b/clang/test/CodeGen/atomic-ops-libcall.c
index c673b07f8ed..ca79688c8a0 100644
--- a/clang/test/CodeGen/atomic-ops-libcall.c
+++ b/clang/test/CodeGen/atomic-ops-libcall.c
@@ -10,109 +10,109 @@ enum memory_order {
 int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
   // CHECK: test_c11_atomic_fetch_add_int_ptr
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
   return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
 }

 int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
   // CHECK: test_c11_atomic_fetch_sub_int_ptr
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
   return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
 }

 int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
   // CHECK: test_c11_atomic_fetch_add_int
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
   return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
 }

 int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
   // CHECK: test_c11_atomic_fetch_sub_int
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
   return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
 }

 int *fp2a(int **p) {
   // CHECK: @fp2a
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
   // Note, the GNU builtins do not multiply by sizeof(T)!
   return __atomic_fetch_sub(p, 4, memory_order_relaxed);
 }

 int test_atomic_fetch_add(int *p) {
   // CHECK: test_atomic_fetch_add
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_add(p, 55, memory_order_seq_cst);
 }

 int test_atomic_fetch_sub(int *p) {
   // CHECK: test_atomic_fetch_sub
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
 }

 int test_atomic_fetch_and(int *p) {
   // CHECK: test_atomic_fetch_and
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_and(p, 55, memory_order_seq_cst);
 }

 int test_atomic_fetch_or(int *p) {
   // CHECK: test_atomic_fetch_or
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_or(p, 55, memory_order_seq_cst);
 }

 int test_atomic_fetch_xor(int *p) {
   // CHECK: test_atomic_fetch_xor
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
 }

 int test_atomic_fetch_nand(int *p) {
   // CHECK: test_atomic_fetch_nand
-  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
 }

 int test_atomic_add_fetch(int *p) {
   // CHECK: test_atomic_add_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
   return __atomic_add_fetch(p, 55, memory_order_seq_cst);
 }

 int test_atomic_sub_fetch(int *p) {
   // CHECK: test_atomic_sub_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
   return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
 }

 int test_atomic_and_fetch(int *p) {
   // CHECK: test_atomic_and_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
   return __atomic_and_fetch(p, 55, memory_order_seq_cst);
 }

 int test_atomic_or_fetch(int *p) {
   // CHECK: test_atomic_or_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
   return __atomic_or_fetch(p, 55, memory_order_seq_cst);
 }

 int test_atomic_xor_fetch(int *p) {
   // CHECK: test_atomic_xor_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
   return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
 }

 int test_atomic_nand_fetch(int *p) {
   // CHECK: test_atomic_nand_fetch
-  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
   // FIXME: We should not be checking optimized IR. It changes independently of clang.
   // FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
   // FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
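As an aside on the test content itself (unchanged by this commit): the constants in the checked libcalls show that the C11 builtins scale pointer arithmetic by sizeof(T) while the GNU __atomic_* builtins do not, which is what the comment in fp2a points out; adding 3 to an _Atomic(int *) becomes i32 12, while subtracting 4 via __atomic_fetch_sub stays i32 4. A small sketch mirroring those two tests, with made-up function names:

// Hypothetical illustration, not part of the test file.
// C11 builtin on an atomic pointer: the addend 3 is scaled by sizeof(int),
// so the libcall receives 12 (matching the i32 12 in the CHECK line above).
int *c11_ptr_add(_Atomic(int *) *p) {
  return __c11_atomic_fetch_add(p, 3, __ATOMIC_SEQ_CST);
}

// GNU builtin on a plain pointer-to-pointer: the operand 4 is passed through
// unscaled, so the libcall receives 4 (matching the i32 4 checked in fp2a).
int *gnu_ptr_sub(int **p) {
  return __atomic_fetch_sub(p, 4, __ATOMIC_RELAXED);
}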