author | Tim Northover <tnorthover@apple.com> | 2015-11-09 19:56:35 +0000
committer | Tim Northover <tnorthover@apple.com> | 2015-11-09 19:56:35 +0000
commit | cc2a6e0608738498cf4e677baf8566460c7083b4 (patch)
tree | 982693aaff11a52f1e880f0c585554781d740391 /clang/test/CodeGen/atomic-ops.c
parent | 9c76869bc33c25d07d5b4ed3eacf310e3f3537cb (diff)
Atomics: support __c11_* calls on _Atomic struct types.
When a struct's size is not a power of 2, the corresponding _Atomic() type is
promoted to the nearest power of 2. We already handled normal C++ expressions of
this form correctly, but direct calls to the __c11_atomic_* builtins ended up
performing dodgy operations on the smaller non-atomic types (e.g. memcpy'ing too
many bytes). Later optimisations then removed these accesses as undefined
behaviour.
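
For concreteness, a minimal sketch of the affected pattern (the struct and
function names here are illustrative, not taken from the patch):

```c
#include <stdatomic.h>

struct Padded { char c[3]; };  /* sizeof == 3, not a power of 2 */

/* Clang promotes _Atomic(struct Padded) to 4 bytes so the operation can
 * be lock-free. Before this patch, the temporaries behind the builtin
 * call below were sized at 3 bytes, so lowering to a 4-byte atomic
 * operation copied one byte too many (the "memcpy too much" above). */
struct Padded get(_Atomic(struct Padded) *p) {
  return __c11_atomic_load(p, memory_order_seq_cst);
}
```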
This patch converts EmitAtomicExpr to allocate its temporaries at the full
atomic width, sidestepping the issue.
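
In C terms, the fixed lowering behaves roughly like the hand-written sketch
below. This is an illustration only, not the actual CodeGen output; the
temporary type and the `__atomic_load_n` call are assumptions standing in for
what EmitAtomicExpr now emits:

```c
#include <stdint.h>
#include <string.h>

struct Padded { char c[3]; };  /* as in the sketch above */

/* The temporary spans the full 4-byte atomic width, so the atomic
 * operation never touches memory beyond the buffer it was given. */
struct Padded get_lowered(_Atomic(struct Padded) *p) {
  uint32_t tmp = __atomic_load_n((uint32_t *)p, __ATOMIC_SEQ_CST);
  struct Padded ret;
  memcpy(&ret, &tmp, sizeof ret);  /* copy back only the 3 real bytes */
  return ret;
}
```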
llvm-svn: 252507
Diffstat (limited to 'clang/test/CodeGen/atomic-ops.c')
-rw-r--r-- | clang/test/CodeGen/atomic-ops.c | 18
1 file changed, 12 insertions, 6 deletions
diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c
index d8f7d28392a..353f77c37bd 100644
--- a/clang/test/CodeGen/atomic-ops.c
+++ b/clang/test/CodeGen/atomic-ops.c
@@ -179,8 +179,8 @@ struct S fd1(struct S *a) {
   // CHECK-LABEL: @fd1
   // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
   // CHECK: [[RET:%.*]] = alloca %struct.S, align 4
-  // CHECK: [[CALL:%.*]] = call i64 @__atomic_load_8(
   // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RET]] to i64*
+  // CHECK: [[CALL:%.*]] = call i64 @__atomic_load_8(
   // CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
   struct S ret;
   __atomic_load(a, &ret, memory_order_seq_cst);
@@ -195,8 +195,9 @@ void fd2(struct S *a, struct S *b) {
   // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
-  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
+  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
   // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
+  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
   // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
   // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
   // CHECK-NEXT: ret void
@@ -214,11 +215,12 @@ void fd3(struct S *a, struct S *b, struct S *c) {
   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
-  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
+  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
   // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
+  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
+  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
   // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
   // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
-  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
   // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4
 
   __atomic_exchange(a, b, c, memory_order_seq_cst);
@@ -235,9 +237,11 @@ _Bool fd4(struct S *a, struct S *b, struct S *c) {
   // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
   // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
-  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
-  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i8*
+  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
+  // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
   // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
+  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
+  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
   // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4
   // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]]
   // CHECK-NEXT: ret i1 [[CALL]]
@@ -312,6 +316,8 @@ struct Seventeen {
   char c[17];
 } seventeen;
 
+struct Incomplete;
+
 int lock_free(struct Incomplete *incomplete) {
   // CHECK-LABEL: @lock_free