| | | |
|---|---|---|
| author | Daniel Neilson <dneilson@azul.com> | 2018-01-19 17:12:54 +0000 |
| committer | Daniel Neilson <dneilson@azul.com> | 2018-01-19 17:12:54 +0000 |
| commit | 6e938effaaf2016eb76e1b73aba2aa38a245cb70 (patch) | |
| tree | 965da6011c9ae6d115f9be07a2929fffcede713a /clang/test/CodeGen/c11atomics-ios.c | |
| parent | 751a2cebc5bef43730ff3289d3272121fb1a1066 (diff) | |
Change memcpy/memmove/memset to have dest and source alignment attributes (Step 1).
Summary:
Upstream LLVM is changing the prototypes of the @llvm.memcpy/memmove/memset
intrinsics. This change updates the Clang tests accordingly.
The @llvm.memcpy/memmove/memset intrinsics currently take an explicit argument
which is required to be a constant integer. It represents the alignment of both
the destination and the source, and so must be the minimum of the two actual
alignments.
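For instance (an illustrative call, not taken from this patch), with an 8-byte-aligned destination and a 2-byte-aligned source the argument must be the minimum of the two:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 2, i1 false)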
This change removes the alignment argument in favour of placing the alignment
attribute on the source and destination pointers of the memory intrinsic call.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
At this time the source and destination alignments must be the same (Step 1).
Step 2 of the change, to be landed shortly, will relax that constraint and allow
the source and destination to have different alignments.
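For example (again illustrative, not part of this patch), a call could then carry distinct alignments:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 2 %src, i32 100, i1 false)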
llvm-svn: 322964
Diffstat (limited to 'clang/test/CodeGen/c11atomics-ios.c')
| | | |
|---|---|---|
| -rw-r--r-- | clang/test/CodeGen/c11atomics-ios.c | 30 |

1 file changed, 15 insertions, 15 deletions
```diff
diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
index fb731dfd7a1..6e3170f1237 100644
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -132,7 +132,7 @@ void testStruct(_Atomic(S) *fp) {
 // CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 8, i32 2, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[TMP0]] to i64*
 // CHECK-NEXT: [[T4:%.*]] = load i64, i64* [[T3]], align 8
 // CHECK-NEXT: [[T5:%.*]] = bitcast [[S]]* [[T0]] to i64*
@@ -154,7 +154,7 @@ void testPromotedStruct(_Atomic(PS) *fp) {
 // CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -165,7 +165,7 @@ void testPromotedStruct(_Atomic(PS) *fp) {
   __c11_atomic_init(fp, (PS){1,2,3});
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[X]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -183,16 +183,16 @@ void testPromotedStruct(_Atomic(PS) *fp) {
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 6, i1 false)
   PS f = *fp;
 // CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
 // CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T1]], i8 0, i32 8, i1 false)
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T2]], i8* [[T3]], i32 6, i32 2, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
 // CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[TMP1]] to i64*
 // CHECK-NEXT: [[T5:%.*]] = load i64, i64* [[T4]], align 8
 // CHECK-NEXT: [[T6:%.*]] = bitcast [[APS]]* [[T0]] to i64*
@@ -215,7 +215,7 @@ PS test_promoted_load(_Atomic(PS) *addr) {
 // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
 // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
 // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[AGG_RESULT8]], i8* [[ATOMIC_RES8]], i32 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_load(addr, 5);
 }
@@ -232,11 +232,11 @@ void test_promoted_store(_Atomic(PS) *addr, PS *val) {
 // CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
 // CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[VAL8]], i32 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
 // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
 // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_VAL8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
 // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
 // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
 // CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8
@@ -257,11 +257,11 @@ PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) {
 // CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
 // CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[VAL8]], i32 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
 // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
 // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_VAL8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
 // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
 // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
 // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
@@ -270,7 +270,7 @@ PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) {
 // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
 // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
 // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[AGG_RESULT8]], i8* [[ATOMIC_RES8]], i32 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_exchange(addr, *val, 5);
 }
@@ -291,15 +291,15 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
 // CHECK: [[NEW:%.*]] = load %struct.PS*, %struct.PS** [[NEW_ARG]], align 4
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
 // CHECK: [[NEW8:%.*]] = bitcast %struct.PS* [[NEW]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[NEW8]], i32 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[NEW8]], i32 6, i1 false)
 // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
 // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8*
 // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_DESIRED8]], i8* [[DESIRED8]], i64 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
 // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64*
 // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
 // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_NEW8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
 // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
 // CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8
 // CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
```
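For context, the struct shapes behind these CHECK lines can be inferred from the IR itself; the following is a reconstruction, not a quote from the test file:

```c
/* Reconstructed from the IR above (hypothetical declarations): the
 * 8-byte, align-2 memcpy matches a struct of four shorts (S), and the
 * 6-byte, align-2 memcpy matches a struct of three shorts (PS), which
 * the atomic lowering pads to 8 bytes ({ %struct.PS, [2 x i8] }) so it
 * can be loaded and stored as a single i64. */
typedef struct { short x, y, z, w; } S;
typedef struct { short x, y, z; } PS;

_Static_assert(sizeof(S) == 8 && _Alignof(S) == 2, "matches i32 8, align 2");
_Static_assert(sizeof(PS) == 6 && _Alignof(PS) == 2, "matches i32 6, align 2");
```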