|           |                                          |                           |
|-----------|------------------------------------------|---------------------------|
| author    | John McCall <rjmccall@apple.com>         | 2011-06-24 21:55:10 +0000 |
| committer | John McCall <rjmccall@apple.com>         | 2011-06-24 21:55:10 +0000 |
| commit    | 23c29fea92a130313bd389f23a696165eae4a36b | (patch)                   |
| tree      | eb365d076c07900ff896e24907685ec3fb80dcc0 | /clang/test/CodeGenObjC   |
| parent    | 932e5b5d52be4f901bf5370ee6d98349ee209643 | (diff)                    |
Change the IR-generation of VLAs so that we capture bounds,
not sizes; so that we use well-typed allocas; and so that we
properly recurse through the full set of variably-modified types.
llvm-svn: 133827
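The difference is visible in the CHECK lines updated below: the old scheme computed a byte size, emitted an `alloca i8`, and bitcast the result to the element type, while the new scheme captures the array bound once and emits a well-typed `alloca` whose element count is that bound. A minimal C sketch of the two lowerings (not part of the commit; the register names and the 4-byte `int` element size are illustrative):

```c
/* Sketch only: contrasts the two VLA lowerings described above.
 * The IR in the comments mirrors the shape of the CHECK lines in arc.m;
 * value names and alignments are illustrative, not taken from the commit. */
void test_vla(unsigned n) {
  int a[n];   /* variably-modified type; its bound is the value of 'n' */

  /* Old lowering (captures a byte size):
   *   %t0   = load i32* %n
   *   %t1   = zext i32 %t0 to i64
   *   %size = mul i64 4, %t1             ; size in bytes
   *   %raw  = alloca i8, i64 %size, align 16
   *   %vla  = bitcast i8* %raw to i32*
   *
   * New lowering (captures the bound, uses a well-typed alloca):
   *   %t0   = load i32* %n
   *   %dim  = zext i32 %t0 to i64        ; captured bound
   *   %vla  = alloca i32, i64 %dim, align 16
   */
  a[0] = 0;   /* touch the array so it is not trivially unused */
}
```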
Diffstat (limited to 'clang/test/CodeGenObjC')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | clang/test/CodeGenObjC/arc.m | 37 |

1 file changed, 20 insertions, 17 deletions
```diff
diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m
index 1d5b4f42205..2a51454e758 100644
--- a/clang/test/CodeGenObjC/arc.m
+++ b/clang/test/CodeGenObjC/arc.m
@@ -487,23 +487,23 @@ void test20(unsigned n) {
   id x[n];
 
   // Capture the VLA size.
+  // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
+  // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64
+
+  // Save the stack pointer.
   // CHECK-NEXT: [[T0:%.*]] = call i8* @llvm.stacksave()
   // CHECK-NEXT: store i8* [[T0]], i8** [[SAVED_STACK]]
-  // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
-  // CHECK-NEXT: [[T1:%.*]] = zext i32 [[T0]] to i64
-  // CHECK-NEXT: [[VLA_SIZE:%.*]] = mul i64 8, [[T1]]
 
   // Allocate the VLA.
-  // CHECK-NEXT: [[T0:%.*]] = alloca i8, i64 [[VLA_SIZE]], align 16
-  // CHECK-NEXT: [[VLA:%.*]] = bitcast i8* [[T0]] to i8**
+  // CHECK-NEXT: [[VLA:%.*]] = alloca i8*, i64 [[DIM]], align 16
 
   // Zero-initialize.
   // CHECK-NEXT: [[T0:%.*]] = bitcast i8** [[VLA]] to i8*
-  // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[VLA_SIZE]], i32 8, i1 false)
+  // CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[DIM]], 8
+  // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T1]], i32 8, i1 false)
 
   // Destroy.
-  // CHECK-NEXT: [[VLA_COUNT:%.*]] = udiv i64 [[VLA_SIZE]], 8
-  // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[VLA]], i64 [[VLA_COUNT]]
+  // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[VLA]], i64 [[DIM]]
   // CHECK-NEXT: br label
 
   // CHECK: [[CUR:%.*]] = phi i8**
@@ -529,25 +529,28 @@ void test21(unsigned n) {
   id x[2][n][3];
 
   // Capture the VLA size.
+  // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
+  // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64
+
   // CHECK-NEXT: [[T0:%.*]] = call i8* @llvm.stacksave()
   // CHECK-NEXT: store i8* [[T0]], i8** [[SAVED_STACK]]
-  // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
-  // CHECK-NEXT: [[T1:%.*]] = zext i32 [[T0]] to i64
-  // CHECK-NEXT: [[T2:%.*]] = mul i64 24, [[T1]]
-  // CHECK-NEXT: [[VLA_SIZE:%.*]] = mul i64 [[T2]], 2
+
 
   // Allocate the VLA.
-  // CHECK-NEXT: [[T0:%.*]] = alloca i8, i64 [[VLA_SIZE]], align 16
-  // CHECK-NEXT: [[VLA:%.*]] = bitcast i8* [[T0]] to [3 x i8*]*
+  // CHECK-NEXT: [[T0:%.*]] = mul nuw i64 2, [[DIM]]
+  // CHECK-NEXT: [[VLA:%.*]] = alloca [3 x i8*], i64 [[T0]], align 16
 
   // Zero-initialize.
   // CHECK-NEXT: [[T0:%.*]] = bitcast [3 x i8*]* [[VLA]] to i8*
-  // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[VLA_SIZE]], i32 8, i1 false)
+  // CHECK-NEXT: [[T1:%.*]] = mul nuw i64 2, [[DIM]]
+  // CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[T1]], 24
+  // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T2]], i32 8, i1 false)
 
   // Destroy.
+  // CHECK-NEXT: [[T0:%.*]] = mul nuw i64 2, [[DIM]]
   // CHECK-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [3 x i8*]* [[VLA]], i32 0, i32 0
-  // CHECK-NEXT: [[VLA_COUNT:%.*]] = udiv i64 [[VLA_SIZE]], 8
-  // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[BEGIN]], i64 [[VLA_COUNT]]
+  // CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[T0]], 3
+  // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[BEGIN]], i64 [[T1]]
   // CHECK-NEXT: br label
 
   // CHECK: [[CUR:%.*]] = phi i8**
```
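For the multidimensional case `id x[2][n][3]`, the updated CHECK lines recompute sizes and element counts from the captured bound rather than dividing a saved byte size. A small sketch of that arithmetic, assuming 8-byte object pointers (the constants 2, 24, and 3 come straight from the CHECK lines above; the helper itself is purely illustrative):

```c
/* Illustrative only: the size/count arithmetic encoded by the new CHECK
 * lines for `id x[2][n][3]`, assuming 8-byte pointers. */
#include <stdint.h>

void vla_counts(uint64_t dim) {     /* dim = zext of n */
  uint64_t rows     = 2 * dim;      /* [[T0]]/[[T1]]: number of [3 x i8*] rows */
  uint64_t mem_size = rows * 24;    /* [[T2]]: memset length, 3 pointers * 8 bytes per row */
  uint64_t elements = rows * 3;     /* destroy loop bound: id slots from BEGIN to END */
  (void)mem_size;
  (void)elements;
}
```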

