diff options
author | Akira Hatanaka <ahatanaka@apple.com> | 2016-04-12 23:10:58 +0000 |
---|---|---|
committer | Akira Hatanaka <ahatanaka@apple.com> | 2016-04-12 23:10:58 +0000 |
commit | 2d3690bc986c9d852ffd96f96ba274750ef084b3 (patch) | |
tree | 70328df8e83bf56e94ece9e321653bd894410495 /clang/test | |
parent | 16a7d637dd4432cbe35b9088cc339c838c4ea64d (diff) | |
download | bcm5719-llvm-2d3690bc986c9d852ffd96f96ba274750ef084b3.tar.gz bcm5719-llvm-2d3690bc986c9d852ffd96f96ba274750ef084b3.zip |
[ObjC] Pop all cleanups created in EmitObjCForCollectionStmt before
exiting the for-in loop.
This commit fixes a bug where EmitObjCForCollectionStmt didn't pop
cleanups for captures.
For example, in the following for-in loop, a block which captures self
is passed to foo1:
for (id x in [self foo1:^{ use(self); }]) {
use(x);
break;
}
Previously, the code in EmitObjCForCollectionStmt wouldn't pop the
cleanup for the captured self before exiting the loop, which caused
code-gen to generate IR in which objc_release was called twice on
the captured self.
This commit fixes the bug by entering a RunCleanupsScope before the
loop condition is evaluated and forcing its cleanup before exiting the
loop.
rdar://problem/16865751
Differential Revision: http://reviews.llvm.org/D18618
llvm-svn: 266147
Diffstat (limited to 'clang/test')
-rw-r--r-- | clang/test/CodeGenObjC/arc-foreach.m | 51 |
1 files changed, 51 insertions, 0 deletions
diff --git a/clang/test/CodeGenObjC/arc-foreach.m b/clang/test/CodeGenObjC/arc-foreach.m index 90d9c1f1261..db150e88a59 100644 --- a/clang/test/CodeGenObjC/arc-foreach.m +++ b/clang/test/CodeGenObjC/arc-foreach.m @@ -170,4 +170,55 @@ void test3(NSArray *array) { // CHECK-LP64-NEXT: br label [[L]] } +@interface NSObject @end + +@interface I1 : NSObject +- (NSArray *) foo1:(void (^)(void))block; +- (void) foo2; +@end + +NSArray *array4; + +@implementation I1 : NSObject +- (NSArray *) foo1:(void (^)(void))block { + block(); + return array4; +} + +- (void) foo2 { + for (id x in [self foo1:^{ use(self); }]) { + use(x); + break; + } +} +@end + +// CHECK-LP64-LABEL: define internal void @"\01-[I1 foo2]"( +// CHECK-LP64: [[SELF_ADDR:%.*]] = alloca [[TY:%.*]]*, +// CHECK-LP64: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, +// CHECK-LP64: store [[TY]]* %self, [[TY]]** [[SELF_ADDR]] +// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5 +// CHECK-LP64: [[BC:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5 +// CHECK-LP64: [[T1:%.*]] = load [[TY]]*, [[TY]]** [[SELF_ADDR]] +// CHECK-LP64: [[T2:%.*]] = bitcast [[TY]]* [[T1]] to i8* +// CHECK-LP64: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) +// CHECK-LP64: [[T4:%.*]] = bitcast i8* [[T3]] to [[TY]]* +// CHECK-LP64: store [[TY]]* [[T4]], [[TY]]** [[BC]] + +// CHECK-LP64: [[T5:%.*]] = bitcast [[TY]]** [[T0]] to i8** +// CHECK-LP64: call void @objc_storeStrong(i8** [[T5]], i8* null) +// CHECK-LP64: switch i32 {{%.*}}, label %[[UNREACHABLE:.*]] [ +// CHECK-LP64-NEXT: i32 0, label %[[CLEANUP_CONT:.*]] +// CHECK-LP64-NEXT: i32 2, label %[[FORCOLL_END:.*]] +// CHECK-LP64-NEXT: ] + +// CHECK-LP64: 
{{^|:}}[[CLEANUP_CONT]] +// CHECK-LP64-NEXT: br label %[[FORCOLL_END]] + +// CHECK-LP64: {{^|:}}[[FORCOLL_END]] +// CHECK-LP64-NEXT: ret void + +// CHECK-LP64: {{^|:}}[[UNREACHABLE]] +// CHECK-LP64-NEXT: unreachable + // CHECK-LP64: attributes [[NUW]] = { nounwind } |