author     Dale Johannesen <dalej@apple.com>    2009-10-09 00:11:32 +0000
committer  Dale Johannesen <dalej@apple.com>    2009-10-09 00:11:32 +0000
commit     3059924bddca79e0fea5f1473d9a66985e1fda4a (patch)
tree       2db52f2abaed31a15c57a898c145e904a7b9a91c /llvm/test
parent     816fe0a4aa54d8bb077b8191f7250fcea03d544c (diff)
download   bcm5719-llvm-3059924bddca79e0fea5f1473d9a66985e1fda4a.tar.gz
           bcm5719-llvm-3059924bddca79e0fea5f1473d9a66985e1fda4a.zip
When considering whether to inline Callee into Caller, and doing so would make Caller too big to inline, see whether it might be better to inline Caller into its own callers instead. This situation is described in PR 2973, although I haven't tried the specific case in SPASS.

llvm-svn: 83602
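This commit adds only the regression test below; the heuristic itself lives in the inliner's cost analysis, which is not part of this diff. As a rough, standalone illustration of the idea in the commit message, the following C++ sketch models the decision: skip inlining Callee into Caller when the resulting growth would leave Caller too big to be inlined into its own callers. All names, sizes, and the threshold value are illustrative assumptions, not LLVM's actual API.

// Standalone sketch of the deferral heuristic described above.
// Not LLVM code: Function, InlineThreshold, and shouldDeferToOuterInlining
// are hypothetical names used only for illustration.
#include <iostream>
#include <string>
#include <vector>

struct Function {
  std::string Name;
  unsigned Size;                          // rough instruction count
  bool HasLocalLinkage;                   // internal functions can be removed
  std::vector<const Function *> Callers;  // functions that call this one
};

// Hypothetical per-call-site inline threshold.
constexpr unsigned InlineThreshold = 200;

// Returns true if inlining Callee into Caller should be skipped because the
// size growth would block Caller itself from being inlined into its callers.
bool shouldDeferToOuterInlining(const Function &Caller, const Function &Callee) {
  unsigned GrownSize = Caller.Size + Callee.Size;

  // Caller is small enough to be inlined upward now, but would not be after
  // absorbing Callee.
  bool CallerInlinableNow = Caller.Size <= InlineThreshold;
  bool CallerInlinableAfter = GrownSize <= InlineThreshold;

  // Only interesting when Caller has callers to be inlined into and, being
  // local, could be removed entirely once inlined everywhere.
  bool CallerWorthInlining = !Caller.Callers.empty() && Caller.HasLocalLinkage;

  return CallerWorthInlining && CallerInlinableNow && !CallerInlinableAfter;
}

int main() {
  // Loosely mirrors the call graph of the test below: @test calls the
  // internal function @bar, and @bar calls @foo (sizes are made up).
  Function Test{"test", 20, false, {}};
  Function Bar{"bar", 60, true, {&Test}};
  Function Foo{"foo", 180, false, {&Bar}};

  // Inlining foo into bar would make bar too big to inline into test, so the
  // heuristic says to defer and let bar be inlined into test instead.
  std::cout << std::boolalpha
            << shouldDeferToOuterInlining(Bar, Foo) << "\n";  // prints: true
  return 0;
}

With the made-up sizes above, the check reports that inlining Foo into Bar should be deferred, which corresponds to the test's expectation that @bar is inlined into @test (and ultimately removed) rather than bloated beyond the threshold.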
Diffstat (limited to 'llvm/test')
-rw-r--r--   llvm/test/Transforms/Inline/nested-inline.ll | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/Inline/nested-inline.ll b/llvm/test/Transforms/Inline/nested-inline.ll
new file mode 100644
index 00000000000..12926671722
--- /dev/null
+++ b/llvm/test/Transforms/Inline/nested-inline.ll
@@ -0,0 +1,111 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+; Test that bar and bar2 are both inlined throughout and removed.
+@A = weak global i32 0 ; <i32*> [#uses=1]
+@B = weak global i32 0 ; <i32*> [#uses=1]
+@C = weak global i32 0 ; <i32*> [#uses=1]
+
+define fastcc void @foo(i32 %X) {
+entry:
+; CHECK: @foo
+ %ALL = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp1 = and i32 %X, 1 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 1, i32* @A
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp4 = and i32 %X, 2 ; <i32> [#uses=1]
+ %tmp4.upgrd.2 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
+ br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
+
+cond_true5: ; preds = %cond_next
+ store i32 1, i32* @B
+ br label %cond_next7
+
+cond_next7: ; preds = %cond_true5, %cond_next
+ %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
+ %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
+
+cond_true11: ; preds = %cond_next7
+ store i32 1, i32* @C
+ br label %cond_next13
+
+cond_next13: ; preds = %cond_true11, %cond_next7
+ %tmp16 = and i32 %X, 8 ; <i32> [#uses=1]
+ %tmp16.upgrd.4 = icmp eq i32 %tmp16, 0 ; <i1> [#uses=1]
+ br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
+
+cond_true17: ; preds = %cond_next13
+ call void @ext( i32* %ALL )
+ ret void
+
+UnifiedReturnBlock: ; preds = %cond_next13
+ ret void
+}
+
+; CHECK-NOT: @bar
+define internal fastcc void @bar(i32 %X) {
+entry:
+ %ALL = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp1 = and i32 %X, 1 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 1, i32* @A
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp4 = and i32 %X, 2 ; <i32> [#uses=1]
+ %tmp4.upgrd.2 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
+ br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
+
+cond_true5: ; preds = %cond_next
+ store i32 1, i32* @B
+ br label %cond_next7
+
+cond_next7: ; preds = %cond_true5, %cond_next
+ %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
+ %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
+
+cond_true11: ; preds = %cond_next7
+ store i32 1, i32* @C
+ br label %cond_next13
+
+cond_next13: ; preds = %cond_true11, %cond_next7
+ %tmp16 = and i32 %X, 8 ; <i32> [#uses=1]
+ %tmp16.upgrd.4 = icmp eq i32 %tmp16, 0 ; <i1> [#uses=1]
+ br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
+
+cond_true17: ; preds = %cond_next13
+ call void @foo( i32 %X )
+ ret void
+
+UnifiedReturnBlock: ; preds = %cond_next13
+ ret void
+}
+
+define internal fastcc void @bar2(i32 %X) {
+entry:
+ call void @foo( i32 %X )
+ ret void
+}
+
+declare void @ext(i32*)
+
+define void @test(i32 %X) {
+entry:
+; CHECK: test
+; CHECK-NOT: @bar
+ tail call fastcc void @bar( i32 %X )
+ tail call fastcc void @bar( i32 %X )
+ tail call fastcc void @bar2( i32 %X )
+ tail call fastcc void @bar2( i32 %X )
+ ret void
+; CHECK: ret
+}