summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--llvm/lib/Transforms/IPO/Inliner.cpp13
-rw-r--r--llvm/test/Transforms/Inline/cgscc-cycle.ll109
-rw-r--r--llvm/test/Transforms/Inline/monster_scc.ll46
3 files changed, 128 insertions, 40 deletions
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
index 3275226925c..66aea45323f 100644
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -1158,10 +1158,19 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// SCC splits and merges. To avoid this, we capture the originating caller
// node and the SCC containing the call edge. This is a slight over
// approximation of the possible inlining decisions that must be avoided,
- // but is relatively efficient to store.
+ // but is relatively efficient to store. We use C != OldC to know when
+ // a new SCC is generated, and that the original SCC may be regenerated
+ // via merges in later iterations.
+ //
+ // It is also possible that, even if no new SCC is generated
+ // (i.e., C == OldC), the original SCC could be split and then merged
+ // back into the same one as itself; the original SCC will then be added
+ // into UR.CWorklist again, so we want to catch such cases too.
+ //
// FIXME: This seems like a very heavyweight way of retaining the inline
// history, we should look for a more efficient way of tracking it.
- if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) {
+ if ((C != OldC || UR.CWorklist.count(OldC)) &&
+ llvm::any_of(InlinedCallees, [&](Function *Callee) {
return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
})) {
LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
diff --git a/llvm/test/Transforms/Inline/cgscc-cycle.ll b/llvm/test/Transforms/Inline/cgscc-cycle.ll
index 69874c3ef2f..bc3bdc99fff 100644
--- a/llvm/test/Transforms/Inline/cgscc-cycle.ll
+++ b/llvm/test/Transforms/Inline/cgscc-cycle.ll
@@ -5,7 +5,7 @@
; some out-of-band way to prevent infinitely re-inlining and re-transforming the
; code.
;
-; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -S | FileCheck %s
+; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -inline-threshold=50 -S | FileCheck %s
; The `test1_*` collection of functions form a directly cycling pattern.
@@ -123,3 +123,110 @@ bb2:
ret void
}
+
+; Another infinite inlining case. The initial callgraph is like following:
+;
+; test3_a <---> test3_b
+; | ^
+; v |
+; test3_c <---> test3_d
+;
+; For all the call edges in the call graph, only test3_c and test3_d can be
+; inlined into test3_a, and no other call edge can be inlined.
+;
+; After test3_c is inlined into test3_a, the original call edge test3_a->test3_c
+; will be removed, a new call edge will be added, and the call graph becomes:
+;
+; test3_a <---> test3_b
+; \ ^
+; v /
+; test3_c <---> test3_d
+; But test3_a, test3_b, test3_c and test3_d still belong to the same SCC.
+;
+; Then after test3_a->test3_d is inlined, when test3_a->test3_d is converted to
+; a ref edge, the original SCC will be split into two: {test3_c, test3_d} and
+; {test3_a, test3_b}; immediately after that, the newly added ref edge
+; test3_a->test3_c will be converted to a call edge, and the two SCCs will be
+; merged into the original one again. During this cycle, the original SCC is
+; added into UR.CWorklist again, which creates an infinite loop.
+
+@a = global i64 0
+@b = global i64 0
+
+define void @test3_c(i32 %i) {
+entry:
+ %cmp = icmp eq i32 %i, 5
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %call = tail call i64 @random()
+ %t0 = load i64, i64* @a
+ %add = add nsw i64 %t0, %call
+ store i64 %add, i64* @a
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ tail call void @test3_d(i32 %i)
+ %t6 = load i64, i64* @a
+ %add85 = add nsw i64 %t6, 1
+ store i64 %add85, i64* @a
+ ret void
+}
+
+declare i64 @random()
+
+define void @test3_d(i32 %i) {
+entry:
+ %cmp = icmp eq i32 %i, 5
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %call = tail call i64 @random()
+ %t0 = load i64, i64* @a
+ %add = add nsw i64 %t0, %call
+ store i64 %add, i64* @a
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ tail call void @test3_c(i32 %i)
+ tail call void @test3_b()
+ %t6 = load i64, i64* @a
+ %add79 = add nsw i64 %t6, 3
+ store i64 %add79, i64* @a
+ ret void
+}
+
+; Function Attrs: noinline
+define void @test3_b() #0 {
+entry:
+ tail call void @test3_a()
+ %t0 = load i64, i64* @a
+ %add = add nsw i64 %t0, 2
+ store i64 %add, i64* @a
+ ret void
+}
+
+; Check test3_c is inlined into test3_a once and only once.
+; CHECK-LABEL: @test3_a(
+; CHECK: tail call void @test3_b()
+; CHECK-NEXT: tail call void @test3_d(i32 5)
+; CHECK-NEXT: %[[LD1:.*]] = load i64, i64* @a
+; CHECK-NEXT: %[[ADD1:.*]] = add nsw i64 %[[LD1]], 1
+; CHECK-NEXT: store i64 %[[ADD1]], i64* @a
+; CHECK-NEXT: %[[LD2:.*]] = load i64, i64* @b
+; CHECK-NEXT: %[[ADD2:.*]] = add nsw i64 %[[LD2]], 5
+; CHECK-NEXT: store i64 %[[ADD2]], i64* @b
+; CHECK-NEXT: ret void
+
+; Function Attrs: noinline
+define void @test3_a() #0 {
+entry:
+ tail call void @test3_b()
+ tail call void @test3_c(i32 5)
+ %t0 = load i64, i64* @b
+ %add = add nsw i64 %t0, 5
+ store i64 %add, i64* @b
+ ret void
+}
+
+attributes #0 = { noinline }
diff --git a/llvm/test/Transforms/Inline/monster_scc.ll b/llvm/test/Transforms/Inline/monster_scc.ll
index 0f8f1f21c8b..b32a2aed331 100644
--- a/llvm/test/Transforms/Inline/monster_scc.ll
+++ b/llvm/test/Transforms/Inline/monster_scc.ll
@@ -154,11 +154,7 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1gi(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
@@ -198,19 +194,11 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
-; NEW: call void @_Z1gi(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1gi(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
@@ -260,7 +248,7 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb0ELi2EEvPbS0_(i8* %B, i8* %E) {
entry:
@@ -304,21 +292,13 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
-; NEW: call void @_Z1gi(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1gi(
+; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
+; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb1ELi2EEvPbS0_(i8* %B, i8* %E) {
entry:
@@ -433,15 +413,7 @@ entry:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1gi(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
-; NEW-NOT: call
-; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
+; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb1ELi4EEvPbS0_(i8* %B, i8* %E) {
entry:
OpenPOWER on IntegriCloud