From e8262675a3b71e613a0595a01bd67d9e096b2cda Mon Sep 17 00:00:00 2001 From: Chris Lattner Date: Sat, 1 May 2010 01:05:10 +0000 Subject: The inliner has traditionally not considered call sites that appear due to inlining a callee as candidates for further inlining, but a recent patch made it do this if those call sites were indirect and became direct. Unfortunately, in bizarre cases (see testcase) doing this can cause us to infinitely inline mutually recursive functions into callers not in the cycle. Fix this by keeping track of the inline history from which callsite inline candidates got inlined from. This shouldn't affect any "real world" code, but is required for a follow on patch that is coming up next. llvm-svn: 102822 --- .../Transforms/Inline/noinline-recursive-fn.ll | 45 +++++++++++++++++++++- 1 file changed, 43 insertions(+), 2 deletions(-) (limited to 'llvm/test/Transforms/Inline/noinline-recursive-fn.ll') diff --git a/llvm/test/Transforms/Inline/noinline-recursive-fn.ll b/llvm/test/Transforms/Inline/noinline-recursive-fn.ll index dcae0243300..1d5ebbbf0fa 100644 --- a/llvm/test/Transforms/Inline/noinline-recursive-fn.ll +++ b/llvm/test/Transforms/Inline/noinline-recursive-fn.ll @@ -2,7 +2,7 @@ ; This effectively is just peeling off the first iteration of a loop, and the ; inliner heuristics are not set up for this.
-; RUN: opt -inline %s -S | grep "call void @foo(i32 42)" +; RUN: opt -inline %s -S | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.3" @@ -11,7 +11,6 @@ target triple = "x86_64-apple-darwin10.3" define internal void @foo(i32 %x) nounwind ssp { entry: - %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] %0 = icmp slt i32 %x, 0 ; [#uses=1] br i1 %0, label %return, label %bb @@ -25,8 +24,50 @@ return: ; preds = %entry ret void } + +;; CHECK: @bonk +;; CHECK: call void @foo(i32 42) define void @bonk() nounwind ssp { entry: call void @foo(i32 42) nounwind ssp ret void } + + + +;; Here is an indirect case that should not be infinitely inlined. + +define internal void @f1(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp { +entry: + %0 = bitcast i8* %Bar to void (i32, i8*, i8*)* + %1 = sub nsw i32 %x, 1 + call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind + volatile store i32 42, i32* @g, align 4 + ret void +} + +define internal void @f2(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp { +entry: + %0 = icmp slt i32 %x, 0 ; [#uses=1] + br i1 %0, label %return, label %bb + +bb: ; preds = %entry + %1 = bitcast i8* %Foo to void (i32, i8*, i8*)* ; [#uses=1] + call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind + volatile store i32 13, i32* @g, align 4 + ret void + +return: ; preds = %entry + ret void +} + + +; CHECK: @top_level +; CHECK: call void @f2(i32 122 +; Here we inline one instance of the cycle, but we don't want to completely +; unroll it. +define void @top_level() nounwind ssp { +entry: + call void @f2(i32 123, i8* bitcast (void (i32, i8*, i8*)* @f1 to i8*), i8* bitcast (void (i32, i8*, i8*)* @f2 to i8*)) nounwind ssp + ret void +} -- cgit v1.2.3