-rw-r--r--   llvm/lib/Analysis/InlineCost.cpp              | 12
-rw-r--r--   llvm/test/Transforms/Inline/inline-varargs.ll | 29
2 files changed, 34 insertions, 7 deletions
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 1d879f8aee9..fb032e0404c 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -137,7 +137,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
   bool HasReturn;
   bool HasIndirectBr;
   bool HasUninlineableIntrinsic;
-  bool UsesVarArgs;
+  bool InitsVargArgs;
 
   /// Number of bytes allocated statically by the callee.
   uint64_t AllocatedSize;
@@ -283,7 +283,7 @@ public:
         IsCallerRecursive(false), IsRecursiveCall(false),
         ExposesReturnsTwice(false), HasDynamicAlloca(false),
         ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
-        HasUninlineableIntrinsic(false), UsesVarArgs(false), AllocatedSize(0),
+        HasUninlineableIntrinsic(false), InitsVargArgs(false), AllocatedSize(0),
         NumInstructions(0), NumVectorInstructions(0), VectorBonus(0),
         SingleBBBonus(0), EnableLoadElimination(true), LoadEliminationCost(0),
         NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
@@ -1239,8 +1239,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
         HasUninlineableIntrinsic = true;
         return false;
       case Intrinsic::vastart:
-      case Intrinsic::vaend:
-        UsesVarArgs = true;
+        InitsVargArgs = true;
         return false;
       }
     }
@@ -1587,7 +1586,7 @@ CallAnalyzer::analyzeBlock(BasicBlock *BB,
       IR = "indirect branch";
     else if (HasUninlineableIntrinsic)
       IR = "uninlinable intrinsic";
-    else if (UsesVarArgs)
+    else if (InitsVargArgs)
       IR = "varargs";
     if (!IR) {
       if (ORE)
@@ -2079,9 +2078,8 @@ bool llvm::isInlineViable(Function &F) {
         // Disallow inlining functions that call @llvm.localescape. Doing this
         // correctly would require major changes to the inliner.
         case llvm::Intrinsic::localescape:
-        // Disallow inlining of functions that access VarArgs.
+        // Disallow inlining of functions that initialize VarArgs with va_start.
         case llvm::Intrinsic::vastart:
-        case llvm::Intrinsic::vaend:
           return false;
         }
     }
diff --git a/llvm/test/Transforms/Inline/inline-varargs.ll b/llvm/test/Transforms/Inline/inline-varargs.ll
index e84db690eb4..d229ef39d59 100644
--- a/llvm/test/Transforms/Inline/inline-varargs.ll
+++ b/llvm/test/Transforms/Inline/inline-varargs.ll
@@ -84,6 +84,35 @@ define i32 @call_vargs() {
 ; CHECK: %res1 = call i32 (...) @varg_accessed(i32 10)
 ; CHECK-NEXT: %res2 = call i32 (...) @varg_accessed_alwaysinline(i32 15)
 
+define void @caller_with_vastart(i8* noalias nocapture readnone %args, ...) {
+entry:
+  %ap = alloca i8*, align 4
+  %ap.ptr = bitcast i8** %ap to i8*
+  %ap2 = alloca i8*, align 4
+  %ap2.ptr = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* nonnull %ap.ptr)
+  call fastcc void @callee_with_vaend(i8* nonnull %ap.ptr)
+  call void @llvm.va_start(i8* nonnull %ap2.ptr)
+  call fastcc void @callee_with_vaend_alwaysinline(i8* nonnull %ap2.ptr)
+  ret void
+}
+
+define internal fastcc void @callee_with_vaend_alwaysinline(i8* %a) alwaysinline {
+entry:
+  tail call void @llvm.va_end(i8* %a)
+  ret void
+}
+
+define internal fastcc void @callee_with_vaend(i8* %a) {
+entry:
+  tail call void @llvm.va_end(i8* %a)
+  ret void
+}
+
+; CHECK-LABEL: @caller_with_vastart
+; CHECK-NOT: @callee_with_vaend
+; CHECK-NOT: @callee_with_vaend_alwaysinline
+
 declare void @llvm.va_start(i8*)
 declare void @llvm.va_end(i8*)
 
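For reference, a minimal C++ sketch (not part of the commit) of the policy the patch leaves in place: a callee is rejected only when it initializes varargs via @llvm.va_start, while a bare @llvm.va_end no longer blocks inlining. The helper name containsVaStart below is hypothetical; the loop merely mirrors the condition that CallAnalyzer::visitCallSite and isInlineViable() now test for.

#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"

// Hypothetical helper: returns true if F contains @llvm.va_start, the only
// varargs intrinsic that still makes a callee uninlinable after this change.
static bool containsVaStart(const llvm::Function &F) {
  for (const llvm::BasicBlock &BB : F)
    for (const llvm::Instruction &I : BB)
      if (const auto *II = llvm::dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::vastart)
          return true;  // callee initializes varargs: still rejected
  return false;         // @llvm.va_end alone is now allowed

}

The intent, as reflected in the renamed flag and updated comment in the diff, is that only va_start ties a callee to its own variadic frame; va_end merely finishes use of a va_list it was handed, so callees such as @callee_with_vaend in the test can be inlined.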

