author    Chad Rosier <mcrosier@apple.com>  2011-05-20 00:59:28 +0000
committer Chad Rosier <mcrosier@apple.com>  2011-05-20 00:59:28 +0000
commit    552f8c4819dba0970591da3c15532f76eb42aea2 (patch)
tree      6a7ece8337399d1870de6f5d9d2459878a5add51 /llvm
parent    e8d2e9eb351724552453425578fdcd2fd39c8fc0 (diff)
download  bcm5719-llvm-552f8c4819dba0970591da3c15532f76eb42aea2.tar.gz
          bcm5719-llvm-552f8c4819dba0970591da3c15532f76eb42aea2.zip
Don't attempt to tail call optimize for Win64.
llvm-svn: 131709
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  | 13
-rw-r--r--  llvm/test/CodeGen/X86/vararg_tailcall.ll |  4
2 files changed, 9 insertions, 8 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e5156f8d413..ce1dc09760a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2531,17 +2531,18 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
       return false;
   // Do not sibcall optimize vararg calls unless all arguments are passed via
-  // registers
+  // registers.
   if (isVarArg && !Outs.empty()) {
+
+    // Optimizing for varargs on Win64 is unlikely to be safe without
+    // additional testing.
+    if (Subtarget->isTargetWin64())
+      return false;
+
     SmallVector<CCValAssign, 16> ArgLocs;
     CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
                    ArgLocs, *DAG.getContext());
-    // Allocate shadow area for Win64
-    if (Subtarget->isTargetWin64()) {
-      CCInfo.AllocateStack(32, 8);
-    }
-
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
       if (!ArgLocs[i].isRegLoc())
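
In short, the hunk above replaces the Win64-specific shadow-area reservation inside the vararg check with an unconditional early bail-out: on Win64, a vararg call with outgoing arguments is never considered for sibcall optimization. A minimal standalone sketch of that decision, using hypothetical names (CallSite, eligibleForVarArgSibcall, allArgsInRegisters) rather than the real LLVM types:

// Standalone model of the decision made in IsEligibleForTailCallOptimization
// after this patch; the struct and helper below are illustrative stand-ins,
// not LLVM API.
#include <iostream>

struct CallSite {
  bool isVarArg;            // callee is variadic
  bool hasOutgoingArgs;     // "Outs" is non-empty in the real code
  bool isTargetWin64;       // Subtarget->isTargetWin64()
  bool allArgsInRegisters;  // outcome of the CC_X86 location analysis
};

bool eligibleForVarArgSibcall(const CallSite &cs) {
  if (cs.isVarArg && cs.hasOutgoingArgs) {
    // New in this patch: never sibcall-optimize vararg calls on Win64.
    if (cs.isTargetWin64)
      return false;
    // Otherwise, every outgoing argument must be assigned to a register.
    if (!cs.allArgsInRegisters)
      return false;
  }
  return true;
}

int main() {
  const CallSite win64{true, true, true, true};
  const CallSite sysv{true, true, false, true};
  std::cout << "Win64 vararg call eligible: " << eligibleForVarArgSibcall(win64) << '\n';  // prints 0
  std::cout << "SysV vararg call eligible:  " << eligibleForVarArgSibcall(sysv) << '\n';   // prints 1
  return 0;
}
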
diff --git a/llvm/test/CodeGen/X86/vararg_tailcall.ll b/llvm/test/CodeGen/X86/vararg_tailcall.ll
index d50b83acc8a..73d80ebc1d5 100644
--- a/llvm/test/CodeGen/X86/vararg_tailcall.ll
+++ b/llvm/test/CodeGen/X86/vararg_tailcall.ll
@@ -12,7 +12,7 @@
 ; X64: @foo
 ; X64: jmp
 ; WIN64: @foo
-; WIN64: jmp
+; WIN64: callq
 define void @foo(i64 %arg) nounwind optsize ssp noredzone {
 entry:
   %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i64 0, i64 0), i64 %arg) nounwind optsize noredzone
@@ -36,7 +36,7 @@ declare void @bar2(i8*, i64) optsize noredzone
 ; X64: @foo2
 ; X64: jmp
 ; WIN64: @foo2
-; WIN64: jmp
+; WIN64: callq
 define i8* @foo2(i8* %arg) nounwind optsize ssp noredzone {
 entry:
   %tmp1 = load i8** @sel, align 8, !tbaa !0
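
The IR pattern these CHECK lines exercise is a call to a variadic function in tail position from a thin non-variadic wrapper. A rough C++ analogue of @foo (an illustration, not part of the test suite):

// Hypothetical source-level analogue of @foo above: forward an integer to
// printf, a variadic callee, as the last action of the function. Per the
// CHECK lines, the SysV x86-64 backend may lower such a call as a sibling
// call ("jmp"), while the Win64 backend now always emits a normal "callq".
#include <cstdio>

void foo(long arg) {
  std::printf("%ld\n", arg);  // vararg call in tail position
}

int main() {
  foo(42);
  return 0;
}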