author     Reid Kleckner <reid@kleckner.net>    2015-01-12 23:28:23 +0000
committer  Reid Kleckner <reid@kleckner.net>    2015-01-12 23:28:23 +0000
commit     bba20f06de1c11f8332387788914c70bef5607df (patch)
tree       9ba13ac3d54b7cd261ede31b73664fa8b6a28a24 /llvm
parent     64dae8354b11f1ee8e0f894195d648f659b4b515 (diff)
download   bcm5719-llvm-bba20f06de1c11f8332387788914c70bef5607df.tar.gz
           bcm5719-llvm-bba20f06de1c11f8332387788914c70bef5607df.zip
musttail: Only set the inreg flag for fastcall and vectorcall
Otherwise we'll attempt to forward ECX, EDX, and EAX for cdecl and stdcall
thunks, leaving us with no scratch registers for indirect call targets.

Fixes PR22052

llvm-svn: 225729
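The pattern at issue is a musttail forwarding thunk like @g_thunk in the test
below. A minimal sketch of that shape, written in the same pre-3.7 IR syntax
the test uses (the body is simplified from the test; the comments are added
here for illustration):

; The indirect call target arrives as the first argument; every argument,
; fixed and variadic, must be forwarded to it unchanged.
define void @g_thunk(i8* %fptr_i8, ...) {
  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
  musttail call void (i8*, ...)* %fptr(i8* %fptr_i8, ...)
  ret void
}

Under cdecl and stdcall no integer arguments are passed in registers, so with
this change EAX stays free to hold %fptr and the 32-bit thunk lowers to a
single indirect jump (the new 'jmpl *%eax' check below). Setting inreg
unconditionally instead reserved ECX, EDX, and EAX for argument forwarding,
leaving no scratch register for the call target.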
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/CodeGen/CallingConvLower.cpp      | 19
-rw-r--r--  llvm/test/CodeGen/X86/musttail-varargs.ll  | 14
2 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index fbe8b7c10e6..034ffb34b9c 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -181,15 +181,28 @@ void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
}
}
+static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
+ if (VT.isVector())
+ return true; // Assume -msse-regparm might be in effect.
+ if (!VT.isInteger())
+ return false;
+ if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
+ return true;
+ return false;
+}
+
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
MVT VT, CCAssignFn Fn) {
unsigned SavedStackOffset = StackOffset;
unsigned NumLocs = Locs.size();
- // Allocate something of this value type repeatedly with just the inreg flag
- // set until we get assigned a location in memory.
+ // Set the 'inreg' flag if it is used for this calling convention.
ISD::ArgFlagsTy Flags;
- Flags.setInReg();
+ if (isValueTypeInRegForCC(CallingConv, VT))
+ Flags.setInReg();
+
+ // Allocate something of this value type repeatedly until we get assigned a
+ // location in memory.
bool HaveRegParm = true;
while (HaveRegParm) {
if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
diff --git a/llvm/test/CodeGen/X86/musttail-varargs.ll b/llvm/test/CodeGen/X86/musttail-varargs.ll
index b6ca184e71f..7f105a13a6a 100644
--- a/llvm/test/CodeGen/X86/musttail-varargs.ll
+++ b/llvm/test/CodeGen/X86/musttail-varargs.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
+; RUN: llc < %s -enable-tail-merge=0 -mtriple=i686-windows | FileCheck %s --check-prefix=X86
; Test that we actually spill and reload all arguments in the variadic argument
; pack. Doing a normal call will clobber all argument registers, and we will
@@ -72,6 +73,12 @@ define void @f_thunk(i8* %this, ...) {
; WINDOWS-NOT: mov{{.}}ps
; WINDOWS: jmpq *{{.*}} # TAILCALL
+; No regparms on normal x86 conventions.
+
+; X86-LABEL: _f_thunk:
+; X86: calll _get_f
+; X86: jmpl *{{.*}} # TAILCALL
+
; This thunk shouldn't require any spills and reloads, assuming the register
; allocator knows what it's doing.
@@ -89,6 +96,9 @@ define void @g_thunk(i8* %fptr_i8, ...) {
; WINDOWS-NOT: movq
; WINDOWS: jmpq *%rcx # TAILCALL
+; X86-LABEL: _g_thunk:
+; X86: jmpl *%eax # TAILCALL
+
; Do a simple multi-exit multi-bb test.
%struct.Foo = type { i1, i8*, i8* }
@@ -124,3 +134,7 @@ else:
; WINDOWS: jne
; WINDOWS: jmpq *{{.*}} # TAILCALL
; WINDOWS: jmpq *{{.*}} # TAILCALL
+; X86-LABEL: _h_thunk:
+; X86: jne
+; X86: jmpl *{{.*}} # TAILCALL
+; X86: jmpl *{{.*}} # TAILCALL
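For the spill/reload coverage the test's leading comment describes, @f_thunk
has roughly the following shape (a simplified sketch, not the exact test body:
here @get_f is declared as returning i8* to keep the example short, while the
actual test declares it as returning a function pointer type):

declare i8* @get_f(i8* %this)

define void @f_thunk(i8* %this, ...) {
  ; The ordinary call clobbers every argument register, so any variadic
  ; arguments passed in registers have to be spilled before it and reloaded
  ; before the forwarding tail call.
  %fptr_i8 = call i8* @get_f(i8* %this)
  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
  musttail call void (i8*, ...)* %fptr(i8* %this, ...)
  ret void
}

On i686-windows that is exactly what the new X86 checks expect: a
'calll _get_f' followed by an indirect 'jmpl' tail call, with no register
arguments to keep alive across the call.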