author     Charles Davis <cdavis5x@gmail.com>   2013-07-12 06:02:35 +0000
committer  Charles Davis <cdavis5x@gmail.com>   2013-07-12 06:02:35 +0000
commit     e8f297ca9494b5cc99628b779b7959b2b84def1d (patch)
tree       4d59098084ac18e62462041cf37f6935dd755269 /llvm/lib/Target
parent     aaaec3db98f4a7b96c20d1b8ffeb80a5cb924127 (diff)
Target/X86: Add explicit Win64 and System V/x86-64 calling conventions.
Summary:
This patch adds explicit calling convention types for the Win64 and
System V/x86-64 ABIs. This allows code to override the default and use the
Win64 convention on a target that wants to use SysV (and vice versa). This
is needed to implement the `ms_abi` and `sysv_abi` GNU attributes.

llvm-svn: 186144
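For context, a minimal sketch of the user-level attributes this enables
(hypothetical example code, not part of the patch): once `ms_abi` and
`sysv_abi` are wired up, a single function can opt into the other ABI's
convention regardless of the target default.

// Hypothetical C++ usage of the GNU attributes named in the summary.
// Under the Win64 convention the first two integer arguments arrive in
// RCX and RDX; under System V/x86-64 they arrive in RDI and RSI.
__attribute__((ms_abi))   long long add_win64(long long a, long long b) {
  return a + b;
}
__attribute__((sysv_abi)) long long add_sysv(long long a, long long b) {
  return a + b;
}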
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86CallingConv.td    |  7
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp      | 25
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  | 30
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.h       |  8
4 files changed, 45 insertions, 25 deletions
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 9eafbd55a5a..40c5d91b605 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -156,6 +156,11 @@ def RetCC_X86_32 : CallingConv<[
def RetCC_X86_64 : CallingConv<[
// HiPE uses RetCC_X86_64_HiPE
CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
+
+ // Handle explicit CC selection
+ CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
+ CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
+
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
@@ -489,6 +494,8 @@ def CC_X86_32 : CallingConv<[
def CC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
+ CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
+ CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
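The two new CCIfCC entries sit ahead of the isTargetWin64() subtarget
checks, so an explicit convention always wins over the target default.
Roughly, the dispatch TableGen emits into X86GenCallingConv.inc looks like
the hand-simplified sketch below (an assumption about the generated shape,
not verbatim output; by LLVM convention a CCAssignFn returns true when it
fails to assign the value).

static bool CC_X86_64_sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Explicit convention overrides are checked first...
  if (State.getCallingConv() == CallingConv::X86_64_Win64)
    return CC_X86_Win64_C(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  if (State.getCallingConv() == CallingConv::X86_64_SysV)
    return CC_X86_64_C(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  // ...then the subtarget-based rules (isTargetWin64(), etc.).
  return true; // nothing here assigned the value
}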
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index f8f06f60164..9c91e935f0b 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -712,10 +712,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
CallingConv::ID CC = F.getCallingConv();
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall)
+ CC != CallingConv::X86_FastCall &&
+ CC != CallingConv::X86_64_SysV)
return false;
- if (Subtarget->isTargetWin64())
+ if (Subtarget->isCallingConvWin64(CC))
return false;
// Don't handle popping bytes on return for now.
@@ -1705,9 +1706,6 @@ bool X86FastISel::FastLowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
- if (Subtarget->isTargetWin64())
- return false;
-
const Function *F = FuncInfo.Fn;
if (F->isVarArg())
return false;
@@ -1715,7 +1713,10 @@ bool X86FastISel::FastLowerArguments() {
CallingConv::ID CC = F->getCallingConv();
if (CC != CallingConv::C)
return false;
-
+
+ if (Subtarget->isCallingConvWin64(CC))
+ return false;
+
if (!Subtarget->is64Bit())
return false;
@@ -1817,8 +1818,10 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Handle only C and fastcc calling conventions for now.
ImmutableCallSite CS(CI);
CallingConv::ID CC = CS.getCallingConv();
+ bool isWin64 = Subtarget->isCallingConvWin64(CC);
if (CC != CallingConv::C && CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall)
+ CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 &&
+ CC != CallingConv::X86_64_SysV)
return false;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -1832,7 +1835,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Don't know how to handle Win64 varargs yet. Nothing special needed for
// x86-32. Special handling for x86-64 is implemented.
- if (isVarArg && Subtarget->isTargetWin64())
+ if (isVarArg && isWin64)
return false;
// Fast-isel doesn't know about callee-pop yet.
@@ -1962,7 +1965,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
I->getParent()->getContext());
// Allocate shadow area for Win64
- if (Subtarget->isTargetWin64())
+ if (isWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
@@ -2078,7 +2081,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
X86::EBX).addReg(Base);
}
- if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) {
+ if (Subtarget->is64Bit() && isVarArg && !isWin64) {
// Count the number of XMM registers allocated.
static const uint16_t XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
@@ -2147,7 +2150,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (Subtarget->isPICStyleGOT())
MIB.addReg(X86::EBX, RegState::Implicit);
- if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
+ if (Subtarget->is64Bit() && isVarArg && !isWin64)
MIB.addReg(X86::AL, RegState::Implicit);
// Add implicit physical register uses to the call.
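Note the repeated CCInfo.AllocateStack(32, 8) pattern above: it now keys
off the call's convention rather than the target. The constant itself
comes from the Win64 ABI, as the self-contained sketch below spells out
(stand-in names, not LLVM code).

// The Win64 ABI requires the caller to reserve an 8-byte "home" slot for
// each of the four register parameters (RCX, RDX, R8, R9), even when the
// arguments are actually passed in registers.
constexpr unsigned NumRegParams   = 4;
constexpr unsigned SlotSizeBytes  = 8;
constexpr unsigned ShadowAreaSize = NumRegParams * SlotSizeBytes;
static_assert(ShadowAreaSize == 32, "matches CCInfo.AllocateStack(32, 8)");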
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f00df3543a8..6284dd7e58e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1880,13 +1880,19 @@ static bool IsTailCallConvention(CallingConv::ID CC) {
CC == CallingConv::HiPE);
}
+/// \brief Return true if the calling convention is a C calling convention.
+static bool IsCCallConvention(CallingConv::ID CC) {
+ return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
+ CC == CallingConv::X86_64_SysV);
+}
+
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
return false;
CallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
- if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
+ if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
return false;
return true;
@@ -1961,7 +1967,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MachineFrameInfo *MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget->is64Bit();
bool IsWindows = Subtarget->isTargetWindows();
- bool IsWin64 = Subtarget->isTargetWin64();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
@@ -1972,9 +1978,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (IsWin64) {
+ if (IsWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
@@ -2287,7 +2292,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isTargetWin64();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
bool IsWindows = Subtarget->isTargetWindows();
StructReturnType SR = callIsStructReturn(Outs);
bool IsSibcall = false;
@@ -2320,9 +2325,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (IsWin64) {
+ if (IsWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
@@ -2833,13 +2837,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG &DAG) const {
- if (!IsTailCallConvention(CalleeCC) &&
- CalleeCC != CallingConv::C)
+ if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
return false;
// If -tailcallopt is specified, make fastcc functions tail-callable.
const MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = DAG.getMachineFunction().getFunction();
+ const Function *CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
@@ -2849,6 +2852,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
+ bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
+ bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
if (getTargetMachine().Options.GuaranteedTailCallOpt) {
if (IsTailCallConvention(CalleeCC) && CCMatch)
@@ -2882,7 +2887,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
- if (Subtarget->isTargetWin64())
+ if (IsCalleeWin64 || IsCallerWin64)
return false;
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2957,9 +2962,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
getTargetMachine(), ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
- if (Subtarget->isTargetWin64()) {
+ if (IsCalleeWin64)
CCInfo.AllocateStack(32, 8);
- }
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
if (CCInfo.getNextStackOffset()) {
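Taken together, these hunks make tail-call eligibility depend on the
conventions of both sides of the call rather than on the target alone. A
condensed restatement of the new vararg guard (an assumed simplification,
not the function's full logic):

// Assumed simplification: a vararg tail call is rejected when either the
// caller or the callee uses the Win64 convention, since mixing
// conventions there is untested (per the comment in the hunk above).
static bool varArgTailCallAllowed(bool IsCalleeWin64, bool IsCallerWin64) {
  return !(IsCalleeWin64 || IsCallerWin64);
}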
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 66832b989be..01a28d0fd1d 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -338,7 +338,13 @@ public:
}
bool isPICStyleStubAny() const {
return PICStyle == PICStyles::StubDynamicNoPIC ||
- PICStyle == PICStyles::StubPIC; }
+ PICStyle == PICStyles::StubPIC;
+ }
+
+ bool isCallingConvWin64(CallingConv::ID CC) const {
+ return (isTargetWin64() && CC != CallingConv::X86_64_SysV) ||
+ CC == CallingConv::X86_64_Win64;
+ }
/// ClassifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it in a non-pcrel
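The new predicate is the heart of the patch: on a Win64 target every
convention except an explicit X86_64_SysV is treated as Win64, and on any
other target only an explicit X86_64_Win64 is. A minimal standalone sketch
of that truth table (the enum and free function are stand-ins for the real
LLVM types):

#include <cassert>

enum class CC { C, X86_64_Win64, X86_64_SysV };

static bool isCallingConvWin64(bool isTargetWin64, CC cc) {
  return (isTargetWin64 && cc != CC::X86_64_SysV) || cc == CC::X86_64_Win64;
}

int main() {
  assert( isCallingConvWin64(true,  CC::C));            // Win64 default
  assert(!isCallingConvWin64(true,  CC::X86_64_SysV));  // explicit opt-out
  assert( isCallingConvWin64(false, CC::X86_64_Win64)); // explicit opt-in
  assert(!isCallingConvWin64(false, CC::C));            // SysV default
}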