summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
authorReid Kleckner <rnk@google.com>2019-01-28 21:28:40 +0000
committerReid Kleckner <rnk@google.com>2019-01-28 21:28:40 +0000
commit96c581d7d0362b7d24aa7ec9cb2278dd3782b32c (patch)
treead4461206ed02bf457c9dbcc5d22d7b454dab16b /llvm/lib
parent2d73ecd0a34ad06e748a7be89e50795dd20deada (diff)
downloadbcm5719-llvm-96c581d7d0362b7d24aa7ec9cb2278dd3782b32c.tar.gz
bcm5719-llvm-96c581d7d0362b7d24aa7ec9cb2278dd3782b32c.zip
[AArch64] Include AArch64GenCallingConv.inc once
Summary: Avoids duplicating generated static helpers for calling convention analysis. This also means you can modify AArch64CallingConv.td without recompiling the AArch64ISelLowering.cpp monolith, so it provides faster incremental rebuilds. Saves 12K in llc.exe, but adds a new object file, which is large. Reviewers: efriedma, t.p.northover Subscribers: mgorny, javed.absar, kristof.beyls, hiraditya, llvm-commits Differential Revision: https://reviews.llvm.org/D56948 llvm-svn: 352430
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Target/AArch64/AArch64CallingConvention.cpp134
-rw-r--r--llvm/lib/Target/AArch64/AArch64CallingConvention.h149
-rw-r--r--llvm/lib/Target/AArch64/AArch64CallingConvention.td8
-rw-r--r--llvm/lib/Target/AArch64/AArch64FastISel.cpp6
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp2
-rw-r--r--llvm/lib/Target/AArch64/CMakeLists.txt1
6 files changed, 171 insertions, 129 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
new file mode 100644
index 00000000000..02538a18761
--- /dev/null
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -0,0 +1,134 @@
+//=== AArch64CallingConvention.cpp - AArch64 CC impl ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the table-generated and custom routines for the AArch64
+// Calling Convention.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64CallingConvention.h"
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/IR/CallingConv.h"
+using namespace llvm;
+
+// Candidate register sequences for passing the members of an [N x Ty] block:
+// eight 64-bit GPRs (X0-X7) and eight FP/SIMD registers per element width
+// (H = 16-bit, S = 32-bit, D = 64-bit, Q = 128-bit). CC_AArch64_Custom_Block
+// below selects one of these lists based on the member LocVT.
+static const MCPhysReg XRegList[] = {AArch64::X0, AArch64::X1, AArch64::X2,
+                                     AArch64::X3, AArch64::X4, AArch64::X5,
+                                     AArch64::X6, AArch64::X7};
+static const MCPhysReg HRegList[] = {AArch64::H0, AArch64::H1, AArch64::H2,
+                                     AArch64::H3, AArch64::H4, AArch64::H5,
+                                     AArch64::H6, AArch64::H7};
+static const MCPhysReg SRegList[] = {AArch64::S0, AArch64::S1, AArch64::S2,
+                                     AArch64::S3, AArch64::S4, AArch64::S5,
+                                     AArch64::S6, AArch64::S7};
+static const MCPhysReg DRegList[] = {AArch64::D0, AArch64::D1, AArch64::D2,
+                                     AArch64::D3, AArch64::D4, AArch64::D5,
+                                     AArch64::D6, AArch64::D7};
+static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
+                                     AArch64::Q3, AArch64::Q4, AArch64::Q5,
+                                     AArch64::Q6, AArch64::Q7};
+
+/// Allocate contiguous stack space for all pending members of an [N x Ty]
+/// block, record each resulting location in \p State, and clear the pending
+/// list. Always returns true, signalling that the argument has been fully
+/// assigned.
+///
+/// \p SlotAlign constrains only the *first* member's slot (it is reset to 1
+/// inside the loop), so subsequent members pack contiguously after it.
+static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
+                             MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
+                             CCState &State, unsigned SlotAlign) {
+  unsigned Size = LocVT.getSizeInBits() / 8;
+  // Member alignment is the original IR alignment, capped at the target's
+  // stack alignment.
+  unsigned StackAlign =
+      State.getMachineFunction().getDataLayout().getStackAlignment();
+  unsigned Align = std::min(ArgFlags.getOrigAlign(), StackAlign);
+
+  for (auto &It : PendingMembers) {
+    It.convertToMem(State.AllocateStack(Size, std::max(Align, SlotAlign)));
+    State.addLoc(It);
+    SlotAlign = 1;
+  }
+
+  // All pending members have now been allocated
+  PendingMembers.clear();
+  return true;
+}
+
+/// The Darwin variadic PCS places anonymous arguments in 8-byte stack slots. An
+/// [N x Ty] type must still be contiguous in memory though.
+/// The Darwin variadic PCS places anonymous arguments in 8-byte stack slots. An
+/// [N x Ty] type must still be contiguous in memory though.
+///
+/// Members are accumulated in \p State's pending-locs list; nothing is
+/// allocated until the flagged last member arrives, at which point the whole
+/// block is laid out on the stack with an 8-byte slot alignment. Returns true
+/// in all paths (the argument never falls through to later CC rules).
+static bool CC_AArch64_Custom_Stack_Block(
+    unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
+
+  // Add the argument to the list to be allocated once we know the size of the
+  // block.
+  PendingMembers.push_back(
+      CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+
+  // Not the last member yet: defer allocation until the block is complete.
+  if (!ArgFlags.isInConsecutiveRegsLast())
+    return true;
+
+  return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, 8);
+}
+
+/// Given an [N x Ty] block, it should be passed in a consecutive sequence of
+/// registers. If no such sequence is available, mark the rest of the registers
+/// of that type as used and place the argument on the stack.
+/// Given an [N x Ty] block, it should be passed in a consecutive sequence of
+/// registers. If no such sequence is available, mark the rest of the registers
+/// of that type as used and place the argument on the stack.
+///
+/// Returns false only when LocVT matches none of the supported element types,
+/// letting later rules in the generated calling convention handle the value;
+/// otherwise returns true once the whole block has been assigned.
+static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                    CCValAssign::LocInfo &LocInfo,
+                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  // Try to allocate a contiguous block of registers, each of the correct
+  // size to hold one member.
+  ArrayRef<MCPhysReg> RegList;
+  if (LocVT.SimpleTy == MVT::i64)
+    RegList = XRegList;
+  else if (LocVT.SimpleTy == MVT::f16)
+    RegList = HRegList;
+  else if (LocVT.SimpleTy == MVT::f32 || LocVT.is32BitVector())
+    RegList = SRegList;
+  else if (LocVT.SimpleTy == MVT::f64 || LocVT.is64BitVector())
+    RegList = DRegList;
+  else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
+    RegList = QRegList;
+  else {
+    // Not an array we want to split up after all.
+    return false;
+  }
+
+  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
+
+  // Add the argument to the list to be allocated once we know the size of the
+  // block.
+  PendingMembers.push_back(
+      CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+
+  // Defer allocation until the last member of the block arrives.
+  if (!ArgFlags.isInConsecutiveRegsLast())
+    return true;
+
+  // First choice: a contiguous run of registers for the whole block.
+  // AllocateRegBlock returns 0 on failure, else the first register of the run.
+  unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
+  if (RegResult) {
+    for (auto &It : PendingMembers) {
+      It.convertToReg(RegResult);
+      State.addLoc(It);
+      ++RegResult;
+    }
+    PendingMembers.clear();
+    return true;
+  }
+
+  // Mark all regs in the class as unavailable
+  for (auto Reg : RegList)
+    State.AllocateReg(Reg);
+
+  // Fall back to the stack. Darwin packs the block tightly (alignment 1 for
+  // the leading slot); other targets use 8-byte slot alignment.
+  const AArch64Subtarget &Subtarget = static_cast<const AArch64Subtarget &>(
+      State.getMachineFunction().getSubtarget());
+  unsigned SlotAlign = Subtarget.isTargetDarwin() ? 1 : 8;
+
+  return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, SlotAlign);
+}
+
+// TableGen provides definitions of the calling convention analysis entry
+// points.
+#include "AArch64GenCallingConv.inc"
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.h b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
index 3a00c63384b..13cc0c583fd 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.h
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
@@ -1,4 +1,4 @@
-//=== AArch64CallingConv.h - Custom Calling Convention Routines -*- C++ -*-===//
+//=== AArch64CallingConvention.h - AArch64 CC entry points ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,133 +6,40 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the custom routines for the AArch64 Calling Convention
-// that aren't done by tablegen.
+// This file declares the entry points for AArch64 calling convention analysis.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64CALLINGCONVENTION_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64CALLINGCONVENTION_H
-#include "AArch64.h"
-#include "AArch64InstrInfo.h"
-#include "AArch64Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/IR/CallingConv.h"
-namespace {
-using namespace llvm;
-
-static const MCPhysReg XRegList[] = {AArch64::X0, AArch64::X1, AArch64::X2,
- AArch64::X3, AArch64::X4, AArch64::X5,
- AArch64::X6, AArch64::X7};
-static const MCPhysReg HRegList[] = {AArch64::H0, AArch64::H1, AArch64::H2,
- AArch64::H3, AArch64::H4, AArch64::H5,
- AArch64::H6, AArch64::H7};
-static const MCPhysReg SRegList[] = {AArch64::S0, AArch64::S1, AArch64::S2,
- AArch64::S3, AArch64::S4, AArch64::S5,
- AArch64::S6, AArch64::S7};
-static const MCPhysReg DRegList[] = {AArch64::D0, AArch64::D1, AArch64::D2,
- AArch64::D3, AArch64::D4, AArch64::D5,
- AArch64::D6, AArch64::D7};
-static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
- AArch64::Q3, AArch64::Q4, AArch64::Q5,
- AArch64::Q6, AArch64::Q7};
-
-static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
- MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
- CCState &State, unsigned SlotAlign) {
- unsigned Size = LocVT.getSizeInBits() / 8;
- unsigned StackAlign =
- State.getMachineFunction().getDataLayout().getStackAlignment();
- unsigned Align = std::min(ArgFlags.getOrigAlign(), StackAlign);
-
- for (auto &It : PendingMembers) {
- It.convertToMem(State.AllocateStack(Size, std::max(Align, SlotAlign)));
- State.addLoc(It);
- SlotAlign = 1;
- }
-
- // All pending members have now been allocated
- PendingMembers.clear();
- return true;
-}
-
-/// The Darwin variadic PCS places anonymous arguments in 8-byte stack slots. An
-/// [N x Ty] type must still be contiguous in memory though.
-static bool CC_AArch64_Custom_Stack_Block(
- unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
- SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
-
- // Add the argument to the list to be allocated once we know the size of the
- // block.
- PendingMembers.push_back(
- CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
-
- if (!ArgFlags.isInConsecutiveRegsLast())
- return true;
-
- return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, 8);
-}
-
-/// Given an [N x Ty] block, it should be passed in a consecutive sequence of
-/// registers. If no such sequence is available, mark the rest of the registers
-/// of that type as used and place the argument on the stack.
-static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
- // Try to allocate a contiguous block of registers, each of the correct
- // size to hold one member.
- ArrayRef<MCPhysReg> RegList;
- if (LocVT.SimpleTy == MVT::i64)
- RegList = XRegList;
- else if (LocVT.SimpleTy == MVT::f16)
- RegList = HRegList;
- else if (LocVT.SimpleTy == MVT::f32 || LocVT.is32BitVector())
- RegList = SRegList;
- else if (LocVT.SimpleTy == MVT::f64 || LocVT.is64BitVector())
- RegList = DRegList;
- else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
- RegList = QRegList;
- else {
- // Not an array we want to split up after all.
- return false;
- }
-
- SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
-
- // Add the argument to the list to be allocated once we know the size of the
- // block.
- PendingMembers.push_back(
- CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
-
- if (!ArgFlags.isInConsecutiveRegsLast())
- return true;
-
- unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
- if (RegResult) {
- for (auto &It : PendingMembers) {
- It.convertToReg(RegResult);
- State.addLoc(It);
- ++RegResult;
- }
- PendingMembers.clear();
- return true;
- }
-
- // Mark all regs in the class as unavailable
- for (auto Reg : RegList)
- State.AllocateReg(Reg);
-
- const AArch64Subtarget &Subtarget = static_cast<const AArch64Subtarget &>(
- State.getMachineFunction().getSubtarget());
- unsigned SlotAlign = Subtarget.isTargetDarwin() ? 1 : 8;
-
- return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, SlotAlign);
-}
-
-}
+namespace llvm {
+// Entry points for AArch64 calling convention analysis. The definitions are
+// emitted by TableGen into AArch64GenCallingConv.inc (from the conventions
+// marked `let Entry = 1` in AArch64CallingConvention.td) and compiled once in
+// AArch64CallingConvention.cpp.
+bool CC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
+                      CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                      CCState &State);
+bool CC_AArch64_DarwinPCS_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                                 CCValAssign::LocInfo LocInfo,
+                                 ISD::ArgFlagsTy ArgFlags, CCState &State);
+bool CC_AArch64_DarwinPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
+                          CCValAssign::LocInfo LocInfo,
+                          ISD::ArgFlagsTy ArgFlags, CCState &State);
+bool CC_AArch64_Win64_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                             CCValAssign::LocInfo LocInfo,
+                             ISD::ArgFlagsTy ArgFlags, CCState &State);
+bool CC_AArch64_WebKit_JS(unsigned ValNo, MVT ValVT, MVT LocVT,
+                          CCValAssign::LocInfo LocInfo,
+                          ISD::ArgFlagsTy ArgFlags, CCState &State);
+bool CC_AArch64_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                    CCState &State);
+bool RetCC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
+                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                         CCState &State);
+bool RetCC_AArch64_WebKit_JS(unsigned ValNo, MVT ValVT, MVT LocVT,
+                             CCValAssign::LocInfo LocInfo,
+                             ISD::ArgFlagsTy ArgFlags, CCState &State);
+} // namespace llvm
#endif
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index 32c5e641227..e164dcbf63b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -21,6 +21,7 @@ class CCIfBigEndian<CCAction A> :
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//
+let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<[
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
@@ -88,6 +89,7 @@ def CC_AArch64_AAPCS : CallingConv<[
CCAssignToStack<16, 16>>
]>;
+let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
@@ -121,6 +123,7 @@ def RetCC_AArch64_AAPCS : CallingConv<[
]>;
// Vararg functions on windows pass floats in integer registers
+let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
CCIfType<[f16, f32], CCPromoteToType<f64>>,
CCIfType<[f64], CCBitConvertToType<i64>>,
@@ -132,6 +135,7 @@ def CC_AArch64_Win64_VarArg : CallingConv<[
// from the standard one at this level:
// + i128s (i.e. split i64s) don't need even registers.
// + Stack slots are sized as needed rather than being at least 64-bit.
+let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
@@ -188,6 +192,7 @@ def CC_AArch64_DarwinPCS : CallingConv<[
CCAssignToStack<16, 16>>
]>;
+let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
@@ -212,6 +217,7 @@ def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
// in register and the remaining arguments on stack. We allow 32bit stack slots,
// so that WebKit can write partial values in the stack and define the other
// 32bit quantity as undef.
+let Entry = 1 in
def CC_AArch64_WebKit_JS : CallingConv<[
// Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
@@ -223,6 +229,7 @@ def CC_AArch64_WebKit_JS : CallingConv<[
CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
+let Entry = 1 in
def RetCC_AArch64_WebKit_JS : CallingConv<[
CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
[X0, X1, X2, X3, X4, X5, X6, X7]>>,
@@ -256,6 +263,7 @@ def RetCC_AArch64_WebKit_JS : CallingConv<[
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".
+let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
CCIfType<[iPTR], CCBitConvertToType<i64>>,
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 3fb99fe3fde..a63ef542954 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -304,8 +304,6 @@ public:
} // end anonymous namespace
-#include "AArch64GenCallingConv.inc"
-
/// Check if the sign-/zero-extend will be a noop.
static bool isIntExtFree(const Instruction *I) {
assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
@@ -5171,10 +5169,6 @@ bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
}
- // Silence warnings.
- (void)&CC_AArch64_DarwinPCS_VarArg;
- (void)&CC_AArch64_Win64_VarArg;
-
// fall-back to target-independent instruction selection.
return selectOperator(I, I->getOpcode());
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index df77e8e7d5b..31a6e7e7c9f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2972,8 +2972,6 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
-#include "AArch64GenCallingConv.inc"
-
/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
bool IsVarArg) const {
diff --git a/llvm/lib/Target/AArch64/CMakeLists.txt b/llvm/lib/Target/AArch64/CMakeLists.txt
index 7778882d491..e219f5f3b67 100644
--- a/llvm/lib/Target/AArch64/CMakeLists.txt
+++ b/llvm/lib/Target/AArch64/CMakeLists.txt
@@ -24,6 +24,7 @@ add_llvm_target(AArch64CodeGen
AArch64AdvSIMDScalarPass.cpp
AArch64AsmPrinter.cpp
AArch64BranchTargets.cpp
+ AArch64CallingConvention.cpp
AArch64CallLowering.cpp
AArch64CleanupLocalDynamicTLSPass.cpp
AArch64CollectLOH.cpp
OpenPOWER on IntegriCloud