diff options
author | Juergen Ributzka <juergen@apple.com> | 2014-08-05 05:43:48 +0000 |
---|---|---|
committer | Juergen Ributzka <juergen@apple.com> | 2014-08-05 05:43:48 +0000 |
commit | a126d1ef3ce7392e236586dbc7389434124e59cb (patch) | |
tree | d04b80bde2a9514ce6408de1d5d13b60c5158c78 /llvm/lib/Target | |
parent | ec100526e330972937da513b06b7cf12ae258b52 (diff) | |
download | bcm5719-llvm-a126d1ef3ce7392e236586dbc7389434124e59cb.tar.gz bcm5719-llvm-a126d1ef3ce7392e236586dbc7389434124e59cb.zip |
[FastISel][AArch64] Implement the FastLowerArguments hook.
This implements basic argument lowering for AArch64 in FastISel. It only
handles a small subset of the C calling convention. It supports simple
arguments that can be passed in GPR and FPR registers.
This should cover most of the trivial cases without falling back to
SelectionDAG.
This fixes <rdar://problem/17890986>.
llvm-svn: 214846
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64FastISel.cpp | 103 |
1 file changed, 103 insertions, 0 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp index bf418e0fde0..7aebf4be042 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -94,6 +94,7 @@ class AArch64FastISel : public FastISel { const AArch64Subtarget *Subtarget; LLVMContext *Context; + bool FastLowerArguments() override; bool FastLowerCall(CallLoweringInfo &CLI) override; bool FastLowerIntrinsicCall(const IntrinsicInst *II) override; @@ -1313,6 +1314,108 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) { return true; } /* NOTE(review): FastLowerArguments — FastISel hook that tries to lower all incoming formal arguments directly into argument registers; any unsupported case returns false so lowering falls back to SelectionDAG. */ +bool AArch64FastISel::FastLowerArguments() { + if (!FuncInfo.CanLowerReturn) + return false; + + const Function *F = FuncInfo.Fn; + if (F->isVarArg()) + return false; + + CallingConv::ID CC = F->getCallingConv(); + if (CC != CallingConv::C) + return false; + + // Only handle simple cases like i1/i8/i16/i32/i64/f32/f64 of up to 8 GPR and + // FPR each. /* First pass: validate each argument's attributes/type and count GPR vs FPR demand without emitting any code. */ + unsigned GPRCnt = 0; + unsigned FPRCnt = 0; + unsigned Idx = 0; + for (auto const &Arg : F->args()) { + // The first argument is at index 1. 
+ ++Idx; + if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) || + F->getAttributes().hasAttribute(Idx, Attribute::InReg) || + F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || + F->getAttributes().hasAttribute(Idx, Attribute::Nest)) + return false; + + Type *ArgTy = Arg.getType(); + if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) + return false; + + EVT ArgVT = TLI.getValueType(ArgTy); + if (!ArgVT.isSimple()) return false; + switch (ArgVT.getSimpleVT().SimpleTy) { + default: return false; + case MVT::i1: + case MVT::i8: + case MVT::i16: + case MVT::i32: + case MVT::i64: + ++GPRCnt; + break; + case MVT::f16: + case MVT::f32: + case MVT::f64: + ++FPRCnt; + break; + } + + if (GPRCnt > 8 || FPRCnt > 8) + return false; + } + /* Argument registers by type class: row 0 = W (i32 and smaller), 1 = X (i64), 2 = H (f16), 3 = S (f32), 4 = D (f64); eight of each, matching the AArch64 procedure call standard. */ + static const MCPhysReg Registers[5][8] = { + { AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4, + AArch64::W5, AArch64::W6, AArch64::W7 }, + { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4, + AArch64::X5, AArch64::X6, AArch64::X7 }, + { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4, + AArch64::H5, AArch64::H6, AArch64::H7 }, + { AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4, + AArch64::S5, AArch64::S6, AArch64::S7 }, + { AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4, + AArch64::D5, AArch64::D6, AArch64::D7 } + }; + + unsigned GPRIdx = 0; + unsigned FPRIdx = 0; /* Second pass: bind each argument to the next free register of its class and emit live-in copies. */ + for (auto const &Arg : F->args()) { + MVT VT = TLI.getSimpleValueType(Arg.getType()); + unsigned SrcReg; + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type."); + case MVT::i1: + case MVT::i8: + case MVT::i16: VT = MVT::i32; // fall-through + case MVT::i32: SrcReg = Registers[0][GPRIdx++]; break; + case MVT::i64: SrcReg = Registers[1][GPRIdx++]; break; + case MVT::f16: SrcReg = Registers[2][FPRIdx++]; break; + case MVT::f32: SrcReg = Registers[3][FPRIdx++]; break; + case MVT::f64: SrcReg = Registers[4][FPRIdx++]; break; + } + /* The register index was already bumped in the switch above, so an unused argument still consumes its register slot (as the calling convention requires). */ + // Skip 
unused arguments. + if (Arg.use_empty()) { + UpdateValueMap(&Arg, 0); + continue; + } + + const TargetRegisterClass *RC = TLI.getRegClassFor(VT); + unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); + // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. + // Without this, EmitLiveInCopies may eliminate the livein if its only + // use is a bitcast (which isn't turned into an instruction). + unsigned ResultReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg) + .addReg(DstReg, getKillRegState(true)); + UpdateValueMap(&Arg, ResultReg); + } + return true; +} + bool AArch64FastISel::ProcessCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &OutVTs, unsigned &NumBytes) { |