path: root/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
author     Amara Emerson <aemerson@apple.com>   2019-09-03 21:42:28 +0000
committer  Amara Emerson <aemerson@apple.com>   2019-09-03 21:42:28 +0000
commit     fbaf425b790000810611c085a39ed1b81e7545fe (patch)
tree       55b0ad073ffa48ae4cdab06b569108c45d77b2fb /llvm/lib/Target/AArch64/AArch64CallLowering.cpp
parent     ccb1862bc99d293c4b9f397651a8b76ad1efe900 (diff)
[GlobalISel][CallLowering] Add support for splitting types according to calling conventions.
On AArch64, s128 types have to be split into s64 GPRs when passed as arguments. This change adds generic support in call lowering for dealing with multiple registers, for both incoming and outgoing arguments. Support for splitting return types is not yet implemented.

Differential Revision: https://reviews.llvm.org/D66180

llvm-svn: 370822
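For context, here is a minimal, self-contained C++ sketch of the per-register flags pattern this commit switches to: ArgInfo now carries one set of argument flags per register a value occupies, which is why the diff below indexes Flags[0] and why a split value can mark only its last piece as the end of a consecutive-register block. ArgFlags, SplitArg and splitPieces are hypothetical names standing in for ISD::ArgFlagsTy, CallLowering::ArgInfo and splitToValueTypes; this is not the upstream LLVM code.

// Illustrative sketch only: ArgFlags, SplitArg and splitPieces are
// hypothetical stand-ins for ISD::ArgFlagsTy, CallLowering::ArgInfo and
// splitToValueTypes, used to show the Flags-per-register indexing pattern.
#include <vector>

struct ArgFlags {                // stand-in for ISD::ArgFlagsTy
  bool InConsecutiveRegs = false;
  bool InConsecutiveRegsLast = false;
};

struct SplitArg {                // stand-in for CallLowering::ArgInfo
  unsigned Reg;                  // virtual register holding this piece
  std::vector<ArgFlags> Flags;   // one flags entry per register the piece uses
};

// Mirrors the loop in splitToValueTypes below: each split piece gets its own
// flags entry (hence the Flags[0] indexing in the diff); pieces that must
// occupy a contiguous register block are marked InConsecutiveRegs, and the
// final piece is marked InConsecutiveRegsLast, matching the unconditional
// call after the loop in the hunk further down.
std::vector<SplitArg> splitPieces(const std::vector<unsigned> &Regs,
                                  bool NeedsRegBlock) {
  std::vector<SplitArg> SplitArgs;
  for (unsigned Reg : Regs) {
    SplitArgs.push_back(SplitArg{Reg, {ArgFlags{}}});
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].InConsecutiveRegs = true;
  }
  if (!SplitArgs.empty())
    SplitArgs.back().Flags[0].InConsecutiveRegsLast = true;
  return SplitArgs;
}

For example, splitPieces({R0, R1}, /*NeedsRegBlock=*/true) models a 128-bit value split into two 64-bit GPR pieces: both pieces carry InConsecutiveRegs, and only the second carries InConsecutiveRegsLast.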
Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64CallLowering.cpp')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp  15
1 file changed, 8 insertions, 7 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index e389aaaeb71..9ace33b3985 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -174,12 +174,13 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
   bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
                  const CallLowering::ArgInfo &Info,
+                 ISD::ArgFlagsTy Flags,
                  CCState &State) override {
     bool Res;
     if (Info.IsFixed)
-      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
     else
-      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

     StackSize = State.getNextStackOffset();
     return Res;
@@ -208,7 +209,7 @@ void AArch64CallLowering::splitToValueTypes(
     // No splitting to do, but we want to replace the original type (e.g. [1 x
     // double] -> double).
     SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
-                           OrigArg.Flags, OrigArg.IsFixed);
+                           OrigArg.Flags[0], OrigArg.IsFixed);
     return;
   }

@@ -219,13 +220,13 @@ void AArch64CallLowering::splitToValueTypes(
       OrigArg.Ty, CallConv, false);
   for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
     Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
-    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
+    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                            OrigArg.IsFixed);
     if (NeedsRegBlock)
-      SplitArgs.back().Flags.setInConsecutiveRegs();
+      SplitArgs.back().Flags[0].setInConsecutiveRegs();
   }

-  SplitArgs.back().Flags.setInConsecutiveRegsLast();
+  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
 }

 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
@@ -419,7 +420,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI, Info.CallConv);
     // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
     if (OrigArg.Ty->isIntegerTy(1))
-      SplitArgs.back().Flags.setZExt();
+      SplitArgs.back().Flags[0].setZExt();
   }

   // Find out which ABI gets to decide where things go.