| author | Diana Picus <diana.picus@linaro.org> | 2017-06-15 09:42:02 +0000 |
|---|---|---|
| committer | Diana Picus <diana.picus@linaro.org> | 2017-06-15 09:42:02 +0000 |
| commit | 8fd1601d322235b34f97505f795f83f56be587c1 (patch) | |
| tree | 8f42c329b1326d4000a314844b7b21c28defeddf /llvm/lib/Target/ARM | |
| parent | 0a26d2c298a6a493945adf131596dbbfc4a1d216 (diff) | |
[ARM] GlobalISel: Lower only homogeneous struct args
Lowering mixed struct args, params and returns used G_INSERT, which is a
bit more convoluted to support through the entire pipeline. Since they
don't occur that often in practice, it's probably wiser to leave them
out until later.
Meanwhile, we can lower homogeneous structs using G_MERGE_VALUES, which
has good support in the legalizer. These occur e.g. as the return of
__aeabi_idivmod, so it's nice to be able to support them.
llvm-svn: 305458
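For context, here is a minimal C++ sketch of the two kinds of aggregate this distinction is about. The type and function names below are invented for illustration and do not appear in the patch:

```cpp
// Illustrative only; these names are not part of the patch.

// Homogeneous aggregate: both fields have the same type, so it can be
// split and rebuilt with G_UNMERGE_VALUES / G_MERGE_VALUES. The
// {quotient, remainder} pair returned by __aeabi_idivmod has this shape.
struct DivModResult {
  int Quotient;
  int Remainder;
};

// Mixed aggregate: fields of different types would need G_INSERT-based
// lowering, which this patch deliberately leaves unsupported for now.
struct Mixed {
  int Flag;
  double Value;
};

DivModResult divmod(int Numerator, int Denominator) {
  return {Numerator / Denominator, Numerator % Denominator};
}
```

Only the first kind passes the updated isSupportedType check; mixed structs are still rejected by this call lowering.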
Diffstat (limited to 'llvm/lib/Target/ARM')
| -rw-r--r-- | llvm/lib/Target/ARM/ARMCallLowering.cpp | 55 |
1 file changed, 24 insertions, 31 deletions
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index a7ac9a1dca6..e498f70b820 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -35,9 +35,19 @@ ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
 static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                             Type *T) {
-  if (T->isArrayTy() || T->isStructTy())
+  if (T->isArrayTy())
     return true;
 
+  if (T->isStructTy()) {
+    // For now we only allow homogeneous structs that we can manipulate with
+    // G_MERGE_VALUES and G_UNMERGE_VALUES
+    auto StructT = cast<StructType>(T);
+    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
+      if (StructT->getElementType(i) != StructT->getElementType(0))
+        return false;
+    return true;
+  }
+
   EVT VT = TLI.getValueType(DL, T, true);
   if (!VT.isSimple() || VT.isVector() ||
       !(VT.isInteger() || VT.isFloatingPoint()))
@@ -220,12 +230,16 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
     return false;
 
   SmallVector<ArgInfo, 4> SplitVTs;
+  SmallVector<unsigned, 4> Regs;
   ArgInfo RetInfo(VReg, Val->getType());
   setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
   splitToValueTypes(RetInfo, SplitVTs, MF, [&](unsigned Reg, uint64_t Offset) {
-    MIRBuilder.buildExtract(Reg, VReg, Offset);
+    Regs.push_back(Reg);
   });
 
+  if (Regs.size() > 1)
+    MIRBuilder.buildUnmerge(Regs, VReg);
+
   CCAssignFn *AssignFn =
       TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());
@@ -344,26 +358,6 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
     return 1;
   }
 
-  /// Merge the values in \p SrcRegs into \p DstReg at offsets \p SrcOffsets.
-  /// Note that the source registers are not required to have homogeneous types,
-  /// so we use G_INSERT rather than G_MERGE_VALUES.
-  // FIXME: Use G_MERGE_VALUES if the types are homogeneous.
-  void mergeRegisters(unsigned DstReg, ArrayRef<unsigned> SrcRegs,
-                      ArrayRef<uint64_t> SrcOffsets) {
-    LLT Ty = MRI.getType(DstReg);
-
-    unsigned Dst = MRI.createGenericVirtualRegister(Ty);
-    MIRBuilder.buildUndef(Dst);
-
-    for (unsigned i = 0; i < SrcRegs.size(); ++i) {
-      unsigned Tmp = MRI.createGenericVirtualRegister(Ty);
-      MIRBuilder.buildInsert(Tmp, Dst, SrcRegs[i], SrcOffsets[i]);
-      Dst = Tmp;
-    }
-
-    MIRBuilder.buildCopy(DstReg, Dst);
-  }
-
   /// Marking a physical register as used is different between formal
   /// parameters, where it's a basic block live-in, and call returns, where it's
   /// an implicit-def of the call instruction.
@@ -413,22 +407,19 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   SmallVector<ArgInfo, 8> ArgInfos;
   SmallVector<unsigned, 4> SplitRegs;
-  SmallVector<uint64_t, 4> RegOffsets;
   unsigned Idx = 0;
   for (auto &Arg : F.args()) {
     ArgInfo AInfo(VRegs[Idx], Arg.getType());
     setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);
 
     SplitRegs.clear();
-    RegOffsets.clear();
 
     splitToValueTypes(AInfo, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
       SplitRegs.push_back(Reg);
-      RegOffsets.push_back(Offset);
     });
 
     if (!SplitRegs.empty())
-      ArgHandler.mergeRegisters(VRegs[Idx], SplitRegs, RegOffsets);
+      MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
 
     Idx++;
   }
@@ -490,9 +481,13 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     if (!Arg.IsFixed)
       return false;
 
+    SmallVector<unsigned, 8> Regs;
     splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
-      MIRBuilder.buildExtract(Reg, Arg.Reg, Offset);
+      Regs.push_back(Reg);
     });
+
+    if (Regs.size() > 1)
+      MIRBuilder.buildUnmerge(Regs, Arg.Reg);
   }
 
   auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
@@ -508,11 +503,9 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
       return false;
 
     ArgInfos.clear();
-    SmallVector<uint64_t, 8> RegOffsets;
     SmallVector<unsigned, 8> SplitRegs;
     splitToValueTypes(OrigRet, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
-      RegOffsets.push_back(Offset);
       SplitRegs.push_back(Reg);
     });
 
@@ -521,10 +514,10 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
       return false;
 
-    if (!RegOffsets.empty()) {
+    if (!SplitRegs.empty()) {
       // We have split the value and allocated each individual piece, now build
       // it up again.
-      RetHandler.mergeRegisters(OrigRet.Reg, SplitRegs, RegOffsets);
+      MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs);
     }
   }
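As a rough, self-contained sketch of the check the patch adds to isSupportedType, stripped of the LLVM types (the Kind enum and allSameElementType helper below are made up for this example), the homogeneity test just compares every element type against element 0:

```cpp
#include <vector>

// Stand-in for an element's type; in the real patch this is an llvm::Type*.
enum class Kind { Int32, Int64, Float, Double };

// Mirrors the loop added to isSupportedType: a struct is "homogeneous" if
// every element's type matches element 0 (an empty struct is trivially so).
static bool allSameElementType(const std::vector<Kind> &Elements) {
  for (size_t I = 1, E = Elements.size(); I != E; ++I)
    if (Elements[I] != Elements[0])
      return false;
  return true;
}

int main() {
  // {i32, i32}, such as the __aeabi_idivmod return value: accepted.
  bool Homogeneous = allSameElementType({Kind::Int32, Kind::Int32});
  // {i32, double}: rejected, left for a later patch.
  bool Mixed = allSameElementType({Kind::Int32, Kind::Double});
  return Homogeneous && !Mixed ? 0 : 1;
}
```

That all-equal-to-the-first requirement is what lets the lowering use a single G_MERGE_VALUES / G_UNMERGE_VALUES pair instead of the removed chain of G_INSERTs.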

