Diffstat (limited to 'llvm/lib/Target')
 llvm/lib/Target/AArch64/AArch64CallLowering.cpp | 149
 llvm/lib/Target/AArch64/AArch64CallLowering.h   |  22
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp |   6
 llvm/lib/Target/AArch64/AArch64ISelLowering.h   |   3
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp   |   2
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h     |   2
6 files changed, 143 insertions, 41 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 9abac46dcec..d960218a5db 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -18,7 +18,8 @@
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
-
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 using namespace llvm;
 
 #ifndef LLVM_BUILD_GLOBAL_ISEL
@@ -30,61 +31,54 @@ AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
 }
 
 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
-                                      const Value *Val, unsigned VReg) const {
-  MachineInstr *Return = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
-  assert(Return && "Unable to build a return instruction?!");
+                                      const Value *Val, unsigned VReg) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const Function &F = *MF.getFunction();
+
+  MachineInstrBuilder MIB = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
+  assert(MIB.getInstr() && "Unable to build a return instruction?!");
 
   assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
   if (VReg) {
-    assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy()) &&
-           "Type not supported yet");
-    const Function &F = *MIRBuilder.getMF().getFunction();
-    const DataLayout &DL = F.getParent()->getDataLayout();
-    unsigned Size = DL.getTypeSizeInBits(Val->getType());
-    assert((Size == 64 || Size == 32) && "Size not supported yet");
-    unsigned ResReg = (Size == 32) ? AArch64::W0 : AArch64::X0;
-    // Set the insertion point to be right before Return.
-    MIRBuilder.setInstr(*Return, /* Before */ true);
-    MachineInstr *Copy = MIRBuilder.buildCopy(ResReg, VReg);
-    (void)Copy;
-    assert(Copy->getNextNode() == Return &&
-           "The insertion did not happen where we expected");
-    MachineInstrBuilder(MIRBuilder.getMF(), Return)
-        .addReg(ResReg, RegState::Implicit);
+    MIRBuilder.setInstr(*MIB.getInstr(), /* Before */ true);
+    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
+
+    handleAssignments(
+        MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
+        [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+          MIRBuilder.buildCopy(PhysReg, ValReg);
+          MIB.addUse(PhysReg, RegState::Implicit);
+        });
   }
   return true;
 }
 
-bool AArch64CallLowering::lowerFormalArguments(
-    MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
-    const SmallVectorImpl<unsigned> &VRegs) const {
+bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
+                                            CCAssignFn *AssignFn,
+                                            ArrayRef<MVT> ArgTypes,
+                                            ArrayRef<unsigned> ArgRegs,
+                                            AssignFnTy AssignValToReg) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = *MF.getFunction();
 
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
 
-  unsigned NumArgs = Args.size();
-  Function::const_arg_iterator CurOrigArg = Args.begin();
-  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
-  for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
-    MVT ValVT = MVT::getVT(CurOrigArg->getType());
-    CCAssignFn *AssignFn =
-        TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
-    bool Res =
-        AssignFn(i, ValVT, ValVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo);
+  unsigned NumArgs = ArgTypes.size();
+  auto CurVT = ArgTypes.begin();
+  for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
+    bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
+                        CCInfo);
     assert(!Res && "Call operand has unhandled type");
     (void)Res;
   }
 
-  assert(ArgLocs.size() == Args.size() &&
+  assert(ArgLocs.size() == ArgTypes.size() &&
          "We have a different number of location and args?!");
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
 
     assert(VA.isRegLoc() && "Not yet implemented");
-    // Transform the arguments in physical registers into virtual ones.
-    MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
-    MIRBuilder.buildCopy(VRegs[i], VA.getLocReg());
 
     switch (VA.getLocInfo()) {
     default:
@@ -103,6 +97,91 @@ bool AArch64CallLowering::lowerFormalArguments(
       assert(0 && "Not yet implemented");
       break;
     }
+
+    // Everything checks out, tell the caller where we've decided this
+    // parameter/return value should go.
+    AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
   }
   return true;
 }
+
+bool AArch64CallLowering::lowerFormalArguments(
+    MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
+    ArrayRef<unsigned> VRegs) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const Function &F = *MF.getFunction();
+
+  SmallVector<MVT, 8> ArgTys;
+  for (auto &Arg : Args)
+    ArgTys.push_back(MVT::getVT(Arg.getType()));
+
+  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+  CCAssignFn *AssignFn =
+      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+
+  return handleAssignments(
+      MIRBuilder, AssignFn, ArgTys, VRegs,
+      [](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+        MIRBuilder.getMBB().addLiveIn(PhysReg);
+        MIRBuilder.buildCopy(ValReg, PhysReg);
+      });
+}
+
+bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+                                    const CallInst &CI, unsigned CalleeReg,
+                                    unsigned ResReg,
+                                    ArrayRef<unsigned> ArgRegs) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const Function &F = *MF.getFunction();
+
+  // First step is to marshall all the function's parameters into the correct
+  // physregs and memory locations. Gather the sequence of argument types that
+  // we'll pass to the assigner function.
+  SmallVector<MVT, 8> ArgTys;
+  for (auto &Arg : CI.arg_operands())
+    ArgTys.push_back(MVT::getVT(Arg->getType()));
+
+  // Find out which ABI gets to decide where things go.
+  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+  CCAssignFn *CallAssignFn =
+      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+
+  // And finally we can do the actual assignments. For a call we need to keep
+  // track of the registers used because they'll be implicit uses of the BL.
+  SmallVector<unsigned, 8> PhysRegs;
+  handleAssignments(
+      MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
+      [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+        MIRBuilder.buildCopy(PhysReg, ValReg);
+        PhysRegs.push_back(PhysReg);
+      });
+
+  // Now we can build the actual call instruction.
+  MachineInstrBuilder MIB;
+  if (CalleeReg)
+    MIB = MIRBuilder.buildInstr(AArch64::BLR).addUse(CalleeReg);
+  else
+    MIB = MIRBuilder.buildInstr(AArch64::BL)
+              .addGlobalAddress(CI.getCalledFunction());
+
+  // Tell the call which registers are clobbered.
+  auto TRI = MF.getSubtarget().getRegisterInfo();
+  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
+
+  for (auto Reg : PhysRegs)
+    MIB.addUse(Reg, RegState::Implicit);
+
+  // Finally we can copy the returned value back into its virtual-register. In
+  // symmetry with the arguments, the physical register must be an
+  // implicit-define of the call instruction.
+  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
+  if (!CI.getType()->isVoidTy())
+    handleAssignments(
+        MIRBuilder, RetAssignFn, MVT::getVT(CI.getType()), ResReg,
+        [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+          MIRBuilder.buildCopy(ValReg, PhysReg);
+          MIB.addDef(PhysReg, RegState::Implicit);
+        });
+
+  return true;
+}
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.h b/llvm/lib/Target/AArch64/AArch64CallLowering.h
index 41162280346..f6030ad5aa0 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.h
@@ -16,6 +16,8 @@
 #define LLVM_LIB_TARGET_AARCH64_AARCH64CALLLOWERING
 
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/ValueTypes.h"
 
 namespace llvm {
 
@@ -27,10 +29,22 @@ class AArch64CallLowering: public CallLowering {
   bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                    unsigned VReg) const override;
-  bool
-  lowerFormalArguments(MachineIRBuilder &MIRBuilder,
-                       const Function::ArgumentListType &Args,
-                       const SmallVectorImpl<unsigned> &VRegs) const override;
+
+  bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+                            const Function::ArgumentListType &Args,
+                            ArrayRef<unsigned> VRegs) const override;
+
+  bool lowerCall(MachineIRBuilder &MIRBuilder, const CallInst &CI,
+                 unsigned CalleeReg, unsigned ResReg,
+                 ArrayRef<unsigned> ArgRegs) const override;
+
+private:
+  typedef std::function<void(MachineIRBuilder &, unsigned, unsigned)>
+      AssignFnTy;
+
+  bool handleAssignments(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn,
+                         ArrayRef<MVT> ArgTypes, ArrayRef<unsigned> ArgRegs,
+                         AssignFnTy AssignValToReg) const;
 };
 } // End of namespace llvm;
 #endif
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 03c474c4456..7a4f516ae3e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2434,6 +2434,12 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
   }
 }
 
+CCAssignFn *
+AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
+  return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
+                                      : RetCC_AArch64_AAPCS;
+}
+
 SDValue AArch64TargetLowering::LowerFormalArguments(
     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 4c411a425ed..69cfcbeebdd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -230,6 +230,9 @@ public:
   /// Selects the correct CCAssignFn for a given CallingConvention value.
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
 
+  /// Selects the correct CCAssignFn for a given CallingConvention value.
+  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
+
   /// Determine which of the bits specified in Mask are known to be either zero
   /// or one and return them in the KnownZero/KnownOne bitsets.
   void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 1a1da8a254a..693c18c75ea 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -36,7 +36,7 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
 
 bool AMDGPUCallLowering::lowerFormalArguments(
     MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
-    const SmallVectorImpl<unsigned> &VRegs) const {
+    ArrayRef<unsigned> VRegs) const {
   // TODO: Implement once there are generic loads/stores.
   return true;
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
index 61174bacdac..60b801f6f25 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
@@ -30,7 +30,7 @@ class AMDGPUCallLowering: public CallLowering {
 
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                             const Function::ArgumentListType &Args,
-                            const SmallVectorImpl<unsigned> &VRegs) const override;
+                            ArrayRef<unsigned> VRegs) const override;
 };
 } // End of namespace llvm;
 #endif
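
Note: the heart of this change is that lowerReturn, lowerFormalArguments and lowerCall all funnel through the new handleAssignments: the CCAssignFn decides where each value lives, and a per-caller lambda decides which way to copy and which implicit operands to record on the surrounding instruction. A minimal, self-contained sketch of that shape, using hypothetical stand-in types rather than the real LLVM classes:

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Stand-in for AssignFnTy: receives the virtual register holding a value
    // and the physical register the calling convention picked for it.
    using AssignFnTy = std::function<void(unsigned ValReg, unsigned PhysReg)>;

    // Stand-in for the CCAssignFn: here the i-th value simply goes in "reg" i.
    static unsigned assignLocation(unsigned Idx) { return Idx; }

    // The shared walk: compute each value's location, then let the caller act.
    static void handleAssignments(const std::vector<unsigned> &ValRegs,
                                  AssignFnTy AssignValToReg) {
      for (unsigned I = 0, E = ValRegs.size(); I != E; ++I)
        AssignValToReg(ValRegs[I], assignLocation(I));
    }

    int main() {
      std::vector<unsigned> VRegs = {100, 101};

      // Incoming values (formal arguments): copy physreg -> vreg.
      handleAssignments(VRegs, [](unsigned ValReg, unsigned PhysReg) {
        std::printf("%%v%u = COPY %%p%u\n", ValReg, PhysReg);
      });

      // Outgoing values (call arguments, return values): copy vreg -> physreg.
      handleAssignments(VRegs, [](unsigned ValReg, unsigned PhysReg) {
        std::printf("%%p%u = COPY %%v%u\n", PhysReg, ValReg);
      });
    }

The std::function indirection keeps all of the CCState bookkeeping in one routine; each caller contributes only the copy direction and the implicit operands it needs on its RET_ReallyLR, BL or BLR.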
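
For a concrete picture of the result, a direct call such as %res = call i32 @callee(i32 %a) should be lowered by the new lowerCall into roughly the following MIR (names simplified; further implicit operands such as LR and SP are omitted, and exact output depends on the MIR printer):

    %w0 = COPY %a                   ; argument marshalled into its ABI slot
    BL @callee, <regmask>, implicit %w0, implicit-def %w0
    %res = COPY %w0                 ; return value copied back out of W0

The <regmask> comes from getCallPreservedMask and tells later passes which registers the callee may clobber, while the implicit use and implicit-def added by the lambdas tie the argument and result copies to the call across register allocation.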

