diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-07-28 13:25:19 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-07-28 13:25:19 +0000 |
| commit | 81920b0a253c388d75b14ce8a80a8d977fac4d04 (patch) | |
| tree | 1800ab124ee1dcb91ba8c1dc82669265d5363896 /llvm/lib/Target | |
| parent | 72b0e38b267a0c47d56d746aaf370be937ef96f4 (diff) | |
| download | bcm5719-llvm-81920b0a253c388d75b14ce8a80a8d977fac4d04.tar.gz bcm5719-llvm-81920b0a253c388d75b14ce8a80a8d977fac4d04.zip | |
DAG: Add calling convention argument to calling convention funcs
This seems like a pretty glaring omission, and AMDGPU
wants to treat kernels differently from other calling
conventions.
llvm-svn: 338194
Diffstat (limited to 'llvm/lib/Target')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64FastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 7 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMFastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsCallLowering.cpp | 3 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsFastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsISelLowering.cpp | 6 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsISelLowering.h | 5 |
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCFastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.h | 2 |
| -rw-r--r-- | llvm/lib/Target/X86/X86FastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 6 |
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.h | 3 |
13 files changed, 27 insertions, 17 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp index 43a3ae77a17..572d1c22fee 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -3774,7 +3774,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) { if (Ret->getNumOperands() > 0) { CallingConv::ID CC = F.getCallingConv(); SmallVector<ISD::OutputArg, 4> Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); + GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ValLocs; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index 0cfdaf66544..002ee7fcc72 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -907,6 +907,7 @@ void AMDGPUTargetLowering::analyzeFormalArgumentsCompute( LLVMContext &Ctx = Fn.getParent()->getContext(); const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF); const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn); + CallingConv::ID CC = Fn.getCallingConv(); unsigned MaxAlign = 1; uint64_t ExplicitArgOffset = 0; @@ -940,10 +941,8 @@ void AMDGPUTargetLowering::analyzeFormalArgumentsCompute( EVT ArgVT = ValueVTs[Value]; EVT MemVT = ArgVT; - MVT RegisterVT = - getRegisterTypeForCallingConv(Ctx, ArgVT); - unsigned NumRegs = - getNumRegistersForCallingConv(Ctx, ArgVT); + MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT); + unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT); if (NumRegs == 1) { // This argument is not split, so the IR type is the memory type. 
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp index 26d4aaa12ac..a66cd7053c0 100644 --- a/llvm/lib/Target/ARM/ARMFastISel.cpp +++ b/llvm/lib/Target/ARM/ARMFastISel.cpp @@ -2116,7 +2116,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) { CallingConv::ID CC = F.getCallingConv(); if (Ret->getNumOperands() > 0) { SmallVector<ISD::OutputArg, 4> Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); + GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ValLocs; diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp index e82f62260b3..a705ebb6b19 100644 --- a/llvm/lib/Target/Mips/MipsCallLowering.cpp +++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp @@ -418,7 +418,8 @@ void MipsCallLowering::subTargetRegTypeForCallingConv( for (auto &Arg : Args) { EVT VT = TLI.getValueType(DL, Arg.Ty); - MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), VT); + MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), + F.getCallingConv(), VT); ISD::ArgFlagsTy Flags = Arg.Flags; Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL)); diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp index 7b39507812e..19b30a44e86 100644 --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -1662,7 +1662,7 @@ bool MipsFastISel::selectRet(const Instruction *I) { return false; SmallVector<ISD::OutputArg, 4> Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); + GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. 
SmallVector<CCValAssign, 16> ValLocs; diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index 9ffc38356b7..0677d378a11 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -111,6 +111,7 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) { // The MIPS MSA ABI passes vector arguments in the integer register set. // The number of integer registers used is dependant on the ABI used. MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const { if (VT.isVector()) { if (Subtarget.isABI_O32()) { @@ -123,6 +124,7 @@ MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, } unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const { if (VT.isVector()) return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)), @@ -131,10 +133,10 @@ unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, } unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv( - LLVMContext &Context, EVT VT, EVT &IntermediateVT, + LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const { // Break down vector types to either 2 i64s or 4 i32s. - RegisterVT = getRegisterTypeForCallingConv(Context, VT) ; + RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT); IntermediateVT = RegisterVT; NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits() ? 
VT.getVectorNumElements() diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h index b58d92c370d..5a0de45c44f 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.h +++ b/llvm/lib/Target/Mips/MipsISelLowering.h @@ -288,17 +288,18 @@ class TargetRegisterClass; /// Return the register type for a given MVT, ensuring vectors are treated /// as a series of gpr sized integers. - MVT getRegisterTypeForCallingConv(LLVMContext &Context, + MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override; /// Return the number of registers for a given MVT, ensuring vectors are /// treated as a series of gpr sized integers. unsigned getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const override; /// Break down vectors to the correct number of gpr sized integers. unsigned getVectorTypeBreakdownForCallingConv( - LLVMContext &Context, EVT VT, EVT &IntermediateVT, + LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override; /// Return the correct alignment for the current calling convention. diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp index b00655b5022..f212894035d 100644 --- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp +++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp @@ -1697,7 +1697,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) { if (Ret->getNumOperands() > 0) { SmallVector<ISD::OutputArg, 4> Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); + GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. 
SmallVector<CCValAssign, 16> ValLocs; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 1e3e14c7114..140d6ed25ee 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1224,6 +1224,7 @@ unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty, } unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv:: ID CC, EVT VT) const { if (Subtarget.hasSPE() && VT == MVT::f64) return 2; @@ -1231,6 +1232,7 @@ unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, } MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, + CallingConv:: ID CC, EVT VT) const { if (Subtarget.hasSPE() && VT == MVT::f64) return MVT::i32; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h index 9b8d6435515..06c7a51b2eb 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -872,9 +872,11 @@ namespace llvm { MCContext &Ctx) const override; unsigned getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv:: ID CC, EVT VT) const override; MVT getRegisterTypeForCallingConv(LLVMContext &Context, + CallingConv:: ID CC, EVT VT) const override; private: diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp index de8b40f28a8..f82e46f0c59 100644 --- a/llvm/lib/Target/X86/X86FastISel.cpp +++ b/llvm/lib/Target/X86/X86FastISel.cpp @@ -1195,7 +1195,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { if (Ret->getNumOperands() > 0) { SmallVector<ISD::OutputArg, 4> Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); + GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. 
SmallVector<CCValAssign, 16> ValLocs; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index d91d23f0529..b4407e8ed29 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1800,17 +1800,19 @@ X86TargetLowering::getPreferredVectorAction(EVT VT) const { } MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const { if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI()) return MVT::v32i8; - return TargetLowering::getRegisterTypeForCallingConv(Context, VT); + return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); } unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const { if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI()) return 1; - return TargetLowering::getNumRegistersForCallingConv(Context, VT); + return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); } EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 32215b170a8..b71e9619d19 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -1097,10 +1097,11 @@ namespace llvm { /// Customize the preferred legalization strategy for certain types. LegalizeTypeAction getPreferredVectorAction(EVT VT) const override; - MVT getRegisterTypeForCallingConv(LLVMContext &Context, + MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override; unsigned getNumRegistersForCallingConv(LLVMContext &Context, + CallingConv::ID CC, EVT VT) const override; bool isIntDivCheap(EVT VT, AttributeList Attr) const override; |

