Diffstat (limited to 'llvm/lib/Target/X86/X86TargetTransformInfo.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 289
1 file changed, 136 insertions, 153 deletions
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 9d7f1238fff..d792f930fc4 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -17,6 +17,7 @@
 #include "X86.h"
 #include "X86TargetMachine.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Target/CostTable.h"
@@ -25,110 +26,92 @@ using namespace llvm;
 #define DEBUG_TYPE "x86tti"
 
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeX86TTIPass(PassRegistry &);
-}
-
 namespace {
 
-class X86TTI final : public ImmutablePass, public TargetTransformInfo {
+class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
+  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
+  typedef TargetTransformInfo TTI;
+
   const X86Subtarget *ST;
   const X86TargetLowering *TLI;
 
-  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
-  /// are set if the result needs to be inserted and/or extracted from vectors.
-  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
 
 public:
-  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
-    llvm_unreachable("This pass cannot be directly constructed");
-  }
-
-  X86TTI(const X86TargetMachine *TM)
-      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
-        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
-    initializeX86TTIPass(*PassRegistry::getPassRegistry());
-  }
-
-  void initializePass() override {
-    pushTTIStack(this);
+  explicit X86TTIImpl(const X86TargetMachine *TM = nullptr)
+      : BaseT(TM), ST(TM ? TM->getSubtargetImpl() : nullptr),
+        TLI(ST ? ST->getTargetLowering() : nullptr) {}
+
+  // Provide value semantics. MSVC requires that we spell all of these out.
+  X86TTIImpl(const X86TTIImpl &Arg)
+      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+  X86TTIImpl(X86TTIImpl &&Arg)
+      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+        TLI(std::move(Arg.TLI)) {}
+  X86TTIImpl &operator=(const X86TTIImpl &RHS) {
+    BaseT::operator=(static_cast<const BaseT &>(RHS));
+    ST = RHS.ST;
+    TLI = RHS.TLI;
+    return *this;
   }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    TargetTransformInfo::getAnalysisUsage(AU);
-  }
-
-  /// Pass identification.
-  static char ID;
-
-  /// Provide necessary pointer adjustments for the two base classes.
-  void *getAdjustedAnalysisPointer(const void *ID) override {
-    if (ID == &TargetTransformInfo::ID)
-      return (TargetTransformInfo*)this;
-    return this;
+  X86TTIImpl &operator=(X86TTIImpl &&RHS) {
+    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+    ST = std::move(RHS.ST);
+    TLI = std::move(RHS.TLI);
+    return *this;
   }
 
   /// \name Scalar TTI Implementations
   /// @{
 
-  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
 
   /// @}
 
   /// \name Vector TTI Implementations
   /// @{
 
-  unsigned getNumberOfRegisters(bool Vector) const override;
-  unsigned getRegisterBitWidth(bool Vector) const override;
-  unsigned getMaxInterleaveFactor() const override;
-  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
-                                  OperandValueKind, OperandValueProperties,
-                                  OperandValueProperties) const override;
-  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
-                          int Index, Type *SubTp) const override;
-  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
-                            Type *Src) const override;
-  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
-                              Type *CondTy) const override;
-  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
-                              unsigned Index) const override;
+  unsigned getNumberOfRegisters(bool Vector);
+  unsigned getRegisterBitWidth(bool Vector);
+  unsigned getMaxInterleaveFactor();
+  unsigned getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty,
+      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+                          Type *SubTp);
+  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
+  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
   unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
-                           unsigned AddressSpace) const override;
-  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
-                                 unsigned Alignment,
-                                 unsigned AddressSpace) const override;
+                           unsigned AddressSpace);
+  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                                 unsigned AddressSpace);
 
-  unsigned getAddressComputationCost(Type *PtrTy,
-                                     bool IsComplex) const override;
+  unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex);
 
-  unsigned getReductionCost(unsigned Opcode, Type *Ty,
-                            bool IsPairwiseForm) const override;
+  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
 
-  unsigned getIntImmCost(int64_t) const;
+  unsigned getIntImmCost(int64_t);
 
-  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
+  unsigned getIntImmCost(const APInt &Imm, Type *Ty);
 
   unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
-                         Type *Ty) const override;
+                         Type *Ty);
   unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
-                         Type *Ty) const override;
-  bool isLegalMaskedLoad (Type *DataType, int Consecutive) const override;
-  bool isLegalMaskedStore(Type *DataType, int Consecutive) const override;
+                         Type *Ty);
+  bool isLegalMaskedLoad(Type *DataType, int Consecutive);
+  bool isLegalMaskedStore(Type *DataType, int Consecutive);
 
   /// @}
 };
 
 } // end anonymous namespace
 
-INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
-                   "X86 Target Transform Info", true, true, false)
-char X86TTI::ID = 0;
-
 ImmutablePass *
 llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
-  return new X86TTI(TM);
+  return new TargetTransformInfoWrapperPass(X86TTIImpl(TM));
 }
 
@@ -138,15 +121,16 @@ llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
 //
 //===----------------------------------------------------------------------===//
 
-X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
+TargetTransformInfo::PopcntSupportKind
+X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   // TODO: Currently the __builtin_popcount() implementation using SSE3
   //   instructions is inefficient. Once the problem is fixed, we should
   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
-  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
+  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
 }
 
-unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
+unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
   if (Vector && !ST->hasSSE1())
     return 0;
 
@@ -158,7 +142,7 @@ unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
   return 8;
 }
 
-unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
+unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
   if (Vector) {
     if (ST->hasAVX512()) return 512;
     if (ST->hasAVX()) return 256;
@@ -172,7 +156,7 @@ unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
 
 }
 
-unsigned X86TTI::getMaxInterleaveFactor() const {
+unsigned X86TTIImpl::getMaxInterleaveFactor() {
   if (ST->isAtom())
     return 1;
 
@@ -184,10 +168,10 @@ unsigned X86TTI::getMaxInterleaveFactor() const {
   return 2;
 }
 
-unsigned X86TTI::getArithmeticInstrCost(
-    unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
-    OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
-    OperandValueProperties Opd2PropInfo) const {
+unsigned X86TTIImpl::getArithmeticInstrCost(
+    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
+    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
+    TTI::OperandValueProperties Opd2PropInfo) {
   // Legalize the type.
   std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
 
@@ -442,17 +426,16 @@ unsigned X86TTI::getArithmeticInstrCost(
     return LT.first * 6;
 
   // Fallback to the default implementation.
-  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
-                                                     Op2Info);
+  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
 }
 
-unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
-                                Type *SubTp) const {
+unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+                                    Type *SubTp) {
   // We only estimate the cost of reverse and alternate shuffles.
-  if (Kind != SK_Reverse && Kind != SK_Alternate)
-    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
+    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
 
-  if (Kind == SK_Reverse) {
+  if (Kind == TTI::SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
     unsigned Cost = 1;
     if (LT.second.getSizeInBits() > 128)
@@ -462,7 +445,7 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
     return Cost * LT.first;
   }
 
-  if (Kind == SK_Alternate) {
+  if (Kind == TTI::SK_Alternate) {
     // 64-bit packed float vectors (v2f32) are widened to type v4f32.
     // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
     std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
@@ -555,13 +538,13 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
     int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
     if (Idx != -1)
       return LT.first * SSEAltShuffleTbl[Idx].Cost;
-    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
   }
 
-  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
 }
 
-unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
+unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
 
@@ -643,7 +626,7 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
 
   // The function getSimpleVT only handles simple value types.
   if (!SrcTy.isSimple() || !DstTy.isSimple())
-    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+    return BaseT::getCastInstrCost(Opcode, Dst, Src);
 
   static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
@@ -762,11 +745,11 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
       return AVXConversionTbl[Idx].Cost;
   }
 
-  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+  return BaseT::getCastInstrCost(Opcode, Dst, Src);
 }
 
-unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
-                                    Type *CondTy) const {
+unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+                                        Type *CondTy) {
   // Legalize the type.
   std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
 
@@ -832,11 +815,11 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
       return LT.first * SSE42CostTbl[Idx].Cost;
   }
 
-  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
 }
 
-unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
-                                    unsigned Index) const {
+unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+                                        unsigned Index) {
   assert(Val->isVectorTy() && "This must be a vector type");
 
   if (Index != -1U) {
@@ -856,26 +839,27 @@ unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
     return 0;
   }
 
-  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+  return BaseT::getVectorInstrCost(Opcode, Val, Index);
 }
 
-unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
-                                          bool Extract) const {
+unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
+                                              bool Extract) {
   assert (Ty->isVectorTy() && "Can only scalarize vectors");
   unsigned Cost = 0;
 
   for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
     if (Insert)
-      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
-      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
 
   return Cost;
 }
 
-unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
-                                 unsigned AddressSpace) const {
+unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+                                     unsigned Alignment,
+                                     unsigned AddressSpace) {
   // Handle non-power-of-two vectors such as <3 x float>
   if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
     unsigned NumElem = VTy->getVectorNumElements();
@@ -893,10 +877,8 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
 
     // Assume that all other non-power-of-two numbers are scalarized.
     if (!isPowerOf2_32(NumElem)) {
-      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
-                                                           VTy->getScalarType(),
-                                                           Alignment,
-                                                           AddressSpace);
+      unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
+                                             Alignment, AddressSpace);
       unsigned SplitCost = getScalarizationOverhead(Src,
                                                     Opcode == Instruction::Load,
                                                     Opcode==Instruction::Store);
@@ -920,9 +902,9 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
   return Cost;
 }
 
-unsigned X86TTI::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
-                                       unsigned Alignment,
-                                       unsigned AddressSpace) const {
+unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
+                                           unsigned Alignment,
+                                           unsigned AddressSpace) {
   VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
   if (!SrcVTy)
     // To calculate scalar take the regular cost, without mask
@@ -945,9 +927,9 @@ unsigned X86TTI::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
     unsigned ValueSplitCost =
       getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
                                Opcode == Instruction::Store);
-    unsigned MemopCost = NumElem *
-      TargetTransformInfo::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
-                                           Alignment, AddressSpace);
+    unsigned MemopCost =
+        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
+                                         Alignment, AddressSpace);
     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
   }
 
@@ -957,15 +939,14 @@ unsigned X86TTI::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
   if (LT.second != TLI->getValueType(SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
     // Promotion requires expand/truncate for data and a shuffle for mask.
-    Cost += getShuffleCost(TargetTransformInfo::SK_Alternate, SrcVTy, 0, 0) +
-            getShuffleCost(TargetTransformInfo::SK_Alternate, MaskTy, 0, 0);
-
+    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
+            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
+
   else if (LT.second.getVectorNumElements() > NumElem) {
     VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                             LT.second.getVectorNumElements());
     // Expanding requires fill mask with zeroes
-    Cost += getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
-                           NewMaskTy, 0, MaskTy);
+    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
   }
   if (!ST->hasAVX512())
     return Cost + LT.first*4; // Each maskmov costs 4
@@ -974,7 +955,7 @@ unsigned X86TTI::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
   return Cost+LT.first;
 }
 
-unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
   // Address computations in vectorized code with non-consecutive addresses will
   // likely result in more instructions compared to scalar code where the
   // computation can more often be merged into the index mode. The resulting
@@ -984,11 +965,11 @@ unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
   if (Ty->isVectorTy() && IsComplex)
     return NumVectorInstToHideOverhead;
 
-  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
+  return BaseT::getAddressComputationCost(Ty, IsComplex);
 }
 
-unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
-                                  bool IsPairwise) const {
+unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
+                                      bool IsPairwise) {
 
   std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
 
@@ -1064,23 +1045,23 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
     }
   }
 
-  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
+  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
 }
 
 /// \brief Calculate the cost of materializing a 64-bit value. This helper
 /// method might only calculate a fraction of a larger immediate. Therefore it
 /// is valid to return a cost of ZERO.
-unsigned X86TTI::getIntImmCost(int64_t Val) const {
+unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
   if (Val == 0)
-    return TCC_Free;
+    return TTI::TCC_Free;
 
   if (isInt<32>(Val))
-    return TCC_Basic;
+    return TTI::TCC_Basic;
 
-  return 2 * TCC_Basic;
+  return 2 * TTI::TCC_Basic;
 }
 
-unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -1092,10 +1073,10 @@ unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
   // Fixme: Create a cost model for types larger than i128 once the codegen
   // issues have been fixed.
   if (BitSize > 128)
-    return TCC_Free;
+    return TTI::TCC_Free;
 
   if (Imm == 0)
-    return TCC_Free;
+    return TTI::TCC_Free;
 
   // Sign-extend all constants to a multiple of 64-bit.
   APInt ImmVal = Imm;
@@ -1114,26 +1095,27 @@ unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
   return std::max(1U, Cost);
 }
 
-unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
-                               Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
+                                   const APInt &Imm, Type *Ty) {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
   // There is no cost model for constants with a bit size of 0. Return TCC_Free
   // here, so that constant hoisting will ignore this constant.
   if (BitSize == 0)
-    return TCC_Free;
+    return TTI::TCC_Free;
 
   unsigned ImmIdx = ~0U;
   switch (Opcode) {
-  default: return TCC_Free;
+  default:
+    return TTI::TCC_Free;
   case Instruction::GetElementPtr:
     // Always hoist the base address of a GetElementPtr. This prevents the
     // creation of new constants for every base constant that gets constant
     // folded with the offset.
     if (Idx == 0)
-      return 2 * TCC_Basic;
-    return TCC_Free;
+      return 2 * TTI::TCC_Basic;
+    return TTI::TCC_Free;
   case Instruction::Store:
     ImmIdx = 0;
     break;
@@ -1155,7 +1137,7 @@ unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
   case Instruction::LShr:
   case Instruction::AShr:
     if (Idx == 1)
-      return TCC_Free;
+      return TTI::TCC_Free;
     break;
   case Instruction::Trunc:
   case Instruction::ZExt:
@@ -1173,27 +1155,28 @@ unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
 
   if (Idx == ImmIdx) {
     unsigned NumConstants = (BitSize + 63) / 64;
-    unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
-    return (Cost <= NumConstants * TCC_Basic)
-      ? static_cast<unsigned>(TCC_Free)
-      : Cost;
+    unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
+    return (Cost <= NumConstants * TTI::TCC_Basic)
+               ? static_cast<unsigned>(TTI::TCC_Free)
+               : Cost;
   }
 
-  return X86TTI::getIntImmCost(Imm, Ty);
+  return X86TTIImpl::getIntImmCost(Imm, Ty);
 }
 
-unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
-                               const APInt &Imm, Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+                                   const APInt &Imm, Type *Ty) {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
   // There is no cost model for constants with a bit size of 0. Return TCC_Free
   // here, so that constant hoisting will ignore this constant.
   if (BitSize == 0)
-    return TCC_Free;
+    return TTI::TCC_Free;
 
   switch (IID) {
-  default: return TCC_Free;
+  default:
+    return TTI::TCC_Free;
   case Intrinsic::sadd_with_overflow:
   case Intrinsic::uadd_with_overflow:
   case Intrinsic::ssub_with_overflow:
@@ -1201,22 +1184,22 @@ unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
   case Intrinsic::smul_with_overflow:
   case Intrinsic::umul_with_overflow:
     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
-      return TCC_Free;
+      return TTI::TCC_Free;
     break;
   case Intrinsic::experimental_stackmap:
     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
-      return TCC_Free;
+      return TTI::TCC_Free;
     break;
   case Intrinsic::experimental_patchpoint_void:
   case Intrinsic::experimental_patchpoint_i64:
     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
-      return TCC_Free;
+      return TTI::TCC_Free;
     break;
   }
 
-  return X86TTI::getIntImmCost(Imm, Ty);
+  return X86TTIImpl::getIntImmCost(Imm, Ty);
 }
 
-bool X86TTI::isLegalMaskedLoad(Type *DataTy, int Consecutive) const {
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
   int DataWidth = DataTy->getPrimitiveSizeInBits();
 
   // Todo: AVX512 allows gather/scatter, works with strided and random as well
@@ -1227,7 +1210,7 @@ bool X86TTI::isLegalMaskedLoad(Type *DataTy, int Consecutive) const {
   return false;
 }
 
-bool X86TTI::isLegalMaskedStore(Type *DataType, int Consecutive) const {
+bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
   return isLegalMaskedLoad(DataType, Consecutive);
 }
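
Notes on the patterns in this change follow.

The heart of the diff is the move from virtual dispatch (an ImmutablePass overriding TargetTransformInfo hooks marked `const override`) to static dispatch via the curiously recurring template pattern: X86TTIImpl derives from BasicTTIImplBase<X86TTIImpl>, and the base reaches back into the most-derived class at compile time. A minimal sketch of that dispatch, using invented names (CostModelBase, X86CostModel) rather than the real LLVM classes:

#include <cstdio>

// Sketch only: CostModelBase stands in for BasicTTIImplBase, X86CostModel
// for X86TTIImpl. Opcode 7 and the costs are made up for illustration.
template <typename T> class CostModelBase {
public:
  // Default cost; a target "overrides" this by shadowing, not via virtual.
  unsigned getArithmeticInstrCost(unsigned Opcode) { return 1; }

  unsigned getVectorizedCost(unsigned Opcode, unsigned NumOps) {
    // Static downcast: the base reaches the derived method without any
    // vtable, which is why the patch drops `const` and `override` from
    // X86TTIImpl's methods.
    return NumOps * static_cast<T *>(this)->getArithmeticInstrCost(Opcode);
  }
};

class X86CostModel : public CostModelBase<X86CostModel> {
  typedef CostModelBase<X86CostModel> BaseT;

public:
  unsigned getArithmeticInstrCost(unsigned Opcode) {
    if (Opcode == 7) // pretend opcode 7 is expensive on this target
      return 6;
    return BaseT::getArithmeticInstrCost(Opcode); // explicit fallback
  }
};

int main() {
  X86CostModel TTI;
  std::printf("%u\n", TTI.getVectorizedCost(7, 4)); // prints 24
}

This also explains the mechanical renames throughout the diff: every fallback that used to call TargetTransformInfo::foo(...) now calls BaseT::foo(...), and the old TopTTI-> indirection in getScalarizationOverhead becomes a plain member call, since `this` already is the most-derived object.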
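The spelled-out copy and move operations ("MSVC requires that we spell all of these out") exist because the factory now returns `new TargetTransformInfoWrapperPass(X86TTIImpl(TM))`: the concrete implementation is stored by value inside a type-erased wrapper instead of being the pass itself. A hedged sketch of that storage idiom, with invented names (AnyTTI, FakeX86TTI); the real machinery lives in TargetTransformInfo and the wrapper pass, not here:

#include <memory>
#include <utility>

// Classic concept/model type erasure: AnyTTI owns a copy of whatever
// concrete cost model it is given.
class AnyTTI {
  struct Concept {
    virtual ~Concept() {}
    virtual unsigned getNumberOfRegisters(bool Vector) = 0;
  };
  template <typename T> struct Model : Concept {
    T Impl; // stored by value -- T must be copyable/movable
    explicit Model(T I) : Impl(std::move(I)) {}
    unsigned getNumberOfRegisters(bool Vector) override {
      return Impl.getNumberOfRegisters(Vector);
    }
  };
  std::unique_ptr<Concept> Impl;

public:
  // Moving the target's implementation in is what forces X86TTIImpl to
  // provide usable copy/move constructors in the patch above.
  template <typename T> AnyTTI(T I) : Impl(new Model<T>(std::move(I))) {}

  unsigned getNumberOfRegisters(bool Vector) {
    return Impl->getNumberOfRegisters(Vector);
  }
};

struct FakeX86TTI {
  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 16 : 8; }
};

int main() {
  AnyTTI TTI{FakeX86TTI()};
  return TTI.getNumberOfRegisters(true) == 16 ? 0 : 1;
}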
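Note that the cost logic itself is untouched by the refactor; only the spelling of the enum scopes changes (TCC_Free becomes TTI::TCC_Free, and so on). For instance, getIntImmCost(int64_t) still models x86 immediate materialization as: zero is free, anything representable as a sign-extended 32-bit immediate costs one basic instruction, and a full 64-bit constant costs two. The same rule in isolation, with the TCC_* values mirroring TargetTransformInfo's:

#include <cstdint>

enum { TCC_Free = 0, TCC_Basic = 1 };

// Mirrors getIntImmCost(int64_t) from the diff above.
unsigned immCost(int64_t Val) {
  if (Val == 0)
    return TCC_Free;                          // folds into the instruction
  if (Val >= INT32_MIN && Val <= INT32_MAX)   // equivalent of isInt<32>(Val)
    return TCC_Basic;                         // one immediate-form instruction
  return 2 * TCC_Basic;                       // roughly movabs plus the use
}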
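Likewise, getScalarizationOverhead keeps its per-element accounting: scalarizing an N-element vector operation pays one insert and/or one extract per element. A self-contained restatement of that accounting; the element costs are passed in as parameters here instead of queried from a cost model, purely to keep the example standalone:

// Sketch of the loop in getScalarizationOverhead above.
unsigned scalarizationOverhead(unsigned NumElts, bool Insert, bool Extract,
                               unsigned InsertCost, unsigned ExtractCost) {
  unsigned Cost = 0;
  for (unsigned I = 0; I != NumElts; ++I) {
    if (Insert)
      Cost += InsertCost;  // insertelement per produced lane
    if (Extract)
      Cost += ExtractCost; // extractelement per consumed lane
  }
  return Cost;
}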