author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-01-07 21:47:10 +0000
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-01-07 21:47:10 +0000
commit | 82e3e05fe29e68a54c9f216eed9aede7bb9d4510 (patch)
tree | 01d6015d11455504d2339b3351d4418e9b775882 /llvm/lib/Target/X86/X86TargetTransformInfo.cpp
parent | e70644dab7dc0208cf00ba3605199e0bdc278c19 (diff)
[CostModel][X86] Match 256-bit vector shift 'splat' costs for AVX2 and above
We were matching against the general vector shift costs before the uniform splat costs.
llvm-svn: 291365
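To make the reordering concrete, here is a minimal sketch of the lookup order the patch establishes. This is simplified and not the actual LLVM API: `getShiftCost`, the enum stand-ins, and the `UniformAmount` flag are invented for illustration. The point is that the splat-shift table is consulted before the general AVX2 table, so a uniform shift amount no longer picks up the cost of the general expanded-shift sequences.

```cpp
#include <cstddef>

enum : int { SHL };           // stand-in for ISD::SHL
enum : int { v4i64, v32i8 };  // stand-ins for MVT values

struct CostTblEntry { int ISD, VT, Cost; };

// Simplified stand-in for llvm::CostTableLookup: linear scan for a
// matching (opcode, type) pair.
template <std::size_t N>
const CostTblEntry *CostTableLookup(const CostTblEntry (&Tbl)[N], int ISD,
                                    int VT) {
  for (const CostTblEntry &E : Tbl)
    if (E.ISD == ISD && E.VT == VT)
      return &E;
  return nullptr;
}

int getShiftCost(bool HasAVX2, bool UniformAmount, int ISD, int VT) {
  // Splat shifts are single legal AVX2 instructions, so they are cheap...
  static const CostTblEntry AVX2ShiftCostTable[] = {
      { SHL, v4i64, 1 },   // e.g. vpsllq with a scalar count
  };
  // ...while the general table prices the expensive expanded sequences.
  static const CostTblEntry AVX2CostTable[] = {
      { SHL, v32i8, 11 },  // e.g. vpblendvb sequence
  };

  // Specialized (splat) table first...
  if (HasAVX2 && UniformAmount)
    if (const CostTblEntry *E = CostTableLookup(AVX2ShiftCostTable, ISD, VT))
      return E->Cost;
  // ...general table only afterwards.
  if (HasAVX2)
    if (const CostTblEntry *E = CostTableLookup(AVX2CostTable, ISD, VT))
      return E->Cost;
  return 1; // the real implementation falls through to further tables
}
```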
Diffstat (limited to 'llvm/lib/Target/X86/X86TargetTransformInfo.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 89
1 file changed, 44 insertions(+), 45 deletions(-)
```diff
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 29cd8edc46f..829b47b7aa2 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -263,7 +263,7 @@ int X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
       return LT.first * Entry->Cost;
 
-  static const CostTblEntry AVX2CostTable[] = {
+  static const CostTblEntry AVX2ShiftCostTable[] = {
     // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
     // customize them to detect the cases where shift amount is a scalar one.
     { ISD::SHL,     MVT::v4i32,    1 },
@@ -287,11 +287,11 @@ int X86TTIImpl::getArithmeticInstrCost(
       // is lowered into a vector multiply (vpmullw).
       return LT.first;
 
-    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
+    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
       return LT.first * Entry->Cost;
   }
 
-  static const CostTblEntry XOPCostTable[] = {
+  static const CostTblEntry XOPShiftCostTable[] = {
     // 128bit shifts take 1cy, but right shifts require negation beforehand.
     { ISD::SHL,     MVT::v16i8,    1 },
     { ISD::SRL,     MVT::v16i8,    2 },
@@ -322,48 +322,7 @@ int X86TTIImpl::getArithmeticInstrCost(
 
   // Look for XOP lowering tricks.
   if (ST->hasXOP())
-    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
-      return LT.first * Entry->Cost;
-
-  static const CostTblEntry AVX2CustomCostTable[] = {
-    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
-    { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
-
-    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
-    { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
-
-    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
-    { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
-    { ISD::SRA,  MVT::v2i64,       4 }, // srl/xor/sub sequence.
-    { ISD::SRA,  MVT::v4i64,       4 }, // srl/xor/sub sequence.
-
-    { ISD::SUB,  MVT::v32i8,       1 }, // psubb
-    { ISD::ADD,  MVT::v32i8,       1 }, // paddb
-    { ISD::SUB,  MVT::v16i16,      1 }, // psubw
-    { ISD::ADD,  MVT::v16i16,      1 }, // paddw
-    { ISD::SUB,  MVT::v8i32,       1 }, // psubd
-    { ISD::ADD,  MVT::v8i32,       1 }, // paddd
-    { ISD::SUB,  MVT::v4i64,       1 }, // psubq
-    { ISD::ADD,  MVT::v4i64,       1 }, // paddq
-
-    { ISD::MUL,  MVT::v32i8,      17 }, // extend/pmullw/trunc sequence.
-    { ISD::MUL,  MVT::v16i8,       7 }, // extend/pmullw/trunc sequence.
-    { ISD::MUL,  MVT::v16i16,      1 }, // pmullw
-    { ISD::MUL,  MVT::v8i32,       1 }, // pmulld
-    { ISD::MUL,  MVT::v4i64,       8 }, // 3*pmuludq/3*shift/2*add
-
-    { ISD::FDIV, MVT::f32,         7 }, // Haswell from http://www.agner.org/
-    { ISD::FDIV, MVT::v4f32,       7 }, // Haswell from http://www.agner.org/
-    { ISD::FDIV, MVT::v8f32,      14 }, // Haswell from http://www.agner.org/
-    { ISD::FDIV, MVT::f64,        14 }, // Haswell from http://www.agner.org/
-    { ISD::FDIV, MVT::v2f64,      14 }, // Haswell from http://www.agner.org/
-    { ISD::FDIV, MVT::v4f64,      28 }, // Haswell from http://www.agner.org/
-  };
-
-  // Look for AVX2 lowering tricks for custom cases.
-  if (ST->hasAVX2())
-    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
-                                            LT.second))
+    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
       return LT.first * Entry->Cost;
 
   static const CostTblEntry
@@ -415,6 +374,46 @@ int X86TTIImpl::getArithmeticInstrCost(
     ISD = ISD::MUL;
   }
 
+  static const CostTblEntry AVX2CostTable[] = {
+    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
+    { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
+
+    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
+    { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
+
+    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
+    { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
+    { ISD::SRA,  MVT::v2i64,       4 }, // srl/xor/sub sequence.
+    { ISD::SRA,  MVT::v4i64,       4 }, // srl/xor/sub sequence.
+
+    { ISD::SUB,  MVT::v32i8,       1 }, // psubb
+    { ISD::ADD,  MVT::v32i8,       1 }, // paddb
+    { ISD::SUB,  MVT::v16i16,      1 }, // psubw
+    { ISD::ADD,  MVT::v16i16,      1 }, // paddw
+    { ISD::SUB,  MVT::v8i32,       1 }, // psubd
+    { ISD::ADD,  MVT::v8i32,       1 }, // paddd
+    { ISD::SUB,  MVT::v4i64,       1 }, // psubq
+    { ISD::ADD,  MVT::v4i64,       1 }, // paddq
+
+    { ISD::MUL,  MVT::v32i8,      17 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v16i8,       7 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v16i16,      1 }, // pmullw
+    { ISD::MUL,  MVT::v8i32,       1 }, // pmulld
+    { ISD::MUL,  MVT::v4i64,       8 }, // 3*pmuludq/3*shift/2*add
+
+    { ISD::FDIV, MVT::f32,         7 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32,       7 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v8f32,      14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::f64,        14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v2f64,      14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f64,      28 }, // Haswell from http://www.agner.org/
+  };
+
+  // Look for AVX2 lowering tricks for custom cases.
+  if (ST->hasAVX2())
+    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
+      return LT.first * Entry->Cost;
+
   static const CostTblEntry AVX1CostTable[] = {
     // We don't have to scalarize unsupported ops. We can issue two half-sized
     // operations and we only need to extract the upper YMM half.
```
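For context, the hardware distinction the splat tables capture: with AVX2, a uniform shift amount lowers to a single shift-by-immediate (or shift-by-scalar-count) instruction, whereas per-lane amounts need the variable-shift instructions or, for element types without them, a multi-instruction expansion. A small illustration using standard `immintrin.h` intrinsics (the function names here are made up):

```cpp
#include <immintrin.h>

// Uniform ("splat") amount: compiles to a single vpslld with an
// immediate count; this is the case the splat cost tables price.
__m256i shift_splat(__m256i v) {
  return _mm256_slli_epi32(v, 3);
}

// Per-lane amounts: needs AVX2's variable shift vpsllvd; other element
// types without a variable shift fall back to expanded sequences, which
// is what the general cost table prices.
__m256i shift_variable(__m256i v, __m256i counts) {
  return _mm256_sllv_epi32(v, counts);
}
```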