path: root/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
author    Michael Kuperstein <mkuper@google.com>    2016-08-04 22:48:03 +0000
committer Michael Kuperstein <mkuper@google.com>    2016-08-04 22:48:03 +0000
commit    3ceac2bbd5e48a3e7d90b7344d45d9f7cf3106a5 (patch)
tree      6ae8ac6b6ffc55fa5e76436031397a45b035ca96 /llvm/lib/Target/X86/X86TargetTransformInfo.cpp
parent    742c38361bfcf53561c0fca9289abbda03a5f40b (diff)
download  bcm5719-llvm-3ceac2bbd5e48a3e7d90b7344d45d9f7cf3106a5.tar.gz
          bcm5719-llvm-3ceac2bbd5e48a3e7d90b7344d45d9f7cf3106a5.zip
[LV, X86] Be more optimistic about vectorizing shifts.
Shifts with a uniform but non-constant count were considered very expensive to
vectorize, because the splat of the uniform count and the shift would tend to
appear in different blocks. That made the splat invisible to ISel, and we'd
scalarize the shift at codegen time. Since r201655, CodeGenPrepare sinks those
splats to be next to their use, and we are able to select the appropriate
vector shifts. This updates the cost model to take this into account by making
shifts by a uniform value cheap again.

Differential Revision: https://reviews.llvm.org/D23049

llvm-svn: 277782
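To make the pattern concrete, here is a minimal, hypothetical C++ loop of the
kind this change affects (the function name and element type are illustrative,
not taken from the commit): the shift count s is the same on every iteration
but unknown at compile time, so the vectorizer can splat it once and emit one
vector shift per group of lanes (e.g. psllw for v8i16) instead of scalarizing.

#include <cstddef>
#include <cstdint>

// Uniform, non-constant shift count: `s` is loop-invariant but only known
// at run time. With this patch the cost model treats the vectorized form
// (one splat of `s` + one vector shift per 8 lanes) as cheap on SSE2.
void shl_uniform(uint16_t *a, size_t n, uint16_t s) {
  for (size_t i = 0; i < n; ++i)
    a[i] <<= s;
}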
Diffstat (limited to 'llvm/lib/Target/X86/X86TargetTransformInfo.cpp')
-rw-r--r-- llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 6ea4d420cb4..657a0451719 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -240,9 +240,16 @@ int X86TTIImpl::getArithmeticInstrCost(
static const CostTblEntry
SSE2UniformConstCostTable[] = {
- // We don't correctly identify costs of casts because they are marked as
- // custom.
// Constant splats are cheaper for the following instructions.
+ { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
+ { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
+ { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
+ { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
+ };
+
+ static const CostTblEntry
+ SSE2UniformCostTable[] = {
+ // Uniform splats are cheaper for the following instructions.
{ ISD::SHL, MVT::v16i8, 1 }, // psllw.
{ ISD::SHL, MVT::v32i8, 2 }, // psllw.
{ ISD::SHL, MVT::v8i16, 1 }, // psllw.
@@ -269,21 +276,21 @@ int X86TTIImpl::getArithmeticInstrCost(
{ ISD::SRA, MVT::v8i32, 2 }, // psrad.
{ ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.
{ ISD::SRA, MVT::v4i64, 8 }, // 2 x psrad + shuffle.
-
- { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
- { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
- { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
- { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
};
- if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
- ST->hasSSE2()) {
- // pmuldq sequence.
- if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
- return LT.first * 15;
-
- if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
- LT.second))
+ if (ST->hasSSE2() &&
+ ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
+ (Op2Info == TargetTransformInfo::OK_UniformValue))) {
+ if (Op2Info == TargetTransformInfo::OK_UniformConstantValue) {
+ // pmuldq sequence.
+ if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
+ return LT.first * 15;
+ if (const auto *Entry =
+ CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
+ return LT.first * Entry->Cost;
+ }
+ if (const auto *Entry =
+ CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
return LT.first * Entry->Cost;
}
@@ -312,12 +319,6 @@ int X86TTIImpl::getArithmeticInstrCost(
static const CostTblEntry SSE2CostTable[] = {
// We don't correctly identify costs of casts because they are marked as
// custom.
- // For some cases, where the shift amount is a scalar we would be able
- // to generate better code. Unfortunately, when this is the case the value
- // (the splat) will get hoisted out of the loop, thereby making it invisible
- // to ISel. The cost model must return worst case assumptions because it is
- // used for vectorization and we don't want to make vectorized code worse
- // than scalar code.
{ ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
{ ISD::SHL, MVT::v32i8, 2*26 }, // cmpgtb sequence.
{ ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
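For illustration, a hedged sketch (not part of the commit) of how a client
would reach the new table through the TTI API of this era: passing
OK_UniformValue as the second-operand kind is what now routes a vector shift
to SSE2UniformCostTable, e.g. { ISD::SHL, MVT::v8i16, 1 } for psllw.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Sketch: cost of a <8 x i16> shl whose shift amount is uniform but not a
// compile-time constant. With this patch and SSE2 available, the lookup hits
// SSE2UniformCostTable (cost 1, one psllw) instead of the expensive
// scalarization estimate in SSE2CostTable.
int uniformShiftCost(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  Type *VecTy = VectorType::get(Type::getInt16Ty(Ctx), 8);
  return TTI.getArithmeticInstrCost(Instruction::Shl, VecTy,
                                    TargetTransformInfo::OK_AnyValue,
                                    TargetTransformInfo::OK_UniformValue);
}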