author     NAKAMURA Takumi <geek4civic@gmail.com>    2015-07-14 04:03:49 +0000
committer  NAKAMURA Takumi <geek4civic@gmail.com>    2015-07-14 04:03:49 +0000
commit     0b305db8df558eb420d13508a527e4df92b1227e (patch)
tree       d05e93676c913ddfc672fda31fd42e170519d72a /llvm/lib/Target/X86/X86TargetTransformInfo.cpp
parent     bbfa514f5b62249b05cf5f553caad35d3a4cf46a (diff)
download   bcm5719-llvm-0b305db8df558eb420d13508a527e4df92b1227e.tar.gz
           bcm5719-llvm-0b305db8df558eb420d13508a527e4df92b1227e.zip
Prune trailing whitespaces and CRs.
llvm-svn: 242117
Diffstat (limited to 'llvm/lib/Target/X86/X86TargetTransformInfo.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 46
1 file changed, 23 insertions, 23 deletions
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index a7164ec8ba5..7df72609184 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -261,18 +261,18 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
     { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
     { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
     { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.
-
-    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
-    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
-    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
-    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.
-
-    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
-    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
-    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
-    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.
-
-    // It is not a good idea to vectorize division. We have to scalarize it and
+
+    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
+    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
+    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
+    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.
+
+    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
+    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
+    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
+    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.
+
+    // It is not a good idea to vectorize division. We have to scalarize it and
     // in the process we will often end up having to spilling regular
     // registers. The overhead of division is going to dominate most kernels
     // anyways so try hard to prevent vectorization of division - it is
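Beyond the whitespace cleanup, this hunk shows the shape of the data that X86TTIImpl::getArithmeticInstrCost consumes: a per-subtarget table of {ISD opcode, MVT vector type, cost} rows that is scanned for the first matching entry, which is how the vectorizer learns that, say, an SSE2 arithmetic right shift of v16i8 costs roughly 54 simple ops. Below is a minimal standalone sketch of that lookup; ISDOp, SimpleVT, and lookupCost are hypothetical stand-ins for illustration, not LLVM's actual CostTable.h API.

// Hedged sketch: mock enums standing in for llvm::ISD and llvm::MVT.
#include <cstdio>

enum ISDOp    { SHL, SRL, SRA };               // stand-in for ISD opcodes
enum SimpleVT { v16i8, v8i16, v4i32, v2i64 };  // stand-in for MVT types

struct CostTblEntry {
  ISDOp    ISD;   // node being costed
  SimpleVT Type;  // vector type the row applies to
  unsigned Cost;  // relative cost in simple-instruction units
};

// A few SSE2 rows mirrored from the hunk above.
static const CostTblEntry SSE2CostTable[] = {
  { SRL, v16i8,   26 }, // cmpgtb sequence.
  { SRL, v4i32,   16 }, // Shift each lane + blend.
  { SRA, v16i8,   54 }, // unpacked cmpgtb sequence.
  { SRA, v2i64, 2*10 }, // Scalarized.
};

// Linear scan, first match wins; a miss means "use a more generic cost".
static const CostTblEntry *lookupCost(ISDOp Op, SimpleVT VT) {
  for (const CostTblEntry &E : SSE2CostTable)
    if (E.ISD == Op && E.Type == VT)
      return &E;
  return nullptr;
}

int main() {
  if (const CostTblEntry *E = lookupCost(SRA, v16i8))
    std::printf("SRA on v16i8 costs %u\n", E->Cost); // prints 54
}

Note the pattern in the table itself: entries like 2*10 for v2i64 read as lane count times estimated per-lane cost of the scalarized sequence, which is why the "Scalarized" rows scale with the number of lanes.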
@@ -1117,17 +1117,17 @@ unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
   }
   return X86TTIImpl::getIntImmCost(Imm, Ty);
 }
-
-bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
-  int DataWidth = DataTy->getPrimitiveSizeInBits();
-
-  // Todo: AVX512 allows gather/scatter, works with strided and random as well
-  if ((DataWidth < 32) || (Consecutive == 0))
-    return false;
-  if (ST->hasAVX512() || ST->hasAVX2())
-    return true;
-  return false;
-}
+
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
+  int DataWidth = DataTy->getPrimitiveSizeInBits();
+
+  // Todo: AVX512 allows gather/scatter, works with strided and random as well
+  if ((DataWidth < 32) || (Consecutive == 0))
+    return false;
+  if (ST->hasAVX512() || ST->hasAVX2())
+    return true;
+  return false;
+}
 
 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
   return isLegalMaskedLoad(DataType, Consecutive);
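The second hunk reformats isLegalMaskedLoad, the TargetTransformInfo hook the loop vectorizer consults before emitting masked vector loads; isLegalMaskedStore, visible in the trailing context, simply forwards to it. The decision is: require a data width of at least 32 bits, require a consecutive (unit-stride) access, and require AVX2 or AVX-512, which supply the masked-move instructions. Here is a compilable sketch of the same predicate with the subtarget feature tests mocked as plain booleans; Subtarget and isLegalMaskedLoadSketch are illustrative names, not LLVM's.

#include <cstdio>

// Illustrative stand-in for the real X86 subtarget feature queries.
struct Subtarget {
  bool HasAVX2;
  bool HasAVX512;
};

// Same decision as isLegalMaskedLoad above: data at least 32 bits wide,
// a consecutive access pattern, and AVX2 or AVX-512 support.
static bool isLegalMaskedLoadSketch(const Subtarget &ST, int DataWidthBits,
                                    int Consecutive) {
  if (DataWidthBits < 32 || Consecutive == 0)
    return false; // too narrow, or gather-like access: scalarize instead
  return ST.HasAVX512 || ST.HasAVX2;
}

int main() {
  Subtarget AVX2Only = {true, false};
  std::printf("%d\n", isLegalMaskedLoadSketch(AVX2Only, 32, 1)); // 1: legal
  std::printf("%d\n", isLegalMaskedLoadSketch(AVX2Only, 16, 1)); // 0: too narrow
}

As the Todo comment in the hunk itself notes, AVX-512 gather/scatter could also serve strided and random (Consecutive == 0) accesses, which this 2015-era check still rejects outright.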