author    Evan Cheng <evan.cheng@apple.com>  2011-01-06 07:58:36 +0000
committer Evan Cheng <evan.cheng@apple.com>  2011-01-06 07:58:36 +0000
commit    7998b1d6fe08c1b3010f790cc96d75137a404bf7 (patch)
tree      9b159b82359393c5742c2a4a7847faad78b24e10 /llvm/lib/Target
parent    245de78e062ab074154ee4c410ceefef7535c12e (diff)
download  bcm5719-llvm-7998b1d6fe08c1b3010f790cc96d75137a404bf7.tar.gz
          bcm5719-llvm-7998b1d6fe08c1b3010f790cc96d75137a404bf7.zip
Use movups to lower memcpy and memset even if unaligned memory access is not fast on the target (as it is on Core i7).
The theory is that a single unaligned 16-byte move is still faster than a pair of movq or a quad of movl. This will probably hurt older chips like the Pentium 4, but it should run faster on current and future Intel processors. rdar://8817010

llvm-svn: 122955
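For a sense of the tradeoff, below is a standalone C++ sketch of the two expansions of a 16-byte copy. It is not from the patch; the function names are made up for illustration. With SSE2 enabled the first typically compiles to a movups load/store pair, while the second compiles to a pair of movq moves on 64-bit targets (or a quad of movl moves on 32-bit targets).

#include <emmintrin.h>  // SSE2 intrinsics
#include <cstdint>
#include <cstring>

// One unaligned 16-byte vector move: a movups load and a movups store.
void copy16_vector(void *dst, const void *src) {
  __m128i v = _mm_loadu_si128(static_cast<const __m128i *>(src));
  _mm_storeu_si128(static_cast<__m128i *>(dst), v);
}

// The scalar alternative: two 8-byte integer moves each way,
// i.e. a pair of movq loads and stores on x86-64.
void copy16_scalar(void *dst, const void *src) {
  std::uint64_t lo, hi;
  std::memcpy(&lo, src, 8);
  std::memcpy(&hi, static_cast<const char *>(src) + 8, 8);
  std::memcpy(dst, &lo, 8);
  std::memcpy(static_cast<char *>(dst) + 8, &hi, 8);
}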
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f871b5a7701..ddec78bfff3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1063,12 +1063,8 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
// linux. This is because the stack realignment code can't handle certain
// cases like PR2962. This should be removed when PR2962 is fixed.
const Function *F = MF.getFunction();
- if (NonScalarIntSafe &&
- !F->hasFnAttr(Attribute::NoImplicitFloat)) {
+ if (NonScalarIntSafe && !F->hasFnAttr(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
- (Subtarget->isUnalignedMemAccessFast() ||
- ((DstAlign == 0 || DstAlign >= 16) &&
- (SrcAlign == 0 || SrcAlign >= 16))) &&
Subtarget->getStackAlignment() >= 16) {
if (Subtarget->hasSSE2())
return MVT::v4i32;
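For readability, here is a minimal sketch of how the predicate reads once the hunk is applied. The diff above is truncated after the SSE2 case, so the SSE1 fallback below is an assumption for illustration, not verbatim from the tree.

const Function *F = MF.getFunction();
if (NonScalarIntSafe && !F->hasFnAttr(Attribute::NoImplicitFloat)) {
  // The isUnalignedMemAccessFast() and src/dst alignment checks are
  // gone: any op of 16+ bytes qualifies as long as the stack is
  // 16-byte aligned.
  if (Size >= 16 && Subtarget->getStackAlignment() >= 16) {
    if (Subtarget->hasSSE2())
      return MVT::v4i32;       // lowered with unaligned 16-byte moves (movups)
    if (Subtarget->hasSSE1())  // assumed fallback; truncated in the diff above
      return MVT::v4f32;
  }
}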