summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
diff options
context:
space:
mode:
authorEvan Cheng <evan.cheng@apple.com>2011-01-06 07:58:36 +0000
committerEvan Cheng <evan.cheng@apple.com>2011-01-06 07:58:36 +0000
commit7998b1d6fe08c1b3010f790cc96d75137a404bf7 (patch)
tree9b159b82359393c5742c2a4a7847faad78b24e10 /llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
parent245de78e062ab074154ee4c410ceefef7535c12e (diff)
downloadbcm5719-llvm-7998b1d6fe08c1b3010f790cc96d75137a404bf7.tar.gz
bcm5719-llvm-7998b1d6fe08c1b3010f790cc96d75137a404bf7.zip
Use movups to lower memcpy and memset even if it's not fast (like corei7).
The theory is it's still faster than a pair of movq / a quad of movl. This will probably hurt older chips like P4 but should run faster on current and future Intel processors. rdar://8817010 llvm-svn: 122955
Diffstat (limited to 'llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll')
-rw-r--r-- llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
index 8fe0309421e..66dc0eabac3 100644
--- a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
+++ b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
@@ -19,8 +19,8 @@ entry:
}
; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
-; CHECK: movb 30(%rsp), %dl
-; CHECK: movb (%rsp), %sil
-; CHECK: movb %sil, (%rsp)
-; CHECK: movb %dl, 30(%rsp)
+; CHECK: movb 30(%rsp), %cl
+; CHECK: movb (%rsp), %dl
+; CHECK: movb %dl, (%rsp)
+; CHECK: movb %cl, 30(%rsp)
; CHECK: callq ___stack_chk_fail
OpenPOWER on IntegriCloud