diff options
author | Ahmed Bougacha <ahmed.bougacha@gmail.com> | 2016-05-06 17:42:57 +0000 |
---|---|---|
committer | Ahmed Bougacha <ahmed.bougacha@gmail.com> | 2016-05-06 17:42:57 +0000 |
commit | 258426ca7ad95bdf66638a1c06c1658b1d1939fd (patch) | |
tree | fc3baf0c119bd44fe66ed32b8a9d75f19be4e082 /llvm/test/CodeGen/X86/vector-bitreverse.ll | |
parent | d97dd11f2f5e8b3bb02d6e45aa3dc8f115dc17a8 (diff) | |
download | bcm5719-llvm-258426ca7ad95bdf66638a1c06c1658b1d1939fd.tar.gz bcm5719-llvm-258426ca7ad95bdf66638a1c06c1658b1d1939fd.zip |
[X86] Teach X86FixupBWInsts to promote MOV8rr/MOV16rr to MOV32rr.
Codesize is less (16) or equal (8), and we avoid partial dependencies.
Differential Revision: http://reviews.llvm.org/D19999
llvm-svn: 268760
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-bitreverse.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-bitreverse.ll | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll index 46e73b008d8..16397bf1afb 100644 --- a/llvm/test/CodeGen/X86/vector-bitreverse.ll +++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll @@ -9,68 +9,68 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind { ; SSE-LABEL: test_bitreverse_i8: ; SSE: # BB#0: -; SSE-NEXT: movb %dil, %al +; SSE-NEXT: movl %edi, %eax ; SSE-NEXT: shlb $7, %al -; SSE-NEXT: movb %dil, %cl +; SSE-NEXT: movl %edi, %ecx ; SSE-NEXT: shlb $5, %cl ; SSE-NEXT: andb $64, %cl -; SSE-NEXT: movb %dil, %dl +; SSE-NEXT: movl %edi, %edx ; SSE-NEXT: shlb $3, %dl ; SSE-NEXT: andb $32, %dl ; SSE-NEXT: orb %cl, %dl -; SSE-NEXT: movb %dil, %cl +; SSE-NEXT: movl %edi, %ecx ; SSE-NEXT: addb %cl, %cl ; SSE-NEXT: andb $16, %cl ; SSE-NEXT: orb %dl, %cl -; SSE-NEXT: movb %dil, %dl +; SSE-NEXT: movl %edi, %edx ; SSE-NEXT: shrb %dl ; SSE-NEXT: andb $8, %dl ; SSE-NEXT: orb %cl, %dl -; SSE-NEXT: movb %dil, %cl +; SSE-NEXT: movl %edi, %ecx ; SSE-NEXT: shrb $3, %cl ; SSE-NEXT: andb $4, %cl ; SSE-NEXT: orb %dl, %cl -; SSE-NEXT: movb %dil, %dl +; SSE-NEXT: movl %edi, %edx ; SSE-NEXT: shrb $5, %dl ; SSE-NEXT: andb $2, %dl ; SSE-NEXT: orb %cl, %dl ; SSE-NEXT: shrb $7, %dil ; SSE-NEXT: orb %dl, %dil ; SSE-NEXT: orb %al, %dil -; SSE-NEXT: movb %dil, %al +; SSE-NEXT: movl %edi, %eax ; SSE-NEXT: retq ; ; AVX-LABEL: test_bitreverse_i8: ; AVX: # BB#0: -; AVX-NEXT: movb %dil, %al +; AVX-NEXT: movl %edi, %eax ; AVX-NEXT: shlb $7, %al -; AVX-NEXT: movb %dil, %cl +; AVX-NEXT: movl %edi, %ecx ; AVX-NEXT: shlb $5, %cl ; AVX-NEXT: andb $64, %cl -; AVX-NEXT: movb %dil, %dl +; AVX-NEXT: movl %edi, %edx ; AVX-NEXT: shlb $3, %dl ; AVX-NEXT: andb $32, %dl ; AVX-NEXT: orb %cl, %dl -; AVX-NEXT: movb %dil, %cl +; AVX-NEXT: movl %edi, %ecx ; AVX-NEXT: addb %cl, %cl ; AVX-NEXT: andb $16, %cl ; AVX-NEXT: orb %dl, %cl -; AVX-NEXT: movb %dil, %dl +; AVX-NEXT: movl %edi, %edx ; AVX-NEXT: shrb %dl ; AVX-NEXT: andb $8, %dl ; AVX-NEXT: orb %cl, %dl -; AVX-NEXT: movb %dil, %cl +; AVX-NEXT: movl %edi, %ecx ; AVX-NEXT: shrb $3, %cl ; AVX-NEXT: andb $4, %cl ; AVX-NEXT: orb %dl, %cl -; AVX-NEXT: movb %dil, %dl +; AVX-NEXT: movl %edi, %edx ; AVX-NEXT: shrb $5, %dl ; AVX-NEXT: andb $2, %dl ; AVX-NEXT: orb %cl, %dl ; AVX-NEXT: shrb $7, %dil ; AVX-NEXT: orb %dl, %dil ; AVX-NEXT: orb %al, %dil -; AVX-NEXT: movb %dil, %al +; AVX-NEXT: movl %edi, %eax ; AVX-NEXT: retq ; ; XOP-LABEL: test_bitreverse_i8: |