path: root/llvm/test/CodeGen/X86/vector-bitreverse.ll
author    Ahmed Bougacha <ahmed.bougacha@gmail.com>    2016-05-07 01:11:17 +0000
committer Ahmed Bougacha <ahmed.bougacha@gmail.com>    2016-05-07 01:11:17 +0000
commit    04a8fc2e37baf725aa641e789b7237f19d5b515d (patch)
tree      999608823be7cec67ac31f5f9ee4459c8436c261 /llvm/test/CodeGen/X86/vector-bitreverse.ll
parent    068ac4af391339fc0639e825b51ef53d1a792174 (diff)
[X86] Teach X86FixupBWInsts to promote MOV8rr/MOV16rr to MOV32rr.
This re-applies r268760, reverted in r268794.

Fixes http://llvm.org/PR27670

The original imp-defs assertion was way overzealous: forward all implicit
operands, except imp-defs of the new super-reg def (r268787 for GR64, but
also possible for GR16->GR32), or imp-uses of the new super-reg use.

While there, mark the source use as Undef, and add an imp-use of the old
source reg: that should cover any case of dead super-regs.

At the stage the pass runs, flags are unlikely to matter anyway; still,
let's be as correct as possible.

Also add MIR tests for the various interesting cases.

Original commit message: Codesize is less (16) or equal (8), and we avoid
partial dependencies.

Differential Revision: http://reviews.llvm.org/D19999

llvm-svn: 268831
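A minimal sketch of the promotion, using the first rewritten instruction in
the hunk below. The byte counts are standard x86-64 encodings, stated here
as context rather than taken from the patch; they match the commit's
"codesize is less or equal" claim:

    # Before: MOV8rr. Writes only AL, so the upper 56 bits of RAX are
    # preserved and the instruction carries a false (partial-register)
    # dependency on the previous value of RAX. With %dil as the source
    # it also needs a REX prefix: 40 88 f8, 3 bytes.
    movb %dil, %al

    # After: MOV32rr. Writes all of EAX and zero-extends into RAX, so
    # there is no dependency on the old value of RAX, and the encoding
    # is shorter: 89 f8, 2 bytes.
    movl %edi, %eax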
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-bitreverse.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-bitreverse.ll | 32
1 file changed, 16 insertions(+), 16 deletions(-)
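The hunk below shows only the CHECK lines. The IR under test is presumably
of this shape (a sketch inferred from the function signature visible in the
hunk; the actual body is not shown on this page):

    declare i8 @llvm.bitreverse.i8(i8)

    ; Sketch of the i8 case whose CHECK lines change below: bit-reverse
    ; the argument and return it, letting codegen expand the intrinsic.
    define i8 @test_bitreverse_i8(i8 %a) nounwind {
      %b = call i8 @llvm.bitreverse.i8(i8 %a)
      ret i8 %b
    }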
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index 46e73b008d8..16397bf1afb 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -9,68 +9,68 @@
define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-LABEL: test_bitreverse_i8:
; SSE: # BB#0:
-; SSE-NEXT: movb %dil, %al
+; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: shlb $7, %al
-; SSE-NEXT: movb %dil, %cl
+; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shlb $5, %cl
; SSE-NEXT: andb $64, %cl
-; SSE-NEXT: movb %dil, %dl
+; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shlb $3, %dl
; SSE-NEXT: andb $32, %dl
; SSE-NEXT: orb %cl, %dl
-; SSE-NEXT: movb %dil, %cl
+; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: addb %cl, %cl
; SSE-NEXT: andb $16, %cl
; SSE-NEXT: orb %dl, %cl
-; SSE-NEXT: movb %dil, %dl
+; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrb %dl
; SSE-NEXT: andb $8, %dl
; SSE-NEXT: orb %cl, %dl
-; SSE-NEXT: movb %dil, %cl
+; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrb $3, %cl
; SSE-NEXT: andb $4, %cl
; SSE-NEXT: orb %dl, %cl
-; SSE-NEXT: movb %dil, %dl
+; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrb $5, %dl
; SSE-NEXT: andb $2, %dl
; SSE-NEXT: orb %cl, %dl
; SSE-NEXT: shrb $7, %dil
; SSE-NEXT: orb %dl, %dil
; SSE-NEXT: orb %al, %dil
-; SSE-NEXT: movb %dil, %al
+; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i8:
; AVX: # BB#0:
-; AVX-NEXT: movb %dil, %al
+; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: shlb $7, %al
-; AVX-NEXT: movb %dil, %cl
+; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shlb $5, %cl
; AVX-NEXT: andb $64, %cl
-; AVX-NEXT: movb %dil, %dl
+; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shlb $3, %dl
; AVX-NEXT: andb $32, %dl
; AVX-NEXT: orb %cl, %dl
-; AVX-NEXT: movb %dil, %cl
+; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: addb %cl, %cl
; AVX-NEXT: andb $16, %cl
; AVX-NEXT: orb %dl, %cl
-; AVX-NEXT: movb %dil, %dl
+; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrb %dl
; AVX-NEXT: andb $8, %dl
; AVX-NEXT: orb %cl, %dl
-; AVX-NEXT: movb %dil, %cl
+; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrb $3, %cl
; AVX-NEXT: andb $4, %cl
; AVX-NEXT: orb %dl, %cl
-; AVX-NEXT: movb %dil, %dl
+; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrb $5, %dl
; AVX-NEXT: andb $2, %dl
; AVX-NEXT: orb %cl, %dl
; AVX-NEXT: shrb $7, %dil
; AVX-NEXT: orb %dl, %dil
; AVX-NEXT: orb %al, %dil
-; AVX-NEXT: movb %dil, %al
+; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i8: