Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-bc.ll       | 17
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-mmx.ll | 11
2 files changed, 4 insertions, 24 deletions
diff --git a/llvm/test/CodeGen/X86/fast-isel-bc.ll b/llvm/test/CodeGen/X86/fast-isel-bc.ll
index 3bc84c88ad8..3287f992cd0 100644
--- a/llvm/test/CodeGen/X86/fast-isel-bc.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-bc.ll
@@ -7,19 +7,12 @@ declare void @func2(x86_mmx)
 
 ; This isn't spectacular, but it's MMX code at -O0...
 
-; For now, handling of x86_mmx parameters in fast Isel is unimplemented,
-; so we get pretty poor code. The below is preferable.
-; CHEK: movl $2, %eax
-; CHEK: movd %rax, %mm0
-; CHEK: movd %mm0, %rdi
 define void @func1() nounwind {
 ; X86-LABEL: func1:
 ; X86:       ## %bb.0:
 ; X86-NEXT:    subl $12, %esp
-; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT:    movsd %xmm0, (%esp)
-; X86-NEXT:    movq (%esp), %mm0
+; X86-NEXT:    movq LCPI0_0, %mm0 ## mm0 = 0x200000000
 ; X86-NEXT:    calll _func2
 ; X86-NEXT:    addl $12, %esp
 ; X86-NEXT:    retl
@@ -27,13 +20,7 @@ define void @func1() nounwind {
 ; X64-LABEL: func1:
 ; X64:       ## %bb.0:
 ; X64-NEXT:    pushq %rax
-; X64-NEXT:    movl $2, %eax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    movq %rcx, %xmm0
-; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT:    movq %xmm0, (%rsp)
-; X64-NEXT:    movq (%rsp), %mm0
+; X64-NEXT:    movq {{.*}}(%rip), %mm0 ## mm0 = 0x200000000
 ; X64-NEXT:    movq2dq %mm0, %xmm0
 ; X64-NEXT:    callq _func2
 ; X64-NEXT:    popq %rax
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
index a2f6ecc25cc..a00df1456b7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -33,25 +33,18 @@ define void @test1() {
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    .cfi_offset %edi, -8
 ; X32-NEXT:    pxor %mm0, %mm0
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movsd %xmm0, (%esp)
-; X32-NEXT:    movq (%esp), %mm1
+; X32-NEXT:    movq LCPI1_0, %mm1 ## mm1 = 0x7070606040400000
 ; X32-NEXT:    xorl %edi, %edi
 ; X32-NEXT:    maskmovq %mm1, %mm0
-; X32-NEXT:    addl $8, %esp
 ; X32-NEXT:    popl %edi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test1:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    pxor %mm0, %mm0
-; X64-NEXT:    movq {{.*}}(%rip), %rax
-; X64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %mm1
+; X64-NEXT:    movq {{.*}}(%rip), %mm1 ## mm1 = 0x7070606040400000
 ; X64-NEXT:    xorl %edi, %edi
 ; X64-NEXT:    maskmovq %mm1, %mm0
 ; X64-NEXT:    retq
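
Reading the new check lines: in both tests the 64-bit MMX constant is now expected to be loaded with a single movq straight from the constant pool (LCPI0_0 / LCPI1_0), replacing the old sequence that bounced the value through an XMM register and a stack slot. As a rough guide to what the first test exercises, here is a minimal sketch of IR in the shape of func1; the actual body is not shown in this diff, so the constant operand and the call below are an assumption inferred from the checked value 0x200000000.

; Hypothetical sketch, not taken from the diff: a <2 x i32> constant bitcast
; to x86_mmx and passed to func2 makes the backend materialize the 64-bit
; value 0x0000000200000000 (element 1 lands in the high half on little-endian),
; matching the "mm0 = 0x200000000" annotation in the checks above.
declare void @func2(x86_mmx)

define void @func1() nounwind {
  call void @func2(x86_mmx bitcast (<2 x i32> <i32 0, i32 2> to x86_mmx))
  ret void
}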