Diffstat (limited to 'llvm/test/CodeGen/X86/vec_extract-mmx.ll')
-rw-r--r--   llvm/test/CodeGen/X86/vec_extract-mmx.ll   28
1 file changed, 28 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vec_extract-mmx.ll b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
index 4d1ecd14af8..329437cfeda 100644
--- a/llvm/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
@@ -140,5 +140,33 @@ define i32 @test3(x86_mmx %a) nounwind {
   ret i32 %tmp1
 }
 
+; Verify we don't muck with extractelts from the upper lane.
+define i32 @test4(x86_mmx %a) nounwind {
+; X32-LABEL: test4:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:    andl $-8, %esp
+; X32-NEXT:    subl $8, %esp
+; X32-NEXT:    movq %mm0, (%esp)
+; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
+; X32-NEXT:    movd %xmm0, %eax
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+;
+; X64-LABEL: test4:
+; X64:       # BB#0:
+; X64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    retq
+  %tmp0 = bitcast x86_mmx %a to <2 x i32>
+  %tmp1 = extractelement <2 x i32> %tmp0, i32 1
+  ret i32 %tmp1
+}
+
 declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
 declare void @llvm.x86.mmx.emms()
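
Note: the IR in test4 extracts the upper 32-bit lane of a 64-bit MMX value. As a rough illustration only (a hedged C sketch, not taken from the test file; the helper name upper_lane is made up), the same pattern at the source level looks like this:

    #include <mmintrin.h>

    /* Hypothetical helper mirroring test4: reinterpret the 64-bit MMX value
       as two 32-bit lanes and return lane 1 (the upper lane), matching
       `extractelement <2 x i32> %tmp0, i32 1` in the IR above. */
    static int upper_lane(__m64 a) {
      union { __m64 v; int e[2]; } u;
      u.v = a;
      return u.e[1];  /* index 0 would be the lower lane */
    }

The CHECK lines show how this is lowered: the MMX register is spilled to the stack, reloaded into an XMM register, shuffled so the upper lane lands in element 0 (pshufd), and then moved to a GPR with movd.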