author     Simon Pilgrim <llvm-dev@redking.me.uk>   2016-04-01 20:55:19 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2016-04-01 20:55:19 +0000
commit     a372a0f29560f6946212f41c82205892f99cd3a3 (patch)
tree       1b2809261c3a80d571eb51ae8cf591dc9cd1b626 /llvm/test/CodeGen/X86/vec_extract-mmx.ll
parent     66b1bb45b5aa2816621d83f75edb5622b848fc15 (diff)
[X86][SSE] Regenerated the vec_extract tests.
llvm-svn: 265183
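For context, the regenerated CHECK lines in the diff below come from the script named in the test header, utils/update_llc_test_checks.py. A minimal sketch of how one such test is regenerated from an LLVM source tree (not part of this commit; the build directory and the --llc-binary option are assumptions about the local setup):

# Minimal sketch, assuming an out-of-tree build in ../build and that
# update_llc_test_checks.py accepts --llc-binary; adjust paths as needed.
cd llvm
python utils/update_llc_test_checks.py \
  --llc-binary=../build/bin/llc \
  test/CodeGen/X86/vec_extract-mmx.ll

The script runs each RUN line through llc and rewrites the per-prefix CHECK blocks (here X32 and X64) to match the current codegen output.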
Diffstat (limited to 'llvm/test/CodeGen/X86/vec_extract-mmx.ll')
-rw-r--r--   llvm/test/CodeGen/X86/vec_extract-mmx.ll | 104
1 file changed, 81 insertions(+), 23 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vec_extract-mmx.ll b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
index 780066d2da1..6d64a9e5571 100644
--- a/llvm/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
@@ -1,12 +1,35 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
-define i32 @test0(<1 x i64>* %v4) {
-; CHECK-LABEL: test0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: pshufw $238, (%[[REG:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %eax
-; CHECK-NEXT: addl $32, %eax
-; CHECK-NEXT: retq
+define i32 @test0(<1 x i64>* %v4) nounwind {
+; X32-LABEL: test0:
+; X32: # BB#0: # %entry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $24, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X32-NEXT: movl %ecx, (%esp)
+; X32-NEXT: pshufw $238, (%esp), %mm0 # mm0 = mem[2,3,2,3]
+; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: addl $32, %eax
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: test0:
+; X64: # BB#0: # %entry
+; X64-NEXT: pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
+; X64-NEXT: movd %mm0, %eax
+; X64-NEXT: addl $32, %eax
+; X64-NEXT: retq
entry:
%v5 = load <1 x i64>, <1 x i64>* %v4, align 8
%v12 = bitcast <1 x i64> %v5 to <4 x i16>
@@ -21,14 +44,32 @@ entry:
ret i32 %v20
}
-define i32 @test1(i32* nocapture readonly %ptr) {
-; CHECK-LABEL: test1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd (%[[REG]]), %mm0
-; CHECK-NEXT: pshufw $232, %mm0, %mm0
-; CHECK-NEXT: movd %mm0, %eax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i32 @test1(i32* nocapture readonly %ptr) nounwind {
+; X32-LABEL: test1:
+; X32: # BB#0: # %entry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movd (%eax), %mm0
+; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
+; X32-NEXT: movq %mm0, (%esp)
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: emms
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd (%rdi), %mm0
+; X64-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
+; X64-NEXT: movd %mm0, %eax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%0 = load i32, i32* %ptr, align 4
%1 = insertelement <2 x i32> undef, i32 %0, i32 0
@@ -47,13 +88,30 @@ entry:
ret i32 %12
}
-define i32 @test2(i32* nocapture readonly %ptr) {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: pshufw $232, (%[[REG]]), %mm0
-; CHECK-NEXT: movd %mm0, %eax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i32 @test2(i32* nocapture readonly %ptr) nounwind {
+; X32-LABEL: test2:
+; X32: # BB#0: # %entry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
+; X32-NEXT: movq %mm0, (%esp)
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: emms
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # BB#0: # %entry
+; X64-NEXT: pshufw $232, (%rdi), %mm0 # mm0 = mem[0,2,2,3]
+; X64-NEXT: movd %mm0, %eax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%0 = bitcast i32* %ptr to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8