Diffstat (limited to 'llvm/test/CodeGen/X86/vec_smulo.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vec_smulo.ll | 152
1 file changed, 76 insertions(+), 76 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll
index a34824dd106..385c556e969 100644
--- a/llvm/test/CodeGen/X86/vec_smulo.ll
+++ b/llvm/test/CodeGen/X86/vec_smulo.ll
@@ -195,17 +195,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSE2-NEXT: psubd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movq %xmm2, (%rdi)
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: psrad $31, %xmm2
 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm0, 8(%rdi)
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm1, 8(%rdi)
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: smulo_v3i32:
@@ -225,17 +225,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSSE3-NEXT: psubd %xmm2, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: movq %xmm2, (%rdi)
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
 ; SSSE3-NEXT: psrad $31, %xmm2
 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
-; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm0, 8(%rdi)
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movd %xmm1, 8(%rdi)
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: smulo_v3i32:
@@ -1767,52 +1767,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
 ; SSE2-NEXT: psrad $8, %xmm0
 ; SSE2-NEXT: pslld $8, %xmm1
 ; SSE2-NEXT: psrad $8, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
 ; SSE2-NEXT: pxor %xmm2, %xmm2
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
 ; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pmuludq %xmm5, %xmm2
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; SSE2-NEXT: psubd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm4, %xmm1
 ; SSE2-NEXT: pslld $8, %xmm1
 ; SSE2-NEXT: psrad $8, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
-; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
 ; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: movw %dx, 3(%rdi)
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: movw %cx, 3(%rdi)
 ; SSE2-NEXT: shrl $16, %eax
 ; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movw %ax, 9(%rdi)
 ; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
-; SSE2-NEXT: shrl $16, %edx
-; SSE2-NEXT: movb %dl, 5(%rdi)
+; SSE2-NEXT: movb %cl, 5(%rdi)
+; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: movw %ax, 9(%rdi)
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
 ; SSE2-NEXT: shrl $16, %eax
 ; SSE2-NEXT: movb %al, 11(%rdi)
+; SSE2-NEXT: shrl $16, %ecx
+; SSE2-NEXT: movb %cl, 8(%rdi)
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
@@ -1822,52 +1822,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
 ; SSSE3-NEXT: psrad $8, %xmm0
 ; SSSE3-NEXT: pslld $8, %xmm1
 ; SSSE3-NEXT: psrad $8, %xmm1
-; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm4
 ; SSSE3-NEXT: pxor %xmm2, %xmm2
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
 ; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pand %xmm1, %xmm3
-; SSSE3-NEXT: paddd %xmm2, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm1, %xmm4
+; SSSE3-NEXT: paddd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSSE3-NEXT: pmuludq %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm4, %xmm2
+; SSSE3-NEXT: pmuludq %xmm5, %xmm2
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; SSSE3-NEXT: psubd %xmm3, %xmm5
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: psubd %xmm4, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
 ; SSSE3-NEXT: pslld $8, %xmm1
 ; SSSE3-NEXT: psrad $8, %xmm1
-; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
-; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm4, %xmm3
-; SSSE3-NEXT: pxor %xmm4, %xmm1
-; SSSE3-NEXT: por %xmm3, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm1
+; SSSE3-NEXT: por %xmm4, %xmm1
 ; SSSE3-NEXT: movd %xmm0, %eax
 ; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm0, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: movw %dx, 3(%rdi)
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: movw %cx, 3(%rdi)
 ; SSSE3-NEXT: shrl $16, %eax
 ; SSSE3-NEXT: movb %al, 2(%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSSE3-NEXT: movd %xmm0, %eax
-; SSSE3-NEXT: movw %ax, 9(%rdi)
 ; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
-; SSSE3-NEXT: shrl $16, %edx
-; SSSE3-NEXT: movb %dl, 5(%rdi)
+; SSSE3-NEXT: movb %cl, 5(%rdi)
+; SSSE3-NEXT: movd %xmm5, %eax
+; SSSE3-NEXT: movw %ax, 9(%rdi)
+; SSSE3-NEXT: movd %xmm6, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
 ; SSSE3-NEXT: shrl $16, %eax
 ; SSSE3-NEXT: movb %al, 11(%rdi)
+; SSSE3-NEXT: shrl $16, %ecx
+; SSSE3-NEXT: movb %cl, 8(%rdi)
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
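For context, the CHECK lines above are regenerated assembly expectations; the IR under test is not changed by this diff. A minimal sketch of the kind of function these hunks check, with the signature taken from the hunk headers and the body assumed from the usual llvm.smul.with.overflow test pattern (it is not shown in the hunks themselves):

; Sketch only: the function body is an assumption, not part of this diff.
; The pattern multiplies the two vectors, stores the truncated product to
; %p2, and returns the overflow bits sign-extended to a full i32 mask,
; which is what the pcmpeqd/pxor sequences above materialize.
declare {<3 x i32>, <3 x i1>} @llvm.smul.with.overflow.v3i32(<3 x i32>, <3 x i32>)

define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
  %t = call {<3 x i32>, <3 x i1>} @llvm.smul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
  %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
  %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
  store <3 x i32> %val, <3 x i32>* %p2
  %res = sext <3 x i1> %obit to <3 x i32>
  ret <3 x i32> %res
}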