Diffstat (limited to 'llvm/test/CodeGen/X86/vec_umulo.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vec_umulo.ll  140
1 file changed, 72 insertions, 68 deletions
diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
index 52b92055785..c8146e34468 100644
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -179,10 +179,12 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
 ; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movd %xmm2, 8(%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, 8(%rdi)
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
@@ -199,10 +201,12 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
 ; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
 ; SSSE3-NEXT: pxor %xmm2, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSSE3-NEXT: movd %xmm2, 8(%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, 8(%rdi)
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
@@ -1563,90 +1567,90 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
 ; SSE2-LABEL: umulo_v4i24:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
 ; SSE2-NEXT: pxor %xmm4, %xmm4
 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm5, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE2-NEXT: psrld $24, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm5, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSE2-NEXT: psrld $24, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
 ; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: movw %dx, 3(%rdi)
+; SSE2-NEXT: movw %cx, 3(%rdi)
 ; SSE2-NEXT: shrl $16, %eax
 ; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movw %ax, 9(%rdi)
 ; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
-; SSE2-NEXT: shrl $16, %edx
-; SSE2-NEXT: movb %dl, 5(%rdi)
+; SSE2-NEXT: movb %cl, 5(%rdi)
+; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: movw %ax, 9(%rdi)
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
 ; SSE2-NEXT: shrl $16, %eax
 ; SSE2-NEXT: movb %al, 11(%rdi)
+; SSE2-NEXT: shrl $16, %ecx
+; SSE2-NEXT: movb %cl, 8(%rdi)
+; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: umulo_v4i24:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm0, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
 ; SSSE3-NEXT: pxor %xmm4, %xmm4
 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5
-; SSSE3-NEXT: pxor %xmm5, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSSE3-NEXT: psrld $24, %xmm0
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm0
-; SSSE3-NEXT: pxor %xmm5, %xmm0
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSSE3-NEXT: psrld $24, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm1
+; SSSE3-NEXT: movd %xmm0, %eax
 ; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
 ; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: movw %dx, 3(%rdi)
+; SSSE3-NEXT: movw %cx, 3(%rdi)
 ; SSSE3-NEXT: shrl $16, %eax
 ; SSSE3-NEXT: movb %al, 2(%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movw %ax, 9(%rdi)
 ; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
-; SSSE3-NEXT: shrl $16, %edx
-; SSSE3-NEXT: movb %dl, 5(%rdi)
+; SSSE3-NEXT: movb %cl, 5(%rdi)
+; SSSE3-NEXT: movd %xmm5, %eax
+; SSSE3-NEXT: movw %ax, 9(%rdi)
+; SSSE3-NEXT: movd %xmm6, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
 ; SSSE3-NEXT: shrl $16, %eax
 ; SSSE3-NEXT: movb %al, 11(%rdi)
+; SSSE3-NEXT: shrl $16, %ecx
+; SSSE3-NEXT: movb %cl, 8(%rdi)
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: umulo_v4i24:
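
For context: the CHECK lines in this diff are auto-generated assembly expectations (the {{.*#+}} patterns come from utils/update_llc_test_checks.py), so the change above only re-records codegen output; the IR under test is unchanged. The diff does not show the IR bodies, but a minimal sketch of the shape these umulo_* tests follow, assuming the standard llvm.umul.with.overflow pattern used throughout this file (typed pointers, matching this revision's era), is:

define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
  ; Vector multiply that also yields a per-lane overflow bit.
  %t = call {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
  %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
  %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
  ; Widen the i1 overflow mask to full i32 lanes for the return value.
  %res = sext <3 x i1> %obit to <3 x i32>
  ; Store the product through %p2; for v3i32 this becomes the movq + movd
  ; split store whose scheduling changed in the hunks above.
  store <3 x i32> %val, <3 x i32>* %p2
  ret <3 x i32> %res
}
declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>)

The v4i24 tests follow the same pattern with <4 x i24> operands, which is why the scalar movw/movb store sequences appear: each 24-bit lane is written back as a 16-bit word plus a byte.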