Diffstat (limited to 'llvm/test/CodeGen/X86/vector-rotate-128.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-rotate-128.ll | 266
1 file changed, 120 insertions(+), 146 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 7ea470dfea3..166b75f3310 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -129,32 +129,28 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm1
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrld %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm1, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld %xmm5, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
 ; SSE2-NEXT: movdqa %xmm0, %xmm5
 ; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,3,2,3]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm5, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
 ; SSE2-NEXT: psrld %xmm2, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: orps %xmm4, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: var_rotate_v4i32:
@@ -165,23 +161,21 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
 ; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm0, %xmm4
 ; SSE41-NEXT: psrld %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrld %xmm3, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld %xmm2, %xmm3
-; SSE41-NEXT: psrld %xmm4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrld %xmm5, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm2, %xmm4
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
 ; SSE41-NEXT: por %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -245,32 +239,28 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; X32-SSE-NEXT: pmuludq %xmm0, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm3, %xmm1
 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm3, %xmm4
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psrlq $32, %xmm3
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrld %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm3
+; X32-SSE-NEXT: psrld %xmm1, %xmm3
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrld %xmm5, %xmm1
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm5
 ; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,3,2,3]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: movdqa %xmm2, %xmm5
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm5, %xmm6
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
 ; X32-SSE-NEXT: psrld %xmm2, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE-NEXT: por %xmm1, %xmm0
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; X32-SSE-NEXT: orps %xmm4, %xmm1
+; X32-SSE-NEXT: movaps %xmm1, %xmm0
 ; X32-SSE-NEXT: retl
 %b32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
 %shl = shl <4 x i32> %a, %b
@@ -910,35 +900,30 @@ define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE2-LABEL: splatvar_rotate_v4i32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
-; SSE2-NEXT: psubd %xmm1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
+; SSE2-NEXT: psubd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pslld %xmm2, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm1, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pslld %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm3
+; SSE2-NEXT: psrld %xmm5, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
 ; SSE2-NEXT: movdqa %xmm0, %xmm5
 ; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm3, %xmm6
-; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,3,2,3]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm5, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: psrld %xmm4, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm2, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: orps %xmm4, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: splatvar_rotate_v4i32:
@@ -949,23 +934,21 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE41-NEXT: psubd %xmm1, %xmm3
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pslld %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm0, %xmm4
 ; SSE41-NEXT: psrld %xmm2, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrld %xmm2, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld %xmm3, %xmm2
-; SSE41-NEXT: psrld %xmm4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrld %xmm5, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm3, %xmm4
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
 ; SSE41-NEXT: por %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -1031,35 +1014,30 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; X32-SSE-LABEL: splatvar_rotate_v4i32:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: xorps %xmm3, %xmm3
-; X32-SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; X32-SSE-NEXT: xorps %xmm2, %xmm2
+; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
-; X32-SSE-NEXT: psubd %xmm1, %xmm4
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
+; X32-SSE-NEXT: psubd %xmm1, %xmm3
+; X32-SSE-NEXT: movdqa %xmm0, %xmm4
+; X32-SSE-NEXT: pslld %xmm2, %xmm4
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: psrld %xmm1, %xmm2
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: pslld %xmm3, %xmm1
-; X32-SSE-NEXT: movdqa %xmm4, %xmm3
-; X32-SSE-NEXT: psrlq $32, %xmm3
+; X32-SSE-NEXT: psrld %xmm5, %xmm1
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm5
 ; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: movdqa %xmm4, %xmm3
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm3, %xmm6
-; X32-SSE-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,3,2,3]
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm5, %xmm6
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; X32-SSE-NEXT: psrld %xmm4, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE-NEXT: por %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: psrld %xmm2, %xmm0
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; X32-SSE-NEXT: orps %xmm4, %xmm1
+; X32-SSE-NEXT: movaps %xmm1, %xmm0
 ; X32-SSE-NEXT: retl
 %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
 %splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
@@ -1673,18 +1651,16 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $27, %xmm1
+; SSE2-NEXT: psrld $25, %xmm1
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld $25, %xmm3
-; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld $28, %xmm3
-; SSE2-NEXT: psrld $26, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psrld $26, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $27, %xmm1
+; SSE2-NEXT: psrld $28, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; SSE2-NEXT: orps %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: constant_rotate_v4i32:
@@ -1755,18 +1731,16 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $27, %xmm1
+; X32-SSE-NEXT: psrld $25, %xmm1
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld $25, %xmm3
-; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld $28, %xmm3
-; X32-SSE-NEXT: psrld $26, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE-NEXT: por %xmm2, %xmm0
+; X32-SSE-NEXT: psrld $26, %xmm3
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrld $27, %xmm1
+; X32-SSE-NEXT: psrld $28, %xmm0
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; X32-SSE-NEXT: orps %xmm2, %xmm0
 ; X32-SSE-NEXT: retl
 %shl = shl <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
 %lshr = lshr <4 x i32> %a, <i32 28, i32 27, i32 26, i32 25>
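
The IR context at the end of these hunks shows how the rotates under test are written: not as an intrinsic, but as the shl/lshr/or idiom the X86 backend recognizes, with the updated check lines extracting the per-lane shift amounts via pshuflw/pshufd instead of the old psrlq/psrldq/punpck sequences (consistent with the diffstat's 120 insertions against 146 deletions). As a minimal sketch, the var_rotate_v4i32 pattern can be reproduced standalone as below; the sub and shl lines appear verbatim in the hunk context above, while the lshr/or/ret tail and the RUN line are assumptions filled in from the usual rotl-as-shifts form, not taken from this commit:

; Hypothetical standalone reproducer: rotl(a, b) as (a << b) | (a >> (32 - b)).
; The RUN line is an assumption modeled on typical X86 codegen tests:
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2
define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
  %b32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
  %shl = shl <4 x i32> %a, %b
  %lshr = lshr <4 x i32> %a, %b32
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}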