Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shift-ashr-128.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-ashr-128.ll | 140
1 file changed, 68 insertions, 72 deletions
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 8e981f45864..55766daecff 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -20,19 +20,18 @@
 define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: var_shift_v2i64:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; SSE2-NEXT:    movdqa %xmm2, %xmm4
-; SSE2-NEXT:    psrlq %xmm3, %xmm4
-; SSE2-NEXT:    psrlq %xmm1, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psrlq %xmm3, %xmm2
-; SSE2-NEXT:    psrlq %xmm1, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; SSE2-NEXT:    xorpd %xmm4, %xmm2
-; SSE2-NEXT:    psubq %xmm4, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    psrlq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT:    psrlq %xmm4, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    psrlq %xmm1, %xmm3
+; SSE2-NEXT:    psrlq %xmm4, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
+; SSE2-NEXT:    xorpd %xmm2, %xmm0
+; SSE2-NEXT:    psubq %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shift_v2i64:
@@ -97,19 +96,18 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ;
 ; X32-SSE-LABEL: var_shift_v2i64:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
-; X32-SSE-NEXT:    psrlq %xmm3, %xmm4
-; X32-SSE-NEXT:    psrlq %xmm1, %xmm2
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
-; X32-SSE-NEXT:    psrlq %xmm3, %xmm2
-; X32-SSE-NEXT:    psrlq %xmm1, %xmm0
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; X32-SSE-NEXT:    xorpd %xmm4, %xmm2
-; X32-SSE-NEXT:    psubq %xmm4, %xmm2
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm0
+; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm3
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; X32-SSE-NEXT:    psrlq %xmm4, %xmm2
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X32-SSE-NEXT:    psrlq %xmm1, %xmm3
+; X32-SSE-NEXT:    psrlq %xmm4, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
+; X32-SSE-NEXT:    xorpd %xmm2, %xmm0
+; X32-SSE-NEXT:    psubq %xmm2, %xmm0
 ; X32-SSE-NEXT:    retl
   %shift = ashr <2 x i64> %a, %b
   ret <2 x i64> %shift
@@ -119,24 +117,24 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE2-LABEL: var_shift_v4i32:
 ; SSE2:       # BB#0:
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
-; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    psrlq $32, %xmm2
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    psrad %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
-; SSE2-NEXT:    psrlq $32, %xmm2
+; SSE2-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; SSE2-NEXT:    psrad %xmm2, %xmm4
-; SSE2-NEXT:    movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
 ; SSE2-NEXT:    psrad %xmm4, %xmm5
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; SSE2-NEXT:    psrad %xmm1, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE2-NEXT:    retq
 ;
@@ -207,24 +205,24 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; X32-SSE-LABEL: var_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
-; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT:    psrlq $32, %xmm2
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm3
 ; X32-SSE-NEXT:    psrad %xmm2, %xmm3
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
-; X32-SSE-NEXT:    psrlq $32, %xmm2
+; X32-SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm4
 ; X32-SSE-NEXT:    psrad %xmm2, %xmm4
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
 ; X32-SSE-NEXT:    pxor %xmm3, %xmm3
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm4
-; X32-SSE-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
 ; X32-SSE-NEXT:    psrad %xmm4, %xmm5
-; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; X32-SSE-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; X32-SSE-NEXT:    psrad %xmm1, %xmm0
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; X32-SSE-NEXT:    retl
   %shift = ashr <4 x i32> %a, %b
@@ -1044,13 +1042,12 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
 ; SSE2-LABEL: constant_shift_v2i64:
 ; SSE2:       # BB#0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    psrlq $7, %xmm1
-; SSE2-NEXT:    psrlq $1, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; SSE2-NEXT:    movapd {{.*#+}} xmm0 = [4611686018427387904,72057594037927936]
-; SSE2-NEXT:    xorpd %xmm0, %xmm1
-; SSE2-NEXT:    psubq %xmm0, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrlq $1, %xmm1
+; SSE2-NEXT:    psrlq $7, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    movapd {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
+; SSE2-NEXT:    xorpd %xmm1, %xmm0
+; SSE2-NEXT:    psubq %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: constant_shift_v2i64:
@@ -1107,16 +1104,15 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
-; X32-SSE-NEXT:    psrlq $7, %xmm2
-; X32-SSE-NEXT:    psrlq $1, %xmm1
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
+; X32-SSE-NEXT:    psrlq $1, %xmm2
 ; X32-SSE-NEXT:    psrlq $7, %xmm1
-; X32-SSE-NEXT:    psrlq $1, %xmm0
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X32-SSE-NEXT:    xorpd %xmm2, %xmm1
-; X32-SSE-NEXT:    psubq %xmm2, %xmm1
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
+; X32-SSE-NEXT:    psrlq $1, %xmm2
+; X32-SSE-NEXT:    psrlq $7, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; X32-SSE-NEXT:    xorpd %xmm1, %xmm0
+; X32-SSE-NEXT:    psubq %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
   %shift = ashr <2 x i64> %a, <i64 1, i64 7>
   ret <2 x i64> %shift
@@ -1126,16 +1122,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: constant_shift_v4i32:
 ; SSE2:       # BB#0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    psrad $7, %xmm1
+; SSE2-NEXT:    psrad $5, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psrad $5, %xmm2
-; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT:    psrad $7, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psrad $6, %xmm2
-; SSE2-NEXT:    psrad $4, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT:    psrad $4, %xmm2
+; SSE2-NEXT:    psrad $6, %xmm0
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
@@ -1192,16 +1188,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
 ; X32-SSE-LABEL: constant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE-NEXT:    psrad $7, %xmm1
+; X32-SSE-NEXT:    psrad $5, %xmm1
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
-; X32-SSE-NEXT:    psrad $5, %xmm2
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    psrad $7, %xmm2
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
-; X32-SSE-NEXT:    psrad $6, %xmm2
-; X32-SSE-NEXT:    psrad $4, %xmm0
-; X32-SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; X32-SSE-NEXT:    psrad $4, %xmm2
+; X32-SSE-NEXT:    psrad $6, %xmm0
+; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X32-SSE-NEXT:    retl
   %shift = ashr <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
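Note on the patterns under test: SSE2 has no 64-bit arithmetic shift (there is no psraq), so the lowering these CHECK lines verify emulates ashr <2 x i64> with logical shifts plus a sign fix. The splatted sign-bit constant ([9223372036854775808,9223372036854775808]) and the input are both shifted right logically by the same counts, and an xorpd/psubq pair then re-extends the sign. Because psrlq applies a single count taken from the low 64 bits of its operand, each of the two lane counts is applied to the whole vector and the halves are recombined with movsd, with pshufd supplying the upper lane's count. The diff itself only reorders which temporary handles which count so results land directly in xmm0, dropping the trailing movdqa copies (hence the small net line reduction). Below is a minimal IR sketch of the identity; the function name is illustrative and not part of the test file:

; ashr(x, n) == (lshr(x, n) ^ m) - m, where m is the sign bit shifted by n
define <2 x i64> @ashr_via_lshr_sketch(<2 x i64> %x, <2 x i64> %n) {
  ; m = 0x8000000000000000 >>u n, computed per lane
  %m = lshr <2 x i64> <i64 -9223372036854775808, i64 -9223372036854775808>, %n
  ; the logical shift zero-fills the top n bits; the old sign bit lands at bit 63-n
  %s = lshr <2 x i64> %x, %n
  ; xor flips the relocated sign bit; subtracting m then propagates it through the top n bits
  %t = xor <2 x i64> %s, %m
  %r = sub <2 x i64> %t, %m
  ret <2 x i64> %r
}

The v4i32 hunks exercise the analogous workaround for variable 32-bit shifts: psrad also honors only one scalar count, so the lowering extracts the four lane counts (via psrlq $32, psrldq, and punpckldq/punpckhdq against zero), performs four shifts, and stitches the lanes back together with movsd, pshufd, and punpckldq; again the change only swaps which count each temporary receives.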