Diffstat (limited to 'llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll')
-rw-r--r--  llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll  136
1 file changed, 68 insertions(+), 68 deletions(-)
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
index a681c3b0aa4..1b9a1474b9a 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -405,19 +405,19 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; AVX1-NEXT:    vpsrad $24, %xmm2, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $56, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX1-NEXT:    vpsrad $24, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpsllq $56, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX1-NEXT:    vpsrad $24, %xmm0, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -439,19 +439,19 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; AVX2-NEXT:    vpsrad $24, %xmm2, %xmm2
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsllq $56, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX2-NEXT:    vpsrad $24, %xmm1, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3]
 ; AVX2-NEXT:    vpsllq $56, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX2-NEXT:    vpsrad $24, %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3]
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -559,19 +559,19 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; AVX1-NEXT:    vpsrad $16, %xmm2, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $48, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX1-NEXT:    vpsrad $16, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpsllq $48, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -593,19 +593,19 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; AVX2-NEXT:    vpsrad $16, %xmm2, %xmm2
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsllq $48, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX2-NEXT:    vpsrad $16, %xmm1, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3]
 ; AVX2-NEXT:    vpsllq $48, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX2-NEXT:    vpsrad $16, %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3]
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -703,17 +703,17 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm4
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -733,17 +733,17 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; AVX2-NEXT:    vpsrad $31, %xmm2, %xmm4
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm3
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3]
 ; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm3
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3]
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -892,25 +892,25 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; SSE2-SSSE3-NEXT:    psrad $24, %xmm3
 ; SSE2-SSSE3-NEXT:    pslld $24, %xmm2
 ; SSE2-SSSE3-NEXT:    psrad $24, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    pslld $24, %xmm1
 ; SSE2-SSSE3-NEXT:    psrad $24, %xmm1
 ; SSE2-SSSE3-NEXT:    pslld $24, %xmm0
 ; SSE2-SSSE3-NEXT:    psrad $24, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
-; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -923,13 +923,13 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; AVX12-NEXT:    vpsrad $24, %xmm3, %xmm3
 ; AVX12-NEXT:    vpslld $24, %xmm2, %xmm2
 ; AVX12-NEXT:    vpsrad $24, %xmm2, %xmm2
+; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm2
 ; AVX12-NEXT:    vpslld $24, %xmm1, %xmm1
 ; AVX12-NEXT:    vpsrad $24, %xmm1, %xmm1
 ; AVX12-NEXT:    vpslld $24, %xmm0, %xmm0
 ; AVX12-NEXT:    vpsrad $24, %xmm0, %xmm0
 ; AVX12-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
-; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
 ; AVX12-NEXT:    andl $1, %eax
 ; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -975,25 +975,25 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; SSE2-SSSE3-NEXT:    psrad $16, %xmm3
 ; SSE2-SSSE3-NEXT:    pslld $16, %xmm2
 ; SSE2-SSSE3-NEXT:    psrad $16, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    pslld $16, %xmm1
 ; SSE2-SSSE3-NEXT:    psrad $16, %xmm1
 ; SSE2-SSSE3-NEXT:    pslld $16, %xmm0
 ; SSE2-SSSE3-NEXT:    psrad $16, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
-; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -1006,13 +1006,13 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; AVX12-NEXT:    vpsrad $16, %xmm3, %xmm3
 ; AVX12-NEXT:    vpslld $16, %xmm2, %xmm2
 ; AVX12-NEXT:    vpsrad $16, %xmm2, %xmm2
+; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm2
 ; AVX12-NEXT:    vpslld $16, %xmm1, %xmm1
 ; AVX12-NEXT:    vpsrad $16, %xmm1, %xmm1
 ; AVX12-NEXT:    vpslld $16, %xmm0, %xmm0
 ; AVX12-NEXT:    vpsrad $16, %xmm0, %xmm0
 ; AVX12-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
-; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
 ; AVX12-NEXT:    andl $1, %eax
 ; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
@@ -1058,35 +1058,35 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; SSE2-SSSE3-NEXT:    psraw $8, %xmm3
 ; SSE2-SSSE3-NEXT:    psllw $8, %xmm2
 ; SSE2-SSSE3-NEXT:    psraw $8, %xmm2
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    psllw $8, %xmm1
 ; SSE2-SSSE3-NEXT:    psraw $8, %xmm1
 ; SSE2-SSSE3-NEXT:    psllw $8, %xmm0
 ; SSE2-SSSE3-NEXT:    psraw $8, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtw %xmm1, %xmm0
-; SSE2-SSSE3-NEXT:    pcmpgtw %xmm3, %xmm2
-; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
-; SSE2-SSSE3-NEXT:    pextrw $7, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $6, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $6, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $5, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $5, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $4, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $4, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $3, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $3, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $2, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $2, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    pextrw $1, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    pextrw $1, %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    andl $1, %eax
 ; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
 ; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
@@ -1098,13 +1098,13 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; AVX12-NEXT:    vpsraw $8, %xmm3, %xmm3
 ; AVX12-NEXT:    vpsllw $8, %xmm2, %xmm2
 ; AVX12-NEXT:    vpsraw $8, %xmm2, %xmm2
+; AVX12-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm2
 ; AVX12-NEXT:    vpsllw $8, %xmm1, %xmm1
 ; AVX12-NEXT:    vpsraw $8, %xmm1, %xmm1
 ; AVX12-NEXT:    vpsllw $8, %xmm0, %xmm0
 ; AVX12-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; AVX12-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX12-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm1
-; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT:    vpextrw $7, %xmm0, %eax
 ; AVX12-NEXT:    andl $1, %eax
 ; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
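All of the CHECK blocks above exercise the same IR shape: two signed vector compares whose results are ANDed together and then bitcast to a small integer mask. As a minimal sketch, this is roughly what the v2i8 test body looks like, inferred from the function signatures in the hunk headers and the file name; the exact IR in the test file may differ:

; Sketch reconstructed from the hunk headers, not copied from the file.
define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
  %x0 = icmp sgt <2 x i8> %a, %b   ; lowered via the sign-extend + vpcmpgtq sequences above
  %x1 = icmp sgt <2 x i8> %c, %d   ; second compare, hoisted earlier in the new codegen
  %y  = and <2 x i1> %x0, %x1      ; becomes the vpand/pand
  %r  = bitcast <2 x i1> %y to i2  ; packed into the returned mask
  ret i2 %r
}

The change itself is a scheduling/register-reuse improvement: the compare of the %c/%d operands (xmm2 vs. xmm3) is now emitted before the sign extension of the %a/%b pair, so xmm3 becomes free to serve as the scratch register (replacing xmm4), and the final vpand/pand reads the hoisted result from xmm2 instead of materializing it into a separate register at the end.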

