Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/srem-seteq-optsize.ll      |  28
-rw-r--r--  llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll | 968
-rw-r--r--  llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll    | 245
-rw-r--r--  llvm/test/CodeGen/X86/srem-seteq.ll              | 337
-rw-r--r--  llvm/test/CodeGen/X86/vselect-avx.ll             |  34
5 files changed, 394 insertions, 1218 deletions
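
Context for the updated CHECK lines (not part of the commit itself): the new expectations replace the old sdiv-based srem expansion with the multiply-by-modular-inverse divisibility test, so "x srem C == 0" becomes one multiply, one add, and one unsigned compare. A minimal C sketch of the scalar form checked in test_optsize, using only the constants that appear in the diff (0xCCCCCCCD, 0x19999999, 0x33333333); this is an illustrative assumption about the intent of those constants, not generated code:

    #include <assert.h>
    #include <stdint.h>

    /* Scalar shape of the new test_optsize codegen: multiply by the
       modular inverse of 5 (mod 2^32), add a bias, then one unsigned
       compare ("jb"/"cmovbl" in the diff) decides divisibility. */
    static int srem5_is_zero(int32_t x) {
        uint32_t q = (uint32_t)x * 0xCCCCCCCDu;  /* imull $-858993459 */
        q += 0x19999999u;                        /* addl $429496729  */
        return q < 0x33333333u;                  /* cmpl $858993459; jb */
    }

    int main(void) {
        /* Brute-force check against the plain remainder over a sample range. */
        for (int64_t x = -100000; x <= 100000; ++x)
            assert(srem5_is_zero((int32_t)x) == ((int32_t)x % 5 == 0));
        return 0;
    }

The vector CHECK lines below follow the same pattern per lane (pmulld/vpmulld, paddd/vpaddd, then pminud+pcmpeqd, with a rotate for even divisors).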
diff --git a/llvm/test/CodeGen/X86/srem-seteq-optsize.ll b/llvm/test/CodeGen/X86/srem-seteq-optsize.ll index a48e9d23fda..a1fcdc9153c 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-optsize.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-optsize.ll @@ -47,18 +47,11 @@ define i32 @test_minsize(i32 %X) optsize minsize nounwind readnone { define i32 @test_optsize(i32 %X) optsize nounwind readnone { ; X86-LABEL: test_optsize: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1717986919, %edx # imm = 0x66666667 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: leal (%edx,%edx,4), %eax -; X86-NEXT: cmpl %eax, %ecx +; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %eax # imm = 0xCCCCCCCD +; X86-NEXT: addl $429496729, %eax # imm = 0x19999999 +; X86-NEXT: cmpl $858993459, %eax # imm = 0x33333333 ; X86-NEXT: movl $42, %eax -; X86-NEXT: je .LBB1_2 +; X86-NEXT: jb .LBB1_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: movl $-10, %eax ; X86-NEXT: .LBB1_2: @@ -66,17 +59,12 @@ define i32 @test_optsize(i32 %X) optsize nounwind readnone { ; ; X64-LABEL: test_optsize: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rax -; X64-NEXT: imulq $1717986919, %rax, %rcx # imm = 0x66666667 -; X64-NEXT: movq %rcx, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $33, %rcx -; X64-NEXT: addl %edx, %ecx -; X64-NEXT: leal (%rcx,%rcx,4), %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: imull $-858993459, %edi, %eax # imm = 0xCCCCCCCD +; X64-NEXT: addl $429496729, %eax # imm = 0x19999999 +; X64-NEXT: cmpl $858993459, %eax # imm = 0x33333333 ; X64-NEXT: movl $42, %ecx ; X64-NEXT: movl $-10, %eax -; X64-NEXT: cmovel %ecx, %eax +; X64-NEXT: cmovbl %ecx, %eax ; X64-NEXT: retq %rem = srem i32 %X, 5 %cmp = icmp eq i32 %rem, 0 diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll index 03ef755425d..134d40015d0 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll @@ -138,21 +138,10 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,1374389535,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -168,132 +157,55 @@ define <4 x i32> 
@test_srem_odd_even(<4 x i32> %X) nounwind { define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_allones_eq: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1 -; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919] -; CHECK-SSE2-NEXT: pand %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,4294967295,u> -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3 -; CHECK-SSE2-NEXT: psrad $1, %xmm3 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4 -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0] -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2] -; CHECK-SSE2-NEXT: psrld $31, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0 -; CHECK-SSE2-NEXT: psrld $31, %xmm0 +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_allones_eq: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-SSE41-NEXT: movd %eax, %xmm1 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,0] -; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1 -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2 -; CHECK-SSE41-NEXT: psrad $1, %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: psrld $31, %xmm1 -; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1 -; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0 -; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0 +; CHECK-SSE41-NEXT: pmulld 
{{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458] +; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 +; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: psrld $31, %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_allones_eq: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX1-NEXT: vmovd %eax, %xmm1 -; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_allones_eq: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX2-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_eq: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX512VL-NEXT: 
vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5> @@ -304,133 +216,56 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind { define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_allones_ne: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1 -; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919] -; CHECK-SSE2-NEXT: pand %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,4294967295,u> -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3 -; CHECK-SSE2-NEXT: psrad $1, %xmm3 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4 -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0] -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2] -; CHECK-SSE2-NEXT: psrld $31, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0 -; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd 
{{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: psrld $31, %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_allones_ne: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-SSE41-NEXT: movd %eax, %xmm1 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,0] -; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1 -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2 -; CHECK-SSE41-NEXT: psrad $1, %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: psrld $31, %xmm1 -; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1 -; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0 -; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0 +; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458] +; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 +; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_allones_ne: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX1-NEXT: vmovd %eax, %xmm1 -; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpandn {{.*}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_allones_ne: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX2-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsrld $31, 
%xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] ; CHECK-AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_ne: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5> @@ -559,24 +394,11 @@ define <4 x i32> @test_srem_even_allones_eq(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_eq: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493 -; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: 
vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14> @@ -705,24 +527,11 @@ define <4 x i32> @test_srem_even_allones_ne(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_ne: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493 -; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14> @@ -869,23 +678,11 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_eq: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd 
{{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100> @@ -1031,23 +828,11 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_ne: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100> @@ -1168,20 +953,10 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -1283,19 +1058,10 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo: ; CHECK-AVX512VL: # %bb.0: -; 
CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -1438,21 +1204,10 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,2147483649,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -1468,132 +1223,55 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind { define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_one: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1 -; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919] -; CHECK-SSE2-NEXT: pand %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,1,u> -; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; 
CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3 -; CHECK-SSE2-NEXT: psrad $1, %xmm3 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4 -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0] -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2] -; CHECK-SSE2-NEXT: psrld $31, %xmm2 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0 -; CHECK-SSE2-NEXT: psrld $31, %xmm0 +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-SSE41-NEXT: movd %eax, %xmm1 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,1,0] -; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1 -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2 -; CHECK-SSE41-NEXT: psrad $1, %xmm2 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: psrld $31, %xmm1 -; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1 -; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0 -; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0 +; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458] +; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 +; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: psrld $31, %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX1-NEXT: vmovd %eax, %xmm1 -; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = 
xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] -; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX2-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667 -; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; 
CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 1, i32 5> @@ -1711,23 +1389,11 @@ define <4 x i32> @test_srem_even_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493 -; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 1, i32 14> @@ -1874,23 +1540,11 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 1, i32 100> @@ -2465,23 +2119,11 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo: ; 
CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1717986919] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 5> @@ -2614,23 +2256,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,0,2147483649,2454267027] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 14> @@ -2776,23 +2406,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_poweroftwo: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq 
%xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 100> @@ -2807,138 +2425,55 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_allones_and_one: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,1,0] -; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2 +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm1 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1717986919,0,0,1717986919] -; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1 -; CHECK-SSE2-NEXT: pmuludq %xmm4, %xmm1 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm1 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1] -; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1 -; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm3 -; CHECK-SSE2-NEXT: pand %xmm4, %xmm3 -; CHECK-SSE2-NEXT: psubd %xmm3, %xmm5 -; CHECK-SSE2-NEXT: paddd %xmm2, %xmm5 -; CHECK-SSE2-NEXT: movdqa %xmm5, %xmm2 -; CHECK-SSE2-NEXT: psrad $1, %xmm2 -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm5[1,2] -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,3,1] -; CHECK-SSE2-NEXT: psrld $31, %xmm5 -; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm5 -; CHECK-SSE2-NEXT: paddd %xmm2, %xmm5 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [5,4294967295,1,5] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm5 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] 
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; CHECK-SSE2-NEXT: psubd %xmm4, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0 -; CHECK-SSE2-NEXT: psrld $31, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_allones_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm3 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,4294967295,1,0] -; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2 -; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE41-NEXT: movdqa %xmm2, %xmm1 -; CHECK-SSE41-NEXT: psrad $1, %xmm1 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5],xmm1[6,7] -; CHECK-SSE41-NEXT: psrld $31, %xmm2 -; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3,4,5],xmm2[6,7] -; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2 -; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0 +; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,4294967295,4294967295,858993458] +; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 +; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: psrld $31, %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_allones_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5],xmm1[6,7] -; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_allones_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919] -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = 
xmm1[2,2,3,3] -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3] -; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 5> @@ -3070,23 +2605,11 @@ define <4 x i32> @test_srem_even_allones_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,0,0,2454267027] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; 
CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 1, i32 14> @@ -3225,23 +2748,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 100> @@ -3385,23 +2896,11 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1717986919] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; 
CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 5> @@ -3522,22 +3021,11 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2147483649,0,2454267027] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 16, i32 1, i32 14> @@ -3684,23 +3172,11 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; 
CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 100> @@ -3821,18 +3297,11 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm2, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 1> @@ -3945,18 +3414,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm2, %xmm2 -; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm2, %xmm1 -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3] -; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 1> diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll index 2817d9e6229..1d1f10f4d13 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll @@ -9,105 +9,55 @@ define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_25: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2 -; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = 
xmm3[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE2-NEXT: pxor %xmm4, %xmm4 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm4 -; CHECK-SSE2-NEXT: pand %xmm1, %xmm4 -; CHECK-SSE2-NEXT: psubd %xmm4, %xmm2 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1 -; CHECK-SSE2-NEXT: psrld $31, %xmm1 -; CHECK-SSE2-NEXT: psrad $3, %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [25,25,25,25] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm4 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm3, %xmm0 -; CHECK-SSE2-NEXT: psrld $31, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm1 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm2 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] -; CHECK-SSE41-NEXT: movdqa %xmm2, %xmm1 -; CHECK-SSE41-NEXT: psrld $31, %xmm1 -; CHECK-SSE41-NEXT: psrad $3, %xmm2 -; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2 -; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 +; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] +; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: psrld $31, %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_25: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, 
%xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_25: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [25,25,25,25] -; CHECK-AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] +; CHECK-AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_25: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -208,18 +158,10 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_100: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrad $5, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: 
vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -235,113 +177,55 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind { define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_neg25: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535] -; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2 +; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145] +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm4 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3 -; CHECK-SSE2-NEXT: pxor %xmm4, %xmm4 -; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm4 -; CHECK-SSE2-NEXT: pand %xmm1, %xmm4 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,0] -; CHECK-SSE2-NEXT: pand %xmm0, %xmm1 -; CHECK-SSE2-NEXT: paddd %xmm4, %xmm1 -; CHECK-SSE2-NEXT: psubd %xmm1, %xmm2 -; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1 -; CHECK-SSE2-NEXT: psrld $31, %xmm1 -; CHECK-SSE2-NEXT: psrad $3, %xmm2 -; CHECK-SSE2-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [25,4294967271,4294967271,25] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; CHECK-SSE2-NEXT: pmuludq %xmm4, %xmm1 -; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0 -; CHECK-SSE2-NEXT: pcmpeqd %xmm3, %xmm0 -; CHECK-SSE2-NEXT: psrld $31, %xmm0 +; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] +; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: retq ; ; CHECK-SSE41-LABEL: test_srem_odd_neg25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm3 -; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1 -; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] -; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2 -; CHECK-SSE41-NEXT: psrld $31, %xmm2 -; CHECK-SSE41-NEXT: psrad $3, %xmm1 -; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1 -; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1 -; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0 -; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 +; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] +; CHECK-SSE41-NEXT: pminud %xmm0, 
%xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 ; CHECK-SSE41-NEXT: psrld $31, %xmm0 ; CHECK-SSE41-NEXT: retq ; ; CHECK-AVX1-LABEL: test_srem_odd_neg25: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX2-LABEL: test_srem_odd_neg25: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535] -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX2-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145] +; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345] +; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] +; CHECK-AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: retq ; ; CHECK-AVX512VL-LABEL: test_srem_odd_neg25: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 -; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1 ; 
CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq @@ -442,19 +326,10 @@ define <4 x i32> @test_srem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_neg100: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2920577761,2920577761,2920577761,2920577761] -; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2 -; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] -; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2 -; CHECK-AVX512VL-NEXT: vpsrad $5, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: retq diff --git a/llvm/test/CodeGen/X86/srem-seteq.ll b/llvm/test/CodeGen/X86/srem-seteq.ll index b3224114e5c..8b7bb266c8e 100644 --- a/llvm/test/CodeGen/X86/srem-seteq.ll +++ b/llvm/test/CodeGen/X86/srem-seteq.ll @@ -9,32 +9,20 @@ define i32 @test_srem_odd(i32 %X) nounwind { ; X86-LABEL: test_srem_odd: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1717986919, %edx # imm = 0x66666667 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: leal (%edx,%edx,4), %edx +; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD +; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $858993459, %ecx # imm = 0x33333333 +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_odd: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1717986919, %rcx, %rax # imm = 0x66666667 -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $33, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: leal (%rax,%rax,4), %edx +; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD +; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $858993459, %ecx # imm = 0x33333333 +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 5 %cmp = icmp eq i32 %srem, 0 @@ -45,34 +33,20 @@ define i32 @test_srem_odd(i32 %X) nounwind { define i32 @test_srem_odd_25(i32 %X) nounwind { ; X86-LABEL: test_srem_odd_25: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1374389535, %edx # imm = 0x51EB851F -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $3, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: leal (%edx,%edx,4), %eax -; X86-NEXT: leal (%eax,%eax,4), %edx +; X86-NEXT: imull $-1030792151, 
{{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29 +; X86-NEXT: addl $85899345, %ecx # imm = 0x51EB851 ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $171798691, %ecx # imm = 0xA3D70A3 +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_odd_25: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1374389535, %rcx, %rax # imm = 0x51EB851F -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $35, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: leal (%rax,%rax,4), %eax -; X64-NEXT: leal (%rax,%rax,4), %edx +; X64-NEXT: imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29 +; X64-NEXT: addl $85899345, %ecx # imm = 0x51EB851 ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $171798691, %ecx # imm = 0xA3D70A3 +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 25 %cmp = icmp eq i32 %srem, 0 @@ -84,34 +58,20 @@ define i32 @test_srem_odd_25(i32 %X) nounwind { define i32 @test_srem_odd_bit30(i32 %X) nounwind { ; X86-LABEL: test_srem_odd_bit30: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $536870911, %edx # imm = 0x1FFFFFFF -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $27, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $1073741827, %edx, %edx # imm = 0x40000003 +; X86-NEXT: imull $1789569707, {{[0-9]+}}(%esp), %ecx # imm = 0x6AAAAAAB +; X86-NEXT: incl %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $3, %ecx +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_odd_bit30: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: shlq $29, %rax -; X64-NEXT: subq %rcx, %rax -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $59, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $1073741827, %eax, %edx # imm = 0x40000003 +; X64-NEXT: imull $1789569707, %edi, %ecx # imm = 0x6AAAAAAB +; X64-NEXT: incl %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $3, %ecx +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 1073741827 %cmp = icmp eq i32 %srem, 0 @@ -123,35 +83,20 @@ define i32 @test_srem_odd_bit30(i32 %X) nounwind { define i32 @test_srem_odd_bit31(i32 %X) nounwind { ; X86-LABEL: test_srem_odd_bit31: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $-536870913, %edx # imm = 0xDFFFFFFF -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $28, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $-2147483645, %edx, %edx # imm = 0x80000003 +; X86-NEXT: imull $-715827883, {{[0-9]+}}(%esp), %ecx # imm = 0xD5555555 +; X86-NEXT: incl %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $3, %ecx +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_odd_bit31: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: shlq $29, %rax -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: negq %rax -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $60, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $-2147483645, %eax, %edx # imm = 0x80000003 +; X64-NEXT: imull $-715827883, %edi, %ecx # imm = 0xD5555555 +; X64-NEXT: incl %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, 
%ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $3, %ecx +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 2147483651 %cmp = icmp eq i32 %srem, 0 @@ -166,37 +111,25 @@ define i32 @test_srem_odd_bit31(i32 %X) nounwind { define i16 @test_srem_even(i16 %X) nounwind { ; X86-LABEL: test_srem_even: ; X86: # %bb.0: -; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: imull $18725, %ecx, %eax # imm = 0x4925 -; X86-NEXT: movl %eax, %edx -; X86-NEXT: shrl $31, %edx -; X86-NEXT: sarl $18, %eax -; X86-NEXT: addl %edx, %eax -; X86-NEXT: movl %eax, %edx -; X86-NEXT: shll $4, %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: subl %eax, %edx +; X86-NEXT: imull $28087, {{[0-9]+}}(%esp), %eax # imm = 0x6DB7 +; X86-NEXT: addl $4680, %eax # imm = 0x1248 +; X86-NEXT: rorw %ax +; X86-NEXT: movzwl %ax, %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpw %dx, %cx -; X86-NEXT: setne %al +; X86-NEXT: cmpl $4680, %ecx # imm = 0x1248 +; X86-NEXT: seta %al ; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_srem_even: ; X64: # %bb.0: -; X64-NEXT: movswl %di, %ecx -; X64-NEXT: imull $18725, %ecx, %eax # imm = 0x4925 -; X64-NEXT: movl %eax, %edx -; X64-NEXT: shrl $31, %edx -; X64-NEXT: sarl $18, %eax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: movl %eax, %edx -; X64-NEXT: shll $4, %edx -; X64-NEXT: subl %eax, %edx -; X64-NEXT: subl %eax, %edx +; X64-NEXT: imull $28087, %edi, %eax # imm = 0x6DB7 +; X64-NEXT: addl $4680, %eax # imm = 0x1248 +; X64-NEXT: rorw %ax +; X64-NEXT: movzwl %ax, %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpw %dx, %cx -; X64-NEXT: setne %al +; X64-NEXT: cmpl $4680, %ecx # imm = 0x1248 +; X64-NEXT: seta %al ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %srem = srem i16 %X, 14 @@ -208,32 +141,22 @@ define i16 @test_srem_even(i16 %X) nounwind { define i32 @test_srem_even_100(i32 %X) nounwind { ; X86-LABEL: test_srem_even_100: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1374389535, %edx # imm = 0x51EB851F -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $5, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $100, %edx, %edx +; X86-NEXT: imull $-1030792151, {{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29 +; X86-NEXT: addl $85899344, %ecx # imm = 0x51EB850 +; X86-NEXT: rorl $2, %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $42949673, %ecx # imm = 0x28F5C29 +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_even_100: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1374389535, %rcx, %rax # imm = 0x51EB851F -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $37, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $100, %eax, %edx +; X64-NEXT: imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29 +; X64-NEXT: addl $85899344, %ecx # imm = 0x51EB850 +; X64-NEXT: rorl $2, %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $42949673, %ecx # imm = 0x28F5C29 +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 100 %cmp = icmp eq i32 %srem, 0 @@ -245,32 +168,22 @@ define i32 @test_srem_even_100(i32 %X) nounwind { define i32 @test_srem_even_bit30(i32 %X) nounwind { ; X86-LABEL: test_srem_even_bit30: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1073741721, %edx # imm = 0x3FFFFF99 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl 
%edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $28, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $1073741928, %edx, %edx # imm = 0x40000068 +; X86-NEXT: imull $-51622203, {{[0-9]+}}(%esp), %ecx # imm = 0xFCEC4EC5 +; X86-NEXT: addl $8, %ecx +; X86-NEXT: rorl $3, %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $3, %ecx +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_even_bit30: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1073741721, %rcx, %rax # imm = 0x3FFFFF99 -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $60, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $1073741928, %eax, %edx # imm = 0x40000068 +; X64-NEXT: imull $-51622203, %edi, %ecx # imm = 0xFCEC4EC5 +; X64-NEXT: addl $8, %ecx +; X64-NEXT: rorl $3, %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $3, %ecx +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 1073741928 %cmp = icmp eq i32 %srem, 0 @@ -282,35 +195,22 @@ define i32 @test_srem_even_bit30(i32 %X) nounwind { define i32 @test_srem_even_bit31(i32 %X) nounwind { ; X86-LABEL: test_srem_even_bit31: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $2147483545, %edx # imm = 0x7FFFFF99 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: subl %ecx, %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $30, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $-2147483546, %edx, %edx # imm = 0x80000066 +; X86-NEXT: imull $-989526779, {{[0-9]+}}(%esp), %ecx # imm = 0xC5050505 +; X86-NEXT: addl $2, %ecx +; X86-NEXT: rorl %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: sete %al +; X86-NEXT: cmpl $3, %ecx +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_even_bit31: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $2147483545, %rcx, %rax # imm = 0x7FFFFF99 -; X64-NEXT: shrq $32, %rax -; X64-NEXT: subl %ecx, %eax -; X64-NEXT: movl %eax, %edx -; X64-NEXT: shrl $31, %edx -; X64-NEXT: sarl $30, %eax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $-2147483546, %eax, %edx # imm = 0x80000066 +; X64-NEXT: imull $-989526779, %edi, %ecx # imm = 0xC5050505 +; X64-NEXT: addl $2, %ecx +; X64-NEXT: rorl %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: sete %al +; X64-NEXT: cmpl $3, %ecx +; X64-NEXT: setb %al ; X64-NEXT: retq %srem = srem i32 %X, 2147483750 %cmp = icmp eq i32 %srem, 0 @@ -326,32 +226,20 @@ define i32 @test_srem_even_bit31(i32 %X) nounwind { define i32 @test_srem_odd_setne(i32 %X) nounwind { ; X86-LABEL: test_srem_odd_setne: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1717986919, %edx # imm = 0x66666667 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: leal (%edx,%edx,4), %edx +; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD +; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setne %al +; X86-NEXT: cmpl $858993458, %ecx # imm = 0x33333332 +; X86-NEXT: seta %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_odd_setne: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1717986919, %rcx, %rax # imm = 0x66666667 -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; 
X64-NEXT: sarq $33, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: leal (%rax,%rax,4), %edx +; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD +; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setne %al +; X64-NEXT: cmpl $858993458, %ecx # imm = 0x33333332 +; X64-NEXT: seta %al ; X64-NEXT: retq %srem = srem i32 %X, 5 %cmp = icmp ne i32 %srem, 0 @@ -363,32 +251,20 @@ define i32 @test_srem_odd_setne(i32 %X) nounwind { define i32 @test_srem_negative_odd(i32 %X) nounwind { ; X86-LABEL: test_srem_negative_odd: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $-1717986919, %edx # imm = 0x99999999 -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: leal (%edx,%edx,4), %edx +; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD +; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: addl %ecx, %edx -; X86-NEXT: setne %al +; X86-NEXT: cmpl $858993458, %ecx # imm = 0x33333332 +; X86-NEXT: seta %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_negative_odd: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $-1717986919, %rcx, %rax # imm = 0x99999999 -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: shrq $63, %rdx -; X64-NEXT: sarq $33, %rax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: leal (%rax,%rax,4), %edx +; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD +; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999 ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: addl %edx, %ecx -; X64-NEXT: setne %al +; X64-NEXT: cmpl $858993458, %ecx # imm = 0x33333332 +; X64-NEXT: seta %al ; X64-NEXT: retq %srem = srem i32 %X, -5 %cmp = icmp ne i32 %srem, 0 @@ -398,35 +274,22 @@ define i32 @test_srem_negative_odd(i32 %X) nounwind { define i32 @test_srem_negative_even(i32 %X) nounwind { ; X86-LABEL: test_srem_negative_even: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1840700269, %edx # imm = 0x6DB6DB6D -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: imull %edx -; X86-NEXT: subl %ecx, %edx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: shrl $31, %eax -; X86-NEXT: sarl $3, %edx -; X86-NEXT: addl %eax, %edx -; X86-NEXT: imull $-14, %edx, %edx +; X86-NEXT: imull $-1227133513, {{[0-9]+}}(%esp), %ecx # imm = 0xB6DB6DB7 +; X86-NEXT: addl $306783378, %ecx # imm = 0x12492492 +; X86-NEXT: rorl %ecx ; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setne %al +; X86-NEXT: cmpl $306783378, %ecx # imm = 0x12492492 +; X86-NEXT: seta %al ; X86-NEXT: retl ; ; X64-LABEL: test_srem_negative_even: ; X64: # %bb.0: -; X64-NEXT: movslq %edi, %rcx -; X64-NEXT: imulq $1840700269, %rcx, %rax # imm = 0x6DB6DB6D -; X64-NEXT: shrq $32, %rax -; X64-NEXT: subl %ecx, %eax -; X64-NEXT: movl %eax, %edx -; X64-NEXT: shrl $31, %edx -; X64-NEXT: sarl $3, %eax -; X64-NEXT: addl %edx, %eax -; X64-NEXT: imull $-14, %eax, %edx +; X64-NEXT: imull $-1227133513, %edi, %ecx # imm = 0xB6DB6DB7 +; X64-NEXT: addl $306783378, %ecx # imm = 0x12492492 +; X64-NEXT: rorl %ecx ; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setne %al +; X64-NEXT: cmpl $306783378, %ecx # imm = 0x12492492 +; X64-NEXT: seta %al ; X64-NEXT: retq %srem = srem i32 %X, -14 %cmp = icmp ne i32 %srem, 0 diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll index f1475a1bb7c..3cd5654771c 100644 --- 
a/llvm/test/CodeGen/X86/vselect-avx.ll +++ b/llvm/test/CodeGen/X86/vselect-avx.ll @@ -84,21 +84,14 @@ bb: define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) { ; AVX1-LABEL: test3: ; AVX1: ## %bb.0: -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766] -; AVX1-NEXT: vpmuldq %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmuldq %xmm4, %xmm0, %xmm4 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7] -; AVX1-NEXT: vpsrld $31, %xmm3, %xmm4 -; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3 -; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm3 ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0 ; AVX1-NEXT: vmovq %xmm0, (%rdi) ; AVX1-NEXT: vmovq %xmm1, (%rsi) @@ -106,22 +99,17 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, ; ; AVX2-LABEL: test3: ; AVX2: ## %bb.0: -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766] -; AVX2-NEXT: vpmuldq %xmm4, %xmm3, %xmm3 -; AVX2-NEXT: vpmuldq %xmm4, %xmm0, %xmm4 -; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] -; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3] -; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4 -; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3 -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3] -; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3 -; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0 -; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [2863311531,2863311531,2863311531,2863311531] +; AVX2-NEXT: vpmulld %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [715827882,715827882,715827882,715827882] +; AVX2-NEXT: vpaddd %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1431655764,1431655764,1431655764,1431655764] +; AVX2-NEXT: vpminud %xmm3, %xmm0, %xmm3 ; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 ; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX2-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0 ; AVX2-NEXT: vmovq %xmm0, (%rdi) ; AVX2-NEXT: vmovq %xmm1, (%rsi) |
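Editorial note on the pattern exercised by the updated CHECK lines (a sketch added for readability, not part of the generated tests): the new sequences replace the multiply/shift/subtract srem expansion with a divisibility test by the divisor's multiplicative inverse modulo 2^32. For an odd divisor D, `X srem D == 0` holds exactly when `X * inverse(D) + floor((2^31 - 1)/D)` stays, as an unsigned value, at or below `2 * floor((2^31 - 1)/D)`. The C sketch below mirrors the scalar i32 divide-by-5 case; the constants (0xCCCCCCCD, 0x19999999, 0x33333333) are specific to D = 5 and a 32-bit word and are taken from the CHECK lines above, everything else (function and variable names, the probe values) is illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Divisibility test for x % 5 == 0 without an srem, mirroring the rewritten
 * CHECK lines:
 *   0xCCCCCCCD = multiplicative inverse of 5 modulo 2^32
 *   0x19999999 = floor((2^31 - 1) / 5), re-centres the signed range
 *   0x33333333 = 2 * 0x19999999 + 1, the exclusive unsigned bound         */
static int srem5_is_zero(int32_t x) {
  uint32_t q = (uint32_t)x * 0xCCCCCCCDu; /* imull $-858993459 */
  q += 0x19999999u;                       /* addl  $429496729  */
  return q < 0x33333333u;                 /* cmpl/setb         */
}

int main(void) {
  /* Hypothetical probe values covering multiples, non-multiples and the
   * signed extremes; each result is checked against the plain % operator. */
  const int32_t probes[] = {0, 1, 4, 5, -5, 25, -26,
                            INT32_MAX, INT32_MIN,
                            INT32_MAX - 2, INT32_MIN + 3};
  for (unsigned i = 0; i < sizeof(probes) / sizeof(probes[0]); ++i)
    assert(srem5_is_zero(probes[i]) == (probes[i] % 5 == 0));
  puts("srem-by-5 divisibility check agrees with %");
  return 0;
}

For an even divisor D = d * 2^s (d odd), the same check is applied after rotating the biased product right by s bits, which is why the updated sequences use rorw/rorl in the scalar tests and vprord/vprorvd in the AVX512VL tests. The vector variants have no unsigned compare-below, so they fold the per-lane bounds into a constant pool load and test `x == min(x, bound)` with pminud/vpminud followed by pcmpeqd, then shift the sign bit down with psrld $31 to produce the i1 result.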