author:    Simon Pilgrim <llvm-dev@redking.me.uk>  2019-03-05 15:36:45 +0000
committer: Simon Pilgrim <llvm-dev@redking.me.uk>  2019-03-05 15:36:45 +0000
commit:    f011e53a78b20e64f4407b6aa1f01956ee4f273f
tree:      c000675ddf3b2c8726f11295d25355b3a7e1ebe0 /llvm/test/CodeGen/X86/combine-mulo.ll
parent:    622862987f3436486a067c74d5f7a0a3e2c185c1
[X86] Add SMULO/UMULO combine tests
Include scalar and vector test variants covering the folds in DAGCombiner (the vector folds aren't currently supported - PR40442)
llvm-svn: 355407
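
The scalar fold under test rewrites a multiply-with-overflow by the constant 2 into an add-with-overflow of the operand with itself. A minimal sketch of the equivalence in LLVM IR (the function names here are illustrative; the intrinsics are LLVM's standard overflow intrinsics):

; smulo x, 2 - the form the test emits.
define {i32, i1} @smulo_by_two(i32 %x) {
  %m = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 2)
  ret {i32, i1} %m
}

; saddo x, x - the form the DAGCombiner fold should produce; the scalar
; CHECK lines below observe it as a single addl plus a cmovol on the
; overflow flag.
define {i32, i1} @saddo_x_x(i32 %x) {
  %a = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %x)
  ret {i32, i1} %a
}

declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)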
Diffstat (limited to 'llvm/test/CodeGen/X86/combine-mulo.ll')
-rw-r--r--  llvm/test/CodeGen/X86/combine-mulo.ll | 136
1 file changed, 136 insertions, 0 deletions
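
The CHECK lines in the new file were generated with utils/update_llc_test_checks.py (see the NOTE line below) and can be regenerated the same way after codegen changes. Outside of lit, the two RUN lines expand to roughly the following, with %s replaced by the test file path:

llc < llvm/test/CodeGen/X86/combine-mulo.ll -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck llvm/test/CodeGen/X86/combine-mulo.ll --check-prefix=SSE
llc < llvm/test/CodeGen/X86/combine-mulo.ll -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck llvm/test/CodeGen/X86/combine-mulo.ll --check-prefix=AVX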
diff --git a/llvm/test/CodeGen/X86/combine-mulo.ll b/llvm/test/CodeGen/X86/combine-mulo.ll
new file mode 100644
index 00000000000..045e6595ed2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-mulo.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+
+declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; fold (smulo x, 2) -> (saddo x, x)
+define i32 @combine_smul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuldq %xmm3, %xmm0
+; SSE-NEXT:    pmuldq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuldq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (umulo x, 2) -> (uaddo x, x)
+define i32 @combine_umul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovbl %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovbl %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_umul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuludq %xmm3, %xmm0
+; SSE-NEXT:    pmuludq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    pxor %xmm4, %xmm0
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
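
The unsigned tests exercise the same shape of fold, umulo x, 2 -> uaddo x, x. A minimal sketch of that equivalence in LLVM IR (function names are illustrative; the intrinsics are the standard ones):

; umulo x, 2 - as written in the test.
define {i32, i1} @umulo_by_two(i32 %x) {
  %m = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 2)
  ret {i32, i1} %m
}

; uaddo x, x - the folded form. On x86, unsigned add overflow is reported
; through the carry flag, which is why the scalar checks above expect
; cmovbl (below/carry) where the signed tests expect cmovol (overflow).
define {i32, i1} @uaddo_x_x(i32 %x) {
  %a = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %x)
  ret {i32, i1} %a
}

declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)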