| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-09-16 19:20:41 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-09-16 19:20:41 +0000 |
| commit | 28568c477d51b0f387ba5cc1cb8166568c4c819a | |
| tree | 2652eb89241156991633654ac8daab1f987fc129 | |
| parent | 3b33938c05f1e402f226038f2861e7cce9a5c7fd | |
[X86][SSE] Added vector add combine tests
Some work great, while others currently demonstrate the anti-vector bias prevalent in DAGCombiner.
llvm-svn: 281768
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-add.ll | 339 |
1 file changed, 339 insertions(+), 0 deletions(-)
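For context on the "anti-vector bias" noted in the commit message, here is a hedged scalar illustration (not part of this commit) of the unique-bits fold, (a+b) -> (a|b) when a and b share no set bits; the equivalent vector pattern is exercised by combine_vec_add_uniquebits in the diff below:

; Hypothetical scalar analogue, for illustration only (not from the patch).
; DAGCombiner is generally expected to rewrite this add as an or,
; since the two masks share no set bits.
define i32 @combine_add_uniquebits_scalar(i32 %a, i32 %b) {
  %1 = and i32 %a, 61680  ; 0xF0F0
  %2 = and i32 %b, 3855   ; 0x0F0F
  %3 = add i32 %1, %2     ; disjoint bits, so equivalent to an or
  ret i32 %3
}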
diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll
new file mode 100644
index 00000000000..2f229eb0f8c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-add.ll
@@ -0,0 +1,339 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+; fold (add x, 0) -> x
+define <4 x i32> @combine_vec_add_to_zero(<4 x i32> %a) {
+; SSE-LABEL: combine_vec_add_to_zero:
+; SSE: # BB#0:
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_to_zero:
+; AVX: # BB#0:
+; AVX-NEXT: retq
+  %1 = add <4 x i32> %a, zeroinitializer
+  ret <4 x i32> %1
+}
+
+; fold ((c1-A)+c2) -> (c1+c2)-A
+define <4 x i32> @combine_vec_add_constant_sub(<4 x i32> %a) {
+; SSE-LABEL: combine_vec_add_constant_sub:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3]
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: psubd %xmm0, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_constant_sub:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3]
+; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %a
+  %2 = add <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %1
+  ret <4 x i32> %2
+}
+
+; fold ((0-A) + B) -> B-A
+define <4 x i32> @combine_vec_add_neg0(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: combine_vec_add_neg0:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psubd %xmm0, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_neg0:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> zeroinitializer, %a
+  %2 = add <4 x i32> %1, %b
+  ret <4 x i32> %2
+}
+
+; fold (A + (0-B)) -> A-B
+define <4 x i32> @combine_vec_add_neg1(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: combine_vec_add_neg1:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_neg1:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> zeroinitializer, %b
+  %2 = add <4 x i32> %a, %1
+  ret <4 x i32> %2
+}
+
+; fold (A+(B-A)) -> B
+define <4 x i32> @combine_vec_add_sub0(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: combine_vec_add_sub0:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub0:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> %b, %a
+  %2 = add <4 x i32> %a, %1
+  ret <4 x i32> %2
+}
+
+; fold ((B-A)+A) -> B
+define <4 x i32> @combine_vec_add_sub1(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: combine_vec_add_sub1:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub1:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> %b, %a
+  %2 = add <4 x i32> %1, %a
+  ret <4 x i32> %2
+}
+
+; fold (A+(B-(A+C))) to (B-C)
+define <4 x i32> @combine_vec_add_sub_add0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; SSE-LABEL: combine_vec_add_sub_add0:
+; SSE: # BB#0:
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub_add0:
+; AVX: # BB#0:
+; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = add <4 x i32> %a, %c
+  %2 = sub <4 x i32> %b, %1
+  %3 = add <4 x i32> %a, %2
+  ret <4 x i32> %3
+}
+
+; fold (A+(B-(C+A))) to (B-C)
+define <4 x i32> @combine_vec_add_sub_add1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; SSE-LABEL: combine_vec_add_sub_add1:
+; SSE: # BB#0:
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub_add1:
+; AVX: # BB#0:
+; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = add <4 x i32> %c, %a
+  %2 = sub <4 x i32> %b, %1
+  %3 = add <4 x i32> %a, %2
+  ret <4 x i32> %3
+}
+
+; fold (A+((B-A)+C)) to (B+C)
+define <4 x i32> @combine_vec_add_sub_add2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; SSE-LABEL: combine_vec_add_sub_add2:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub_add2:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> %b, %a
+  %2 = add <4 x i32> %1, %c
+  %3 = add <4 x i32> %a, %2
+  ret <4 x i32> %3
+}
+
+; fold (A+((B-A)-C)) to (B-C)
+define <4 x i32> @combine_vec_add_sub_add3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; SSE-LABEL: combine_vec_add_sub_add3:
+; SSE: # BB#0:
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub_add3:
+; AVX: # BB#0:
+; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> %b, %a
+  %2 = sub <4 x i32> %1, %c
+  %3 = add <4 x i32> %a, %2
+  ret <4 x i32> %3
+}
+
+; fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
+define <4 x i32> @combine_vec_add_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %d) {
+; SSE-LABEL: combine_vec_add_sub_sub:
+; SSE: # BB#0:
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,1,2,3]
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sub_sub:
+; AVX: # BB#0:
+; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3]
+; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> %a, %b
+  %2 = sub <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %d
+  %3 = add <4 x i32> %1, %2
+  ret <4 x i32> %3
+}
+
+; fold (a+b) -> (a|b) iff a and b share no bits.
+define <4 x i32> @combine_vec_add_uniquebits(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: combine_vec_add_uniquebits:
+; SSE: # BB#0:
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_uniquebits:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = and <4 x i32> %a, <i32 61680, i32 61680, i32 61680, i32 61680>
+  %2 = and <4 x i32> %b, <i32 3855, i32 3855, i32 3855, i32 3855>
+  %3 = add <4 x i32> %1, %2
+  ret <4 x i32> %3
+}
+
+; fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
+define <4 x i32> @combine_vec_add_shl_neg0(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: combine_vec_add_shl_neg0:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: pslld $5, %xmm2
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_shl_neg0:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vpslld $5, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> zeroinitializer, %y
+  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
+  %3 = add <4 x i32> %x, %2
+  ret <4 x i32> %3
+}
+
+; fold (add shl(0 - y, n), x) -> sub(x, shl(y, n))
+define <4 x i32> @combine_vec_add_shl_neg1(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: combine_vec_add_shl_neg1:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: pslld $5, %xmm2
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_shl_neg1:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vpslld $5, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %1 = sub <4 x i32> zeroinitializer, %y
+  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
+  %3 = add <4 x i32> %2, %x
+  ret <4 x i32> %3
+}
+
+; (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
+; and similar xforms where the inner op is either ~0 or 0.
+define <4 x i32> @combine_vec_add_and_compare(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+; SSE-LABEL: combine_vec_add_and_compare:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE-NEXT: psrld $31, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_and_compare:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpsrld $31, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = icmp eq <4 x i32> %a1, %a2
+  %2 = sext <4 x i1> %1 to <4 x i32>
+  %3 = and <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+  %4 = add <4 x i32> %a0, %3
+  ret <4 x i32> %4
+}
+
+; add (sext i1), X -> sub X, (zext i1)
+define <4 x i32> @combine_vec_add_sext(<4 x i1> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_add_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = sext <4 x i1> %a0 to <4 x i32>
+  %2 = add <4 x i32> %1, %a1
+  ret <4 x i32> %2
+}
+
+; add (sext i1), X -> sub X, (zext i1)
+define <4 x i32> @combine_vec_add_sextinreg(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_add_sextinreg:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_add_sextinreg:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %1 = shl <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
+  %2 = ashr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
+  %3 = add <4 x i32> %2, %a1
+  ret <4 x i32> %3
+}
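As a hedged sketch (not part of the commit), this is the rewritten IR form described by the "add (sext i1), X -> sub X, (zext i1)" comment above, spelled out for the combine_vec_add_sext test case; the sign-extended i1 is either 0 or -1 per lane, so adding it is equivalent to subtracting the zero-extended bit:

; Hypothetical target form of the fold, for illustration only.
define <4 x i32> @combine_vec_add_sext_expected(<4 x i1> %a0, <4 x i32> %a1) {
  %z = zext <4 x i1> %a0 to <4 x i32>   ; 0 or 1 per lane
  %r = sub <4 x i32> %a1, %z            ; equals %a1 + sext(%a0)
  ret <4 x i32> %r
}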

