author     Sanjay Patel <spatel@rotateright.com>    2017-04-19 21:23:09 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2017-04-19 21:23:09 +0000
commit     ae382bb6af2c8ac70d0c24ee32418f0980d7f2c8
tree       770daacf8a282c27f3442381da5561d469213c43 /llvm/test/CodeGen/X86/avx-logic.ll
parent     ada0888a111750ff4caec49208d11de4e29bfb61
[DAG] add splat vector support for 'xor' in SimplifyDemandedBits
This allows forming more 'not' ops, so we get improvements for ISAs that have and-not.
Follow-up to:
https://reviews.llvm.org/rL300725
llvm-svn: 300763
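To see why this enables and-not, consider a minimal sketch (hypothetical function name, not taken from the commit): the outer 'and' with splat(1) demands only bit 0, and in that bit the xor constant is already all-ones, so SimplifyDemandedBits can widen the xor constant to -1, turning the 'xor' into a 'not' that x86 matches as a single and-not (vandnps/ANDN):

define <4 x i32> @sketch(<4 x i32> %x) nounwind {
  ; Original pattern: xor-by-splat(1) feeding and-by-splat(1).
  %xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
  ; Only bit 0 is demanded by the 'and', so this is equivalent to:
  ;   %not = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ;   %and = and <4 x i32> %not, <i32 1, i32 1, i32 1, i32 1>
  ; which the backend can select as one and-not instruction.
  ret <4 x i32> %and
}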
Diffstat (limited to 'llvm/test/CodeGen/X86/avx-logic.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx-logic.ll | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx-logic.ll b/llvm/test/CodeGen/X86/avx-logic.ll
index 95a61ec8bc3..89abbabee27 100644
--- a/llvm/test/CodeGen/X86/avx-logic.ll
+++ b/llvm/test/CodeGen/X86/avx-logic.ll
@@ -274,16 +274,13 @@ entry:
 define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
 ; AVX-LABEL: and_xor_splat1_v4i32:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1,1,1,1]
-; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: and_xor_splat1_v4i32:
 ; AVX512:       # BB#0:
 ; AVX512-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
-; AVX512-NEXT:    vxorps %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vandnps %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   %and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
@@ -293,16 +290,13 @@ define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
 define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
 ; AVX-LABEL: and_xor_splat1_v4i64:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1,1,1,1]
-; AVX-NEXT:    vxorps %ymm1, %ymm0, %ymm0
-; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: and_xor_splat1_v4i64:
 ; AVX512:       # BB#0:
 ; AVX512-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm1
-; AVX512-NEXT:    vxorps %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vandnps %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %xor = xor <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
   %and = and <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>