diff options
author | Michael Kuperstein <michael.m.kuperstein@intel.com> | 2015-01-22 12:37:23 +0000 |
---|---|---|
committer | Michael Kuperstein <michael.m.kuperstein@intel.com> | 2015-01-22 12:37:23 +0000 |
commit | 84fad3e5c9f75710ad1f43198c6e136e7cdb46b4 (patch) | |
tree | 04d9c0d679cd5d5c911e13c5b9350e472055cdcb /llvm/test/CodeGen/X86/splat-const.ll | |
parent | febfd3453ec975887ef2370fa4b9a6a9a6757950 (diff) | |
download | bcm5719-llvm-84fad3e5c9f75710ad1f43198c6e136e7cdb46b4.tar.gz bcm5719-llvm-84fad3e5c9f75710ad1f43198c6e136e7cdb46b4.zip |
[DAGCombine] Produce better code for constant splats
This solves PR22276.
Splats of constants would sometimes produce redundant shuffles, sometimes ridiculously so (see the PR for details). Fold these shuffles into BUILD_VECTORs early on instead.
Differential Revision: http://reviews.llvm.org/D7093
llvm-svn: 226811
Diffstat (limited to 'llvm/test/CodeGen/X86/splat-const.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/splat-const.ll | 40 |
1 file changed, 40 insertions, 0 deletions
; Reconstructed from a scraped commit page; original patch provenance:
;   diff --git a/llvm/test/CodeGen/X86/splat-const.ll b/llvm/test/CodeGen/X86/splat-const.ll
;   new file mode 100644  index 00000000000..19997b03ad5  --- /dev/null  +++ b/llvm/test/CodeGen/X86/splat-const.ll  @@ -0,0 +1,40 @@
;
; RUN lines: each compiles this IR for a specific CPU and checks the asm
; with a matching FileCheck prefix (SSE = penryn, AVX = sandybridge,
; AVX2 = haswell).
; RUN: llc < %s -mcpu=penryn | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mcpu=sandybridge | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mcpu=haswell | FileCheck %s --check-prefix=AVX2
; This checks that lowering for creation of constant vectors is sane and
; doesn't use redundant shuffles. (fixes PR22276)
target triple = "x86_64-unknown-unknown"

; Splat of 0 must lower to a single register self-xor (the canonical
; zero idiom), with no extra shuffle instructions before the return.
define <4 x i32> @zero_vector() {
; SSE-LABEL: zero_vector:
; SSE: xorps %xmm0, %xmm0
; SSE-NEXT: retq
; AVX-LABEL: zero_vector:
; AVX: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
; AVX2-LABEL: zero_vector:
; AVX2: vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
  %zero = insertelement <4 x i32> undef, i32 0, i32 0
  %splat = shufflevector <4 x i32> %zero, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}

; Note that for the "const_vector" versions, lowering that uses a shuffle
; instead of a load would be legitimate, if it's a single broadcast shuffle.
; (as opposed to the previous mess)
; However, this is not the current preferred lowering.
define <4 x i32> @const_vector() {
; SSE-LABEL: const_vector:
; SSE: movaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
; SSE-NEXT: retq
; AVX-LABEL: const_vector:
; AVX: vmovaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
; AVX-NEXT: retq
; AVX2-LABEL: const_vector:
; AVX2: vbroadcastss {{[^%].*}}, %xmm0
; AVX2-NEXT: retq
  %const = insertelement <4 x i32> undef, i32 42, i32 0
  %splat = shufflevector <4 x i32> %const, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}