Diffstat (limited to 'llvm/test/CodeGen')
 llvm/test/CodeGen/X86/splat-const.ll     | 40 ++++
 llvm/test/CodeGen/X86/sse41.ll           |  4 +-
 llvm/test/CodeGen/X86/widen_shuffle-1.ll |  4 +-
 3 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/X86/splat-const.ll b/llvm/test/CodeGen/X86/splat-const.ll
new file mode 100644
index 00000000000..19997b03ad5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/splat-const.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mcpu=penryn | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mcpu=sandybridge | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mcpu=haswell | FileCheck %s --check-prefix=AVX2
+; This checks that lowering for creation of constant vectors is sane and
+; doesn't use redundant shuffles. (fixes PR22276)
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zero_vector() {
+; SSE-LABEL: zero_vector:
+; SSE: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+; AVX-LABEL: zero_vector:
+; AVX: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+; AVX2-LABEL: zero_vector:
+; AVX2: vxorps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: retq
+  %zero = insertelement <4 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <4 x i32> %zero, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %splat
+}
+
+; Note that for the "const_vector" versions, lowering that uses a shuffle
+; instead of a load would be legitimate, if it's a single broadcast shuffle.
+; (as opposed to the previous mess)
+; However, this is not the current preferred lowering.
+define <4 x i32> @const_vector() {
+; SSE-LABEL: const_vector:
+; SSE: movaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; SSE-NEXT: retq
+; AVX-LABEL: const_vector:
+; AVX: vmovaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; AVX-NEXT: retq
+; AVX2-LABEL: const_vector:
+; AVX2: vbroadcastss {{[^%].*}}, %xmm0
+; AVX2-NEXT: retq
+  %const = insertelement <4 x i32> undef, i32 42, i32 0
+  %splat = shufflevector <4 x i32> %const, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %splat
+}
diff --git a/llvm/test/CodeGen/X86/sse41.ll b/llvm/test/CodeGen/X86/sse41.ll
index 3295e2b206b..23b97f002a0 100644
--- a/llvm/test/CodeGen/X86/sse41.ll
+++ b/llvm/test/CodeGen/X86/sse41.ll
@@ -1003,14 +1003,14 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
 ; X32-LABEL: insertps_pr20411:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
+; X32-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; X32-NEXT: insertps $-36, LCPI49_1+12, %xmm0
 ; X32-NEXT: movups %xmm0, (%eax)
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_pr20411:
 ; X64: ## BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
+; X64-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; X64-NEXT: insertps $-36, LCPI49_1+{{.*}}(%rip), %xmm0
 ; X64-NEXT: movups %xmm0, (%rdi)
 ; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/widen_shuffle-1.ll b/llvm/test/CodeGen/X86/widen_shuffle-1.ll
index 70fdbb7c9c8..2aa870f16eb 100644
--- a/llvm/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/llvm/test/CodeGen/X86/widen_shuffle-1.ll
@@ -82,8 +82,8 @@ define void @shuf5(<8 x i8>* %p) nounwind {
 ; CHECK-LABEL: shuf5:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <4,33,u,u,u,u,u,u>
-; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; CHECK-NEXT: movlpd %xmm0, (%eax)
 ; CHECK-NEXT: retl
   %v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
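
For reference (not part of the patch above), the new splat-const.ll test exercises the standard insertelement plus shufflevector splat idiom. A minimal standalone sketch of that idiom follows; the function name @splat_example and the runtime operand %x are illustrative assumptions rather than anything taken from the diff, and the file can be run through llc with the same -mcpu values used in the RUN lines, e.g. llc < example.ll -mcpu=haswell.

; Sketch only (assumed names, not from the patch): splat a runtime value
; into all four lanes using insertelement followed by a zero-mask
; shufflevector. The constant-splat tests above use the same pattern with
; an immediate (0 or 42) in place of %x.
define <4 x i32> @splat_example(i32 %x) {
  %ins = insertelement <4 x i32> undef, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}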

