diff options
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/MergeConsecutiveStores.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll  |  2
2 files changed, 9 insertions, 21 deletions
diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index 608b92da514..69f5f4c7a05 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -492,15 +492,10 @@ define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
   store float %vecext7, float* %arrayidx7, align 4
   ret void
-; CHECK: vextractf128 $1, %ymm0, %xmm1
-; CHECK: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-LABEL: merge_vec_element_store
+; CHECK: vmovups %ymm0, (%rdi)
+; CHECK: vzeroupper
 ; CHECK: retq
-
-; This is what should be generated:
-; FIXME-LABEL: merge_vec_element_store
-; FIXME: vmovups
-; FIXME-NEXT: vzeroupper
-; FIXME-NEXT: retq
 }
 
 ; PR21711 - Merge vector stores into wider vector stores.
@@ -520,18 +515,11 @@ define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x flo
   store <4 x float> %shuffle3, <4 x float>* %idx3, align 16
   ret void
-; These vblendpd are obviously redundant.
-; CHECK: vblendpd $12, %ymm0, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3]
-; CHECK: vmovupd %ymm0, 48(%rdi)
-; CHECK: vblendpd $12, %ymm1, %ymm1, %ymm0 # ymm0 = ymm1[0,1,2,3]
-; CHECK: vmovupd %ymm0, 80(%rdi)
-
-; This is what should be generated:
-; FIXME-LABEL: merge_vec_extract_stores
-; FIXME: vmovups %ymm0, 48(%rdi)
-; FIXME-NEXT: vmovups %ymm1, 80(%rdi)
-; FIXME-NEXT: vzeroupper
-; FIXME-NEXT: retq
+; CHECK-LABEL: merge_vec_extract_stores
+; CHECK: vmovups %ymm0, 48(%rdi)
+; CHECK-NEXT: vmovups %ymm1, 80(%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
 }
 
 ; Merging vector stores when sourced from vector loads.
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 27bc2bb1682..8c84580120f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -807,10 +807,10 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_0412:
 ; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
 ; AVX1-NEXT:    retq
 ;