diff options
| author | Sanjay Patel <spatel@rotateright.com> | 2014-12-19 20:23:41 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2014-12-19 20:23:41 +0000 |
| commit | 0428a5786e5578552e7f660ba133aecf95d724eb (patch) | |
| tree | a30df49b4313e9d6f23deaa1ec9cffb7241c0775 /llvm/test | |
| parent | 38ce8cd2e208921c02b2cc848135866a478de48c (diff) | |
| download | bcm5719-llvm-0428a5786e5578552e7f660ba133aecf95d724eb.tar.gz bcm5719-llvm-0428a5786e5578552e7f660ba133aecf95d724eb.zip | |
merge consecutive stores of extracted vector elements
Add a path to DAGCombiner::MergeConsecutiveStores()
to combine multiple scalar stores when the store operands
are extracted vector elements. This is a partial fix for
PR21711 ( http://llvm.org/bugs/show_bug.cgi?id=21711 ).
For the new test case, codegen improves from:
vmovss %xmm0, (%rdi)
vextractps $1, %xmm0, 4(%rdi)
vextractps $2, %xmm0, 8(%rdi)
vextractps $3, %xmm0, 12(%rdi)
vextractf128 $1, %ymm0, %xmm0
vmovss %xmm0, 16(%rdi)
vextractps $1, %xmm0, 20(%rdi)
vextractps $2, %xmm0, 24(%rdi)
vextractps $3, %xmm0, 28(%rdi)
vzeroupper
retq
To:
vmovups %ymm0, (%rdi)
vzeroupper
retq
Patch reviewed by Nadav Rotem.
Differential Revision: http://reviews.llvm.org/D6698
llvm-svn: 224611
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/MergeConsecutiveStores.ll | 33 |
1 files changed, 33 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index dfdaea523fd..cf984a4f3a9 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -434,3 +434,36 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 ; <label>:14
   ret void
 }
+
+define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
+  %vecext0 = extractelement <8 x float> %v, i32 0
+  %vecext1 = extractelement <8 x float> %v, i32 1
+  %vecext2 = extractelement <8 x float> %v, i32 2
+  %vecext3 = extractelement <8 x float> %v, i32 3
+  %vecext4 = extractelement <8 x float> %v, i32 4
+  %vecext5 = extractelement <8 x float> %v, i32 5
+  %vecext6 = extractelement <8 x float> %v, i32 6
+  %vecext7 = extractelement <8 x float> %v, i32 7
+  %arrayidx1 = getelementptr inbounds float* %ptr, i64 1
+  %arrayidx2 = getelementptr inbounds float* %ptr, i64 2
+  %arrayidx3 = getelementptr inbounds float* %ptr, i64 3
+  %arrayidx4 = getelementptr inbounds float* %ptr, i64 4
+  %arrayidx5 = getelementptr inbounds float* %ptr, i64 5
+  %arrayidx6 = getelementptr inbounds float* %ptr, i64 6
+  %arrayidx7 = getelementptr inbounds float* %ptr, i64 7
+  store float %vecext0, float* %ptr, align 4
+  store float %vecext1, float* %arrayidx1, align 4
+  store float %vecext2, float* %arrayidx2, align 4
+  store float %vecext3, float* %arrayidx3, align 4
+  store float %vecext4, float* %arrayidx4, align 4
+  store float %vecext5, float* %arrayidx5, align 4
+  store float %vecext6, float* %arrayidx6, align 4
+  store float %vecext7, float* %arrayidx7, align 4
+  ret void
+
+; CHECK-LABEL: merge_vec_element_store
+; CHECK: vmovups
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}

