| author | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-07-05 14:38:48 +0000 |
|---|---|---|
| committer | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-07-05 14:38:48 +0000 |
| commit | c40f27b52d40bf5018031321ebe6be071697666a (patch) | |
| tree | 398483c619c8f84120607fe88f63d1051e602d05 /llvm/test | |
| parent | 1ca6deaeb71cb1728797eaf51c8f75a3a62eba9a (diff) | |
| download | bcm5719-llvm-c40f27b52d40bf5018031321ebe6be071697666a.tar.gz bcm5719-llvm-c40f27b52d40bf5018031321ebe6be071697666a.zip | |
[SystemZ] Remove no-op MVCs
The stack coloring pass has code to delete stores and loads that become
trivially dead after coloring. Extend it to cope with single instructions
that copy from one frame index to another.
The testcase happens to show an example of this kicking in at the moment, but it occurred in real code too.
llvm-svn: 185705
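A minimal C++ sketch of the cleanup described above, assuming TargetInstrInfo's isStackSlotCopy hook; the helper name and loop structure are illustrative rather than the committed code. After coloring has remapped frame indexes, a single instruction that copies one frame slot to another (such as the SystemZ MVC in the test below) becomes a self-copy whenever both indexes land in the same slot, and can simply be erased:

```cpp
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Illustrative helper: erase frame-to-frame copies that stack coloring
// has turned into self-copies.
static bool removeNoOpStackCopies(MachineBasicBlock &MBB,
                                  const TargetInstrInfo *TII) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I++; // advance first; MI may be erased below
    int DstFI = 0, SrcFI = 0;
    // isStackSlotCopy identifies a single instruction that copies the
    // contents of one frame index to another.  After remapping, equal
    // indexes mean the copy is a no-op.
    if (TII->isStackSlotCopy(MI, DstFI, SrcFI) && DstFI == SrcFI) {
      MI.eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}
```

On SystemZ the deleted instruction is the memory-to-memory MVC, which is why the test's CHECK-NOT line rejects any `mvc N(8,%r15), N(%r15)` self-copy against the stack pointer.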
Diffstat (limited to 'llvm/test')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/SystemZ/spill-01.ll | 89 |

1 file changed, 89 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/SystemZ/spill-01.ll b/llvm/test/CodeGen/SystemZ/spill-01.ll
index b2d9fe7be4a..d48e9827f52 100644
--- a/llvm/test/CodeGen/SystemZ/spill-01.ll
+++ b/llvm/test/CodeGen/SystemZ/spill-01.ll
@@ -456,3 +456,92 @@ skip:
   ret void
 }
+
+; This used to generate a no-op MVC. It is very sensitive to spill heuristics.
+define void @f11() {
+; CHECK: f11:
+; CHECK-NOT: mvc [[OFFSET:[0-9]+]](8,%r15), [[OFFSET]](%r15)
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64 *@h0
+  %val1 = load volatile i64 *@h1
+  %val2 = load volatile i64 *@h2
+  %val3 = load volatile i64 *@h3
+  %val4 = load volatile i64 *@h4
+  %val5 = load volatile i64 *@h5
+  %val6 = load volatile i64 *@h6
+  %val7 = load volatile i64 *@h7
+
+  %altval0 = load volatile i64 *@h0
+  %altval1 = load volatile i64 *@h1
+
+  call void @foo()
+
+  store volatile i64 %val0, i64 *@h0
+  store volatile i64 %val1, i64 *@h1
+  store volatile i64 %val2, i64 *@h2
+  store volatile i64 %val3, i64 *@h3
+  store volatile i64 %val4, i64 *@h4
+  store volatile i64 %val5, i64 *@h5
+  store volatile i64 %val6, i64 *@h6
+  store volatile i64 %val7, i64 *@h7
+
+  %check = load volatile i64 *@h0
+  %cond = icmp eq i64 %check, 0
+  br i1 %cond, label %a1, label %b1
+
+a1:
+  call void @foo()
+  br label %join1
+
+b1:
+  call void @foo()
+  br label %join1
+
+join1:
+  %newval0 = phi i64 [ %val0, %a1 ], [ %altval0, %b1 ]
+
+  call void @foo()
+
+  store volatile i64 %val1, i64 *@h1
+  store volatile i64 %val2, i64 *@h2
+  store volatile i64 %val3, i64 *@h3
+  store volatile i64 %val4, i64 *@h4
+  store volatile i64 %val5, i64 *@h5
+  store volatile i64 %val6, i64 *@h6
+  store volatile i64 %val7, i64 *@h7
+  br i1 %cond, label %a2, label %b2
+
+a2:
+  call void @foo()
+  br label %join2
+
+b2:
+  call void @foo()
+  br label %join2
+
+join2:
+  %newval1 = phi i64 [ %val1, %a2 ], [ %altval1, %b2 ]
+
+  call void @foo()
+
+  store volatile i64 %val2, i64 *@h2
+  store volatile i64 %val3, i64 *@h3
+  store volatile i64 %val4, i64 *@h4
+  store volatile i64 %val5, i64 *@h5
+  store volatile i64 %val6, i64 *@h6
+  store volatile i64 %val7, i64 *@h7
+
+  call void @foo()
+
+  store volatile i64 %newval0, i64 *@h0
+  store volatile i64 %newval1, i64 *@h1
+  store volatile i64 %val2, i64 *@h2
+  store volatile i64 %val3, i64 *@h3
+  store volatile i64 %val4, i64 *@h4
+  store volatile i64 %val5, i64 *@h5
+  store volatile i64 %val6, i64 *@h6
+  store volatile i64 %val7, i64 *@h7
+
+  ret void
+}
```

