author     Hal Finkel <hfinkel@anl.gov>  2013-02-08 21:35:47 +0000
committer  Hal Finkel <hfinkel@anl.gov>  2013-02-08 21:35:47 +0000
commit     2581905f8149fa79e3ca75e33f8a08d4d6a23546 (patch)
tree       0e7d986a501c59bdda6868126736ca836230f7fd /llvm/test/CodeGen
parent     8d7edced830ffb36802a98e95fb625d76e3cc34f (diff)
DAGCombiner: Constant folding around pre-increment loads/stores
Previously, even when a pre-increment load or store was generated, we often
needed to keep a copy of the original base register for use with other
offsets. If all of those offsets are constants (including the offset that was
combined into the addressing mode), then the copy is clearly unnecessary.
This change adjusts the other offsets to use the new incremented address.

llvm-svn: 174746
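A hedged sketch of the effect at the machine level (hand-written PPC64
assembly, not actual llc output; register numbers are chosen for
illustration). Before this change, the loop body had to preserve the
original base so the non-combined offsets could still be applied:

    mr    r7, r3        ; copy the original base register
    stdux r5, r3, r6    ; pre-increment store: r3 <- r3 + r6
    stdx  r5, r7, r8    ; remaining stores offset from the old base
    stdx  r5, r7, r9
    stdx  r5, r7, r10
    bdnz  .Lloop

After this change, the remaining constant offsets are rebased onto the
incremented address, and the copy disappears:

    stdux r5, r3, r6    ; pre-increment store: r3 <- r3 + r6
    stdx  r5, r3, r8    ; r8-r10 now hold (old offset - increment)
    stdx  r5, r3, r9
    stdx  r5, r3, r10
    bdnz  .Lloop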
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/PowerPC/stdux-constuse.ll | 47
1 file changed, 47 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/PowerPC/stdux-constuse.ll b/llvm/test/CodeGen/PowerPC/stdux-constuse.ll
new file mode 100644
index 00000000000..e62d438014e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/stdux-constuse.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mcpu=a2 -disable-lsr < %s | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test1(i64 %add, i64* %ptr) nounwind {
+entry:
+ %p1 = getelementptr i64* %ptr, i64 144115188075855
+ br label %for.cond2.preheader
+
+for.cond2.preheader:
+ %nl.018 = phi i32 [ 0, %entry ], [ %inc9, %for.end ]
+ br label %for.body4
+
+for.body4:
+ %lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
+ %i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
+ %i6 = getelementptr i64* %i0, i64 400000
+ %i7 = getelementptr i64* %i6, i64 300000
+ %i8 = getelementptr i64* %i6, i64 200000
+ %i9 = getelementptr i64* %i6, i64 100000
+ store i64 %add, i64* %i6, align 32
+ store i64 %add, i64* %i7, align 32
+ store i64 %add, i64* %i8, align 32
+ store i64 %add, i64* %i9, align 32
+ %lsr.iv.next = add i32 %lsr.iv, -16
+ %exitcond.15 = icmp eq i32 %lsr.iv.next, 0
+ br i1 %exitcond.15, label %for.end, label %for.body4
+
+; Make sure that we generate the most compact form of this loop with no
+; unnecessary moves.
+; CHECK: @test1
+; CHECK: mtctr
+; CHECK: stdux
+; CHECK-NEXT: stdx
+; CHECK-NEXT: stdx
+; CHECK-NEXT: stdx
+; CHECK-NEXT: bdnz
+
+for.end:
+ %inc9 = add nsw i32 %nl.018, 1
+ %exitcond = icmp eq i32 %inc9, 400000
+ br i1 %exitcond, label %for.end10, label %for.cond2.preheader
+
+for.end10:
+ ret i32 0
+}
+
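For reference, lit expands the RUN line above (with %s bound to the test
file) into an equivalent shell pipeline, so the test can be reproduced by
hand with a built llc and FileCheck:

    llc -mcpu=a2 -disable-lsr < llvm/test/CodeGen/PowerPC/stdux-constuse.ll \
      | FileCheck llvm/test/CodeGen/PowerPC/stdux-constuse.ll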