author    Chandler Carruth <chandlerc@gmail.com>    2012-12-28 14:43:42 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2012-12-28 14:43:42 +0000
commit    86ed53089fc0bff84b472f89ba42da8ea1a3df86 (patch)
tree      48daa097966da56c81dd251b860e9363e3e21567 /llvm/test/Transforms/Inline/inline_constprop.ll
parent    753e21d057b2e6f2d73e67b738fa870cfa093d6d (diff)
Fix a stunning oversight in the inline cost analysis. It was never
propagating the values it simplified to constants across a myriad of
instructions. Notably, a ptrtoint of a constant pointer (say, 0) didn't
propagate that constant, blocking a massive number of downstream
optimizations.

This was uncovered when investigating why we fail to inline and delete
the boilerplate in:

  void f() { std::vector<int> v; v.push_back(1); }

It turns out most of the efforts I've made thus far to improve the
analysis weren't getting far purely because of this. With this fixed,
the store-to-load forwarding patch enables LLVM to optimize the above
into an empty function. We still can't nuke a second push_back, but for
different reasons.

There is a very real chance this will cause noticeable changes in
inlining behavior, so please let me know if you see regressions (or
improvements!) because of this patch.

llvm-svn: 171196
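To illustrate the propagation the analysis was missing, here is a minimal,
standalone C++ sketch. It deliberately does not use the LLVM API: the Instr
struct and the SimplifiedValues map are illustrative stand-ins assumed to
mirror, at a very high level, how an inline-cost style walk can record
per-value constants. It folds the same cast chain that @callee5 in the test
below feeds a constant 42 through.

// Standalone sketch, not LLVM code: "Instr" and "SimplifiedValues" are
// illustrative stand-ins for the real inline-cost machinery.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

struct Instr {
  std::string Name;     // SSA result, e.g. "%ptrtoint"
  std::string Op;       // "const", "inttoptr", "ptrtoint", "trunc", "zext"
  std::string Operand;  // single operand name ("" for "const")
  int64_t Imm = 0;      // payload for "const"
};

int main() {
  // The cast chain from @callee5, with %x already simplified to 42.
  std::vector<Instr> Body = {
      {"%x", "const", "", 42},
      {"%inttoptr", "inttoptr", "%x"},
      {"%ptrtoint", "ptrtoint", "%inttoptr"},
      {"%trunc", "trunc", "%ptrtoint"},
      {"%zext", "zext", "%trunc"},
  };

  std::map<std::string, int64_t> SimplifiedValues;
  for (const Instr &I : Body) {
    std::optional<int64_t> Folded;
    if (I.Op == "const") {
      Folded = I.Imm;
    } else if (auto It = SimplifiedValues.find(I.Operand);
               It != SimplifiedValues.end()) {
      int64_t V = It->second;
      if (I.Op == "trunc") V = static_cast<int32_t>(V);   // model i64 -> i32
      if (I.Op == "zext")  V = static_cast<uint32_t>(V);  // model i32 -> i64
      Folded = V;  // inttoptr/ptrtoint preserve the value here
    }
    if (Folded) {
      // The conceptual step this commit adds: record every simplified
      // result so later instructions can keep folding.
      SimplifiedValues[I.Name] = *Folded;
      std::cout << I.Name << " folds to " << *Folded << "\n";
    }
  }
  // With the chain folded, %zext == 42, so the icmp and branch in @callee5
  // fold too and only the cheap bb.true path contributes to the cost.
}

If the SimplifiedValues insertion is dropped, nothing past %inttoptr ever
folds, which mirrors the failure mode this commit describes in the real
analysis.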
Diffstat (limited to 'llvm/test/Transforms/Inline/inline_constprop.ll')
-rw-r--r--  llvm/test/Transforms/Inline/inline_constprop.ll | 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/Inline/inline_constprop.ll b/llvm/test/Transforms/Inline/inline_constprop.ll
index b2a14fe0b71..77bc3784acb 100644
--- a/llvm/test/Transforms/Inline/inline_constprop.ll
+++ b/llvm/test/Transforms/Inline/inline_constprop.ll
@@ -149,6 +149,44 @@ bb.false:
ret i8 %z8
}
+define i64 @caller5(i64 %y) {
+; Check that we can round-trip constants through various kinds of casts etc. without
+; losing track of the constant prop in the inline cost analysis.
+;
+; CHECK: @caller5
+; CHECK-NOT: call
+; CHECK: ret i64 -1
+
+entry:
+ %x = call i64 @callee5(i64 42, i64 %y)
+ ret i64 %x
+}
+
+define i64 @callee5(i64 %x, i64 %y) {
+ %inttoptr = inttoptr i64 %x to i8*
+ %bitcast = bitcast i8* %inttoptr to i32*
+ %ptrtoint = ptrtoint i32* %bitcast to i64
+ %trunc = trunc i64 %ptrtoint to i32
+ %zext = zext i32 %trunc to i64
+ %cmp = icmp eq i64 %zext, 42
+ br i1 %cmp, label %bb.true, label %bb.false
+
+bb.true:
+ ret i64 -1
+
+bb.false:
+ ; This block mustn't be counted in the inline cost.
+ %y1 = add i64 %y, 1
+ %y2 = add i64 %y1, 1
+ %y3 = add i64 %y2, 1
+ %y4 = add i64 %y3, 1
+ %y5 = add i64 %y4, 1
+ %y6 = add i64 %y5, 1
+ %y7 = add i64 %y6, 1
+ %y8 = add i64 %y7, 1
+ ret i64 %y8
+}
+
define i32 @PR13412.main() {
; This is a somewhat complicated three layer subprogram that was reported to