diff options
author | Wei Mi <wmi@google.com> | 2017-07-06 15:52:14 +0000 |
---|---|---|
committer | Wei Mi <wmi@google.com> | 2017-07-06 15:52:14 +0000 |
commit | 90707394e37ffade2d9a14caed4e875423b5a101 (patch) | |
tree | 190c62859d3293db390abd0427f7c4943f49c61d /llvm/test/Transforms | |
parent | 713600747e93574c1b3ec76d7df5b40e5d19b2e3 (diff) | |
download | bcm5719-llvm-90707394e37ffade2d9a14caed4e875423b5a101.tar.gz bcm5719-llvm-90707394e37ffade2d9a14caed4e875423b5a101.zip |
[LSR] Narrow search space by filtering non-optimal formulae with the same ScaledReg and Scale.
When the formulae search space is huge, LSR uses a series of heuristics to keep
pruning the search space until the number of possible solutions is within a
certain limit.
The big hammer of the series of heuristics is NarrowSearchSpaceByPickingWinnerRegs,
which picks the register that is used by the most LSRUses and deletes the other
formulae which don't use the register. This is an effective way to prune the search
space, but quite often not a good way to keep the best solution. We saw cases before
where the heuristic pruned the best formula candidate out of the search space.
To relieve the problem, we introduce a new heuristic called
NarrowSearchSpaceByFilterFormulaWithSameScaledReg. The basic idea is that, in order
to reduce the search space while keeping the best formula, we want to keep as many
formulae with different Scale and ScaledReg as possible. That is because the central
idea of LSR is to choose a group of loop induction variables and use those induction
variables to represent LSRUses. An induction variable candidate is often represented
by the Scale and ScaledReg in a formula. If we have more formulae with different
ScaledReg and Scale to choose from, we have a better opportunity to find the best solution.
That is why we believe pruning the search space by only keeping the best formula with the
same Scale and ScaledReg should be more effective than PickingWinnerReg. We use
two criteria to choose the best formula with the same Scale and ScaledReg. The first
criterion is to select the formula using fewer non-shared registers, and the second
criterion is to select the formula with the lower cost computed by RateFormula. The patch
runs the heuristic before NarrowSearchSpaceByPickingWinnerRegs, which remains the
last resort.
Testing shows we get 1.8% and 2% improvements on two internal benchmarks on x86. LLVM
nightly testsuite performance is neutral. We also tried lsr-exp-narrow and it didn't
help on the two improved internal cases we saw.
Differential Revision: https://reviews.llvm.org/D34583
llvm-svn: 307269
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r-- | llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll | 4 | ||||
-rw-r--r-- | llvm/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll | 60 |
2 files changed, 62 insertions, 2 deletions
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll b/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll index dcd068191e1..ea3f6077231 100644 --- a/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll @@ -14,8 +14,8 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; current LSR cost model. ; CHECK-NOT: = ptrtoint i8* undef to i64 ; CHECK: .lr.ph -; CHECK: [[TMP:%[^ ]+]] = add i64 %tmp5, 1 -; CHECK: sub i64 [[TMP]], %tmp6 +; CHECK: [[TMP:%[^ ]+]] = add i64 %tmp{{[0-9]+}}, -1 +; CHECK: sub i64 [[TMP]], %tmp{{[0-9]+}} ; CHECK: ret void define void @VerifyDiagnosticConsumerTest() unnamed_addr nounwind uwtable align 2 { bb: diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll new file mode 100644 index 00000000000..4ce6f1a79fb --- /dev/null +++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-filtering-scaledreg.ll @@ -0,0 +1,60 @@ +; RUN: opt < %s -loop-reduce -lsr-filter-same-scaled-reg=true -mtriple=x86_64-unknown-linux-gnu -S | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" + +%struct.ham = type { i8, i8, [5 x i32], i64, i64, i64 } + +@global = external local_unnamed_addr global %struct.ham, align 8 + +define void @foo() local_unnamed_addr { +bb: + %tmp = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 3), align 8 + %tmp1 = and i64 %tmp, 1792 + %tmp2 = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 4), align 8 + %tmp3 = add i64 %tmp1, %tmp2 + %tmp4 = load i8*, i8** null, align 8 + %tmp5 = getelementptr inbounds i8, i8* %tmp4, i64 0 + %tmp6 = sub i64 0, %tmp3 + %tmp7 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp6 + %tmp8 = inttoptr i64 0 to i8* + br label %bb9 + +; Without filtering non-optimal formulae 
with the same ScaledReg and Scale, the strategy +; to narrow LSR search space by picking winner reg will generate only one lsr.iv and +; unoptimal result. +; CHECK-LABEL: @foo( +; CHECK: bb9: +; CHECK-NEXT: = phi i8* +; CHECK-NEXT: = phi i8* + +bb9: ; preds = %bb12, %bb + %tmp10 = phi i8* [ %tmp7, %bb ], [ %tmp16, %bb12 ] + %tmp11 = phi i8* [ %tmp8, %bb ], [ %tmp17, %bb12 ] + br i1 false, label %bb18, label %bb12 + +bb12: ; preds = %bb9 + %tmp13 = getelementptr inbounds i8, i8* %tmp10, i64 8 + %tmp14 = bitcast i8* %tmp13 to i64* + %tmp15 = load i64, i64* %tmp14, align 1 + %tmp16 = getelementptr inbounds i8, i8* %tmp10, i64 16 + %tmp17 = getelementptr inbounds i8, i8* %tmp11, i64 16 + br label %bb9 + +bb18: ; preds = %bb9 + %tmp19 = icmp ugt i8* %tmp11, null + %tmp20 = getelementptr inbounds i8, i8* %tmp10, i64 8 + %tmp21 = getelementptr inbounds i8, i8* %tmp11, i64 8 + %tmp22 = select i1 %tmp19, i8* %tmp10, i8* %tmp20 + %tmp23 = select i1 %tmp19, i8* %tmp11, i8* %tmp21 + br label %bb24 + +bb24: ; preds = %bb24, %bb18 + %tmp25 = phi i8* [ %tmp27, %bb24 ], [ %tmp22, %bb18 ] + %tmp26 = phi i8* [ %tmp29, %bb24 ], [ %tmp23, %bb18 ] + %tmp27 = getelementptr inbounds i8, i8* %tmp25, i64 1 + %tmp28 = load i8, i8* %tmp25, align 1 + %tmp29 = getelementptr inbounds i8, i8* %tmp26, i64 1 + store i8 %tmp28, i8* %tmp26, align 1 + %tmp30 = icmp eq i8* %tmp29, %tmp5 + br label %bb24 +} |