diff options
author | Quentin Colombet <qcolombet@apple.com> | 2014-05-20 19:25:04 +0000 |
---|---|---|
committer | Quentin Colombet <qcolombet@apple.com> | 2014-05-20 19:25:04 +0000 |
commit | c88baa5c10afebbd4dcd16f05fd242853f374558 (patch) | |
tree | 4baab35c9e91cb3ef63660d88b7e083420be070c /llvm/test | |
parent | 6f782b12aa355248c55ccded259c9847c4c22db9 (diff) | |
download | bcm5719-llvm-c88baa5c10afebbd4dcd16f05fd242853f374558.tar.gz bcm5719-llvm-c88baa5c10afebbd4dcd16f05fd242853f374558.zip |
[LSR] Canonicalize reg1 + ... + regN into reg1 + ... + 1*regN.
This commit introduces a canonical representation for the formulae.
Basically, as soon as a formula has more than one base register, the scaled
register field is used for one of them. The register put into the scaled
register is preferably a loop variant.
The commit refactors how the formulae are built in order to produce such
representation.
This yields a more accurate, but still perfectible, cost model.
<rdar://problem/16731508>
llvm-svn: 209230
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/X86/avoid_complex_am.ll | 11 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/masked-iv-safe.ll | 6 |
2 files changed, 10 insertions, 7 deletions
diff --git a/llvm/test/CodeGen/X86/avoid_complex_am.ll b/llvm/test/CodeGen/X86/avoid_complex_am.ll index 0b7a13d3c09..7f095190ab8 100644 --- a/llvm/test/CodeGen/X86/avoid_complex_am.ll +++ b/llvm/test/CodeGen/X86/avoid_complex_am.ll @@ -1,6 +1,9 @@ ; RUN: opt -S -loop-reduce < %s | FileCheck %s ; Complex addressing mode are costly. ; Make loop-reduce prefer unscaled accesses. +; On X86, reg1 + 1*reg2 has the same cost as reg1 + 8*reg2. +; Therefore, LSR currently prefers to fold as much computation as possible +; in the addressing mode. ; <rdar://problem/16730541> target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx" @@ -18,8 +21,8 @@ for.body: ; preds = %for.body, %entry %tmp = add nsw i64 %indvars.iv, -1 %arrayidx = getelementptr inbounds double* %b, i64 %tmp %tmp1 = load double* %arrayidx, align 8 -; The induction variable should carry the scaling factor: 1 * 8 = 8. -; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8 +; The induction variable should carry the scaling factor: 1. +; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1 %indvars.iv.next = add i64 %indvars.iv, 1 %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next %tmp2 = load double* %arrayidx2, align 8 @@ -27,8 +30,8 @@ for.body: ; preds = %for.body, %entry %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv store double %mul, double* %arrayidx4, align 8 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 -; Comparison should be 19 * 8 = 152. -; CHECK: icmp eq i32 {{%[^,]+}}, 152 +; Comparison should be 19 * 1 = 19. 
+; CHECK: icmp eq i32 {{%[^,]+}}, 19 %exitcond = icmp eq i32 %lftr.wideiv, 20 br i1 %exitcond, label %for.end, label %for.body diff --git a/llvm/test/CodeGen/X86/masked-iv-safe.ll b/llvm/test/CodeGen/X86/masked-iv-safe.ll index 7f61e10f5f6..9ddc84708d5 100644 --- a/llvm/test/CodeGen/X86/masked-iv-safe.ll +++ b/llvm/test/CodeGen/X86/masked-iv-safe.ll @@ -5,7 +5,7 @@ ; CHECK-LABEL: count_up ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: addq $8, +; CHECK: incq ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @count_up(double* %d, i64 %n) nounwind { @@ -71,7 +71,7 @@ return: ; CHECK-LABEL: count_up_signed ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: addq $8, +; CHECK: incq ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @count_up_signed(double* %d, i64 %n) nounwind { @@ -242,7 +242,7 @@ return: ; CHECK-LABEL: another_count_down_signed ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: addq $-8, +; CHECK: decq ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @another_count_down_signed(double* %d, i64 %n) nounwind { |