author     Evgeny Stupachenko <evstupac@gmail.com>    2017-02-11 02:57:43 +0000
committer  Evgeny Stupachenko <evstupac@gmail.com>    2017-02-11 02:57:43 +0000
commit     fe6f548d2d69a684f0b8cd99e2e89679dc998fea (patch)
tree       587f6ef832e9e67259a7aa62f3bba9df356431ac /llvm/test/Transforms/LoopStrengthReduce/X86
parent     a05bdf75c0fe4fa25fa4bfb2aedab39269ace972 (diff)
Fix PR23384 (under "-lsr-insns-cost" option)
Summary: The patch adds the number of instructions generated by a solution
to the LSR cost under the "-lsr-insns-cost" option.

Reviewers: qcolombet, hfinkel

Differential Revision: http://reviews.llvm.org/D28307

From: Evgeny Stupachenko <evstupac@gmail.com>

llvm-svn: 294821
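For context, an option such as "-lsr-insns-cost" is typically wired up through
an llvm::cl::opt flag that the cost comparison then consults. The sketch below
is illustrative only, not the code from D28307; the names LSRInsnsCost,
Cost::NumRegs, Cost::Insns, and Cost::isLess are assumptions made for the
example:

    #include "llvm/Support/CommandLine.h"

    // Hypothetical sketch; not the actual D28307 change.
    static llvm::cl::opt<bool> LSRInsnsCost(
        "lsr-insns-cost", llvm::cl::init(false),
        llvm::cl::desc("Add the instruction count of a solution to the LSR cost"));

    struct Cost {
      unsigned NumRegs = 0; // register pressure of a candidate solution
      unsigned Insns = 0;   // assumed: instructions the solution would generate

      // Returns true if this solution is cheaper than Other. With the flag
      // set, the instruction count is compared first; otherwise (and on
      // ties) the register count decides, as before.
      bool isLess(const Cost &Other) const {
        if (LSRInsnsCost && Insns != Other.Insns)
          return Insns < Other.Insns;
        return NumRegs < Other.NumRegs;
      }
    };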
Diffstat (limited to 'llvm/test/Transforms/LoopStrengthReduce/X86')
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll  52
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll  58
2 files changed, 110 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
new file mode 100644
index 00000000000..25b6ad71144
--- /dev/null
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that, with -lsr-insns-cost, LSR rewrites the compare
+; against the static trip count (1024) into a compare with 0.
+
+; BOTH: for.body:
+; INSN: icmp eq i64 %lsr.iv.next, 0
+; REGS: icmp eq i64 %indvars.iv.next, 1024
+
+; The LLC run checks that LSR optimizes the compare against a static counter.
+; That is, instead of generating the following:
+; movl %ecx, (%rdx,%rax,4)
+; incq %rax
+; cmpq $1024, %rax
+; LSR should optimize out the cmp:
+; movl %ecx, 4096(%rdx,%rax)
+; addq $4, %rax
+; or
+; movl %ecx, 4096(%rdx,%rax,4)
+; incq %rax
+
+; CHECK: LBB0_1:
+; CHECK-NEXT: movl 4096(%{{...}},[[REG:%...]]
+; CHECK-NEXT: addl 4096(%{{...}},[[REG]]
+; CHECK-NEXT: movl %{{...}}, 4096(%{{...}},[[REG]]
+; CHECK-NOT: cmp
+; CHECK: jne
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
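+; For reference, the IR below corresponds roughly to this C++ loop
+; (reconstructed from the IR; the original source is an assumption):
+;   void foo(int *x, int *y, int *q) {
+;     for (long i = 0; i < 1024; ++i)
+;       q[i] = x[i] + y[i];
+;   }
+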
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
new file mode 100644
index 00000000000..e5727f1e30d
--- /dev/null
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that LSR prefers fewer instructions to fewer registers.
+; On x86, LSR should prefer a complicated addressing mode to new LSR
+; induction variables.
+
+; BOTH: for.body:
+; INSN: getelementptr i32, i32* %x, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %y, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %q, i64 %indvars.iv
+; REGS: %lsr.iv4 = phi
+; REGS: %lsr.iv2 = phi
+; REGS: %lsr.iv1 = phi
+; REGS: getelementptr i32, i32* %lsr.iv1, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv2, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv4, i64 1
+
+; The LLC run checks that LSR prefers fewer instructions to fewer registers.
+; LSR should prefer a complicated addressing mode to additional add
+; instructions.
+
+; CHECK: LBB0_2:
+; CHECK-NEXT: movl (%r{{[a-z][a-z]}},
+; CHECK-NEXT: addl (%r{{[a-z][a-z]}},
+; CHECK-NEXT: movl %e{{[a-z][a-z]}}, (%r{{[a-z][a-z]}},
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
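+; For reference, the IR below corresponds roughly to the same C++ loop with a
+; runtime trip count (reconstructed from the IR; the source is an assumption):
+;   void foo(int *x, int *y, int *q, int n) {
+;     for (long i = 0; i < n; ++i)
+;       q[i] = x[i] + y[i];
+;   }
+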
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q, i32 %n) {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}