diff options
| author | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-04-04 23:26:27 +0000 |
|---|---|---|
| committer | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-04-04 23:26:27 +0000 |
| commit | df6f67ed87c85fe25321da3686881909f38e17a9 (patch) | |
| tree | ffd0658e3776179bcd33dc37bed841a52f06a697 /llvm/test/Transforms/LoopVectorize | |
| parent | 44f902ed7d9b97fa98d383607cb9fab63f8b88a9 (diff) | |
| download | bcm5719-llvm-df6f67ed87c85fe25321da3686881909f38e17a9.tar.gz bcm5719-llvm-df6f67ed87c85fe25321da3686881909f38e17a9.zip | |
LoopVectorizer: Pass OperandValueKind information to the cost model
Pass down the fact that an operand is going to be a vector of constants.
This should bring the performance of MultiSource/Benchmarks/PAQ8p/paq8p on x86
back. It had degraded to scalar performance due to my previous shift cost change
that made all shifts expensive on x86.
radar://13576547
llvm-svn: 178809
Diffstat (limited to 'llvm/test/Transforms/LoopVectorize')
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll | 28 |
1 files changed, 28 insertions, 0 deletions
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll new file mode 100644 index 00000000000..6c924409af3 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll @@ -0,0 +1,28 @@ +; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -loop-vectorize -dce -instcombine -S < %s | FileCheck %s + +@B = common global [1024 x i32] zeroinitializer, align 16 +@A = common global [1024 x i32] zeroinitializer, align 16 + +; We used to not vectorize this loop because the shift was deemed too expensive. +; Now that we differentiate shift cost based on the operand value kind, we will +; vectorize this loop. +; CHECK: ashr <4 x i32> +define void @f() { +entry: + br label %for.body + +for.body: + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv + %0 = load i32* %arrayidx, align 4 + %shl = ashr i32 %0, 3 + %arrayidx2 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv + store i32 %shl, i32* %arrayidx2, align 4 + %indvars.iv.next = add i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp eq i32 %lftr.wideiv, 1024 + br i1 %exitcond, label %for.end, label %for.body + +for.end: + ret void +} |

