author    | Gil Rapaport <gil.rapaport@intel.com> | 2017-11-14 12:09:30 +0000
committer | Gil Rapaport <gil.rapaport@intel.com> | 2017-11-14 12:09:30 +0000
commit    | 848581cadb3f5cd0229cf580c5cb56f679900d3d
tree      | 8efea72359f6dba678330e7b13ec420fb821d965 /llvm/test/Transforms/LoopVectorize
parent    | 5cdc4f9c330c7d4d411e1bbc77084c1415593ded
[LV] Introduce VPBlendRecipe, VPWidenMemoryInstructionRecipe
This patch is part of D38676.
It introduces two new Recipes to handle instructions whose vectorization
involves masking. In D38676 these Recipes take VPlan-level masks; in this
patch they still rely on ILV's existing createEdgeMask() and createBlockInMask().
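For context, createEdgeMask() computes the predicate of a CFG edge by ANDing the source block's mask with the branch condition, and createBlockInMask() ORs together the masks of a block's incoming edges. A minimal sketch in LLVM IR of the mask an if.then block (like the one in the test below) would receive at VF = 8; the name %for.body.mask is illustrative, not taken from the patch:

```llvm
; broadcast the scalar branch condition %c to a vector (VF = 8)
%c.ins   = insertelement <8 x i1> undef, i1 %c, i32 0
%c.splat = shufflevector <8 x i1> %c.ins, <8 x i1> undef, <8 x i32> zeroinitializer
; edge mask for for.body -> if.then: the source block's mask AND the condition
%edge.mask = and <8 x i1> %for.body.mask, %c.splat
; if.then has a single predecessor, so its block-in mask is just this edge mask
```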
VPBlendRecipe handles intra-loop phi nodes, which are vectorized as a sequence
of SELECTs. Its execute() code is refactored out of ILV::widenPHIInstruction(),
which now handles only loop-header phi nodes.
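As an illustrative sketch (not code from the patch): the phi %tmp5 in the test below merges %x from for.body with %tmp4 from if.then, so its blend at VF = 8 reduces to a single select, assuming %if.then.mask is the mask computed for if.then and %tmp4.vec/%x.splat are the widened incoming values:

```llvm
; scalar form:  %tmp5 = phi i32 [ %x, %for.body ], [ %tmp4, %if.then ]
; blended form: take each lane from the if.then value where its mask is set
%predphi = select <8 x i1> %if.then.mask, <8 x i32> %tmp4.vec, <8 x i32> %x.splat
```

With more than two incoming values, one select per additional incoming value is chained, which is why the commit message speaks of a sequence of SELECTs.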
VPWidenMemoryInstructionRecipe handles loads and stores that are to be widened
(but are not part of an Interleave Group). In this patch its execute() simply
calls ILV::vectorizeMemoryInstruction().
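For illustration, a consecutive unpredicated load widens into a single vector load, while a predicated one goes through the masked-load intrinsic; a sketch at VF = 8 (the pointer names are hypothetical, not from the patch):

```llvm
; scalar:   %tmp2 = load i32, i32* %tmp0, align 4
; widened, consecutive accesses:
%vec.ptr   = bitcast i32* %tmp0 to <8 x i32>*
%wide.load = load <8 x i32>, <8 x i32>* %vec.ptr, align 4
; widened under a mask: inactive lanes take the pass-through operand
%masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %vec.ptr, i32 4, <8 x i1> %mask, <8 x i32> undef)
```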
Differential Revision: https://reviews.llvm.org/D39068
llvm-svn: 318149
Diffstat (limited to 'llvm/test/Transforms/LoopVectorize')
-rw-r--r-- | llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll | 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
index b35fc59542b..45d843c9d58 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
@@ -1,4 +1,5 @@
 ; RUN: opt < %s -mattr=avx -force-vector-width=2 -force-vector-interleave=1 -loop-vectorize -simplifycfg -S | FileCheck %s
+; RUN: opt -mcpu=skylake-avx512 -S -force-vector-width=8 -force-vector-interleave=1 -loop-vectorize < %s | FileCheck %s --check-prefix=SINK-GATHER
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -58,3 +59,40 @@ for.end:
   %tmp8 = phi i32 [ %tmp7, %for.inc ]
   ret i32 %tmp8
 }
+
+; This test ensures that a load, which would otherwise have been widened, is
+; instead scalarized if the cost model so decides, as part of its
+; sink-scalar-operands optimization for predicated instructions.
+;
+; SINK-GATHER: vector.body:
+; SINK-GATHER: pred.udiv.if:
+; SINK-GATHER: %[[T0:.+]] = load i32, i32* %{{.*}}, align 4
+; SINK-GATHER: %{{.*}} = udiv i32 %[[T0]], %{{.*}}
+; SINK-GATHER: pred.udiv.continue:
+define i32 @scalarize_and_sink_gather(i32* %a, i1 %c, i32 %x, i64 %n) {
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
+  %r = phi i32 [ 0, %entry ], [ %tmp6, %for.inc ]
+  %i7 = mul i64 %i, 777
+  br i1 %c, label %if.then, label %for.inc
+
+if.then:
+  %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i7
+  %tmp2 = load i32, i32* %tmp0, align 4
+  %tmp4 = udiv i32 %tmp2, %x
+  br label %for.inc
+
+for.inc:
+  %tmp5 = phi i32 [ %x, %for.body ], [ %tmp4, %if.then ]
+  %tmp6 = add i32 %r, %tmp5
+  %i.next = add nuw nsw i64 %i, 1
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  %tmp7 = phi i32 [ %tmp6, %for.inc ]
+  ret i32 %tmp7
+}
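The SINK-GATHER checks assert that the strided load (stride 777) is not emitted as a gather but is scalarized and sunk into the predicated block together with the udiv. Schematically, the vector body then contains one such region per lane, of roughly the following shape. This is a hand-written sketch, not actual vectorizer output; %lane.idx and the other value names are hypothetical, and only the block labels follow the structure the test checks for:

```llvm
pred.udiv.if:                              ; entered only when this lane's mask bit is set
  %addr = getelementptr inbounds i32, i32* %a, i64 %lane.idx
  %elt  = load i32, i32* %addr, align 4    ; the scalar load, sunk into the predicated block
  %div  = udiv i32 %elt, %x                ; the predicated scalar udiv
  %ins  = insertelement <8 x i32> undef, i32 %div, i32 0
  br label %pred.udiv.continue

pred.udiv.continue:
  %res = phi <8 x i32> [ undef, %vector.body ], [ %ins, %pred.udiv.if ]
```

Sinking the load here means lanes whose mask bit is clear never execute it, which is the point of the sink-scalar-operands optimization the commit message references.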