summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms/LoopVectorize/SystemZ
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/Transforms/LoopVectorize/SystemZ')
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll72
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll38
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll27
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll149
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll70
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/pr38110.ll50
9 files changed, 469 insertions, 0 deletions
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
new file mode 100644
index 00000000000..1f7a6d29c57
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
@@ -0,0 +1,72 @@
+; RUN: opt -S -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize -dce \
+; RUN: -instcombine -force-vector-width=2 < %s | FileCheck %s
+;
+; Test that the loop vectorizer does not generate vector addresses that must then
+; always be extracted.
+
+; Check that the addresses for a scalarized memory access are not extracted
+; from a vector register.
+define i32 @foo(i32* nocapture %A) {
+;CHECK-LABEL: @foo(
+;CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+;CHECK: %0 = shl nsw i64 %index, 2
+;CHECK: %1 = shl i64 %index, 2
+;CHECK: %2 = or i64 %1, 4
+;CHECK: %3 = getelementptr inbounds i32, i32* %A, i64 %0
+;CHECK: %4 = getelementptr inbounds i32, i32* %A, i64 %2
+;CHECK: store i32 4, i32* %3, align 4
+;CHECK: store i32 4, i32* %4, align 4
+
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %0 = shl nsw i64 %indvars.iv, 2
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
+ store i32 4, i32* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 undef
+}
+
+
+; Check that a load of an address is scalarized.
+define i32 @foo1(i32* nocapture noalias %A, i32** nocapture %PtrPtr) {
+;CHECK-LABEL: @foo1(
+;CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+;CHECK: %0 = or i64 %index, 1
+;CHECK: %1 = getelementptr inbounds i32*, i32** %PtrPtr, i64 %index
+;CHECK: %2 = getelementptr inbounds i32*, i32** %PtrPtr, i64 %0
+;CHECK: %3 = load i32*, i32** %1, align 8
+;CHECK: %4 = load i32*, i32** %2, align 8
+;CHECK: %5 = load i32, i32* %3, align 4
+;CHECK: %6 = load i32, i32* %4, align 4
+;CHECK: %7 = insertelement <2 x i32> undef, i32 %5, i32 0
+;CHECK: %8 = insertelement <2 x i32> %7, i32 %6, i32 1
+;CHECK: %9 = getelementptr inbounds i32, i32* %A, i64 %index
+;CHECK: %10 = bitcast i32* %9 to <2 x i32>*
+;CHECK: store <2 x i32> %8, <2 x i32>* %10, align 4
+
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %ptr = getelementptr inbounds i32*, i32** %PtrPtr, i64 %indvars.iv
+ %el = load i32*, i32** %ptr
+ %v = load i32, i32* %el
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ store i32 %v, i32* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 undef
+}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll
new file mode 100644
index 00000000000..d2e59452033
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll
@@ -0,0 +1,38 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=2 -debug-only=loop-vectorize \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+
+; Check costs for branches inside a vectorized loop around predicated
+; blocks. Each such branch will be guarded with an extractelement from the
+; vector compare plus a test under mask instruction. This cost is modelled on
+; the extractelement of i1.
+
+define void @fun(i32* %arr, i64 %trip.count) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %indvars.iv
+ %l = load i32, i32* %arrayidx, align 4
+ %cmp55 = icmp sgt i32 %l, 0
+ br i1 %cmp55, label %if.then, label %for.inc
+
+if.then:
+ %sub = sub nsw i32 0, %l
+ store i32 %sub, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc:
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ ret void
+
+; CHECK: LV: Found an estimated cost of 5 for VF 2 For instruction: br i1 %cmp55, label %if.then, label %for.inc
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: br label %for.inc
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: br i1 %exitcond, label %for.end.loopexit, label %for.body
+}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg b/llvm/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg
new file mode 100644
index 00000000000..2f3cf7d3f04
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'SystemZ' in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll
new file mode 100644
index 00000000000..1925527eacf
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=2 -debug-only=loop-vectorize \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+;
+; Check that a scalarized load does not get operands scalarization costs added.
+
+define void @fun(i64* %data, i64 %n, i64 %s, double* %Src) {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %mul = mul nsw i64 %iv, %s
+ %gep = getelementptr inbounds double, double* %Src, i64 %mul
+ %bct = bitcast double* %gep to i64*
+ %ld = load i64, i64* %bct
+ %iv.next = add nuw nsw i64 %iv, 1
+ %cmp110.us = icmp slt i64 %iv.next, %n
+ br i1 %cmp110.us, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Found an estimated cost of 2 for VF 2 For instruction: %mul = mul nsw i64 %iv, %s
+; CHECK: LV: Found an estimated cost of 2 for VF 2 For instruction: %ld = load i64, i64* %bct
+}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll
new file mode 100644
index 00000000000..fbf8b114542
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll
@@ -0,0 +1,28 @@
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize \
+; RUN: -enable-interleaved-mem-accesses=false -disable-output < %s 2>&1 \
+; RUN: | FileCheck %s
+; REQUIRES: asserts
+;
+; Check that a scalarized load does not get a zero cost in a vectorized
+; loop. It can only be folded into the add operand in the scalar loop.
+
+define i32 @fun(i64* %data, i64 %n, i64 %s, i32* %Src) {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %acc = phi i32 [ 0, %entry ], [ %acc_next, %for.body ]
+ %gep = getelementptr inbounds i32, i32* %Src, i64 %iv
+ %ld = load i32, i32* %gep
+ %acc_next = add i32 %acc, %ld
+ %iv.next = add nuw nsw i64 %iv, 2
+ %cmp110.us = icmp slt i64 %iv.next, %n
+ br i1 %cmp110.us, label %for.body, label %for.end
+
+for.end:
+ ret i32 %acc_next
+
+; CHECK: Found an estimated cost of 4 for VF 4 For instruction: %ld = load i32, i32* %gep
+}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll
new file mode 100644
index 00000000000..9fdf22ecd92
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll
@@ -0,0 +1,33 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize \
+; RUN: -disable-output -enable-interleaved-mem-accesses=false < %s 2>&1 | \
+; RUN: FileCheck %s
+;
+; Check that a scalarized load/store does not get a cost for inserts/
+; extracts, since z13 supports element load/store.
+
+define void @fun(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %tmp2 = add i32 %tmp1, 1
+ store i32 %tmp2, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Scalarizing: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Scalarizing: store i32 %tmp2, i32* %tmp0, align 4
+
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: store i32 %tmp2, i32* %tmp0, align 4
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll
new file mode 100644
index 00000000000..4c992cedd88
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll
@@ -0,0 +1,149 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -debug-only=loop-vectorize,vectorutils -max-interleave-group-factor=64\
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+;
+; Check that some cost estimations for interleave groups make sense.
+
+; This loop is loading four i16 values at indices [0, 1, 2, 3], with a stride
+; of 4. At VF=4, memory interleaving means loading 4 * 4 * 16 bits = 2 vector
+; registers. Each of the 4 vector values must then be constructed from the
+; two vector registers using one vperm each, which gives a cost of 2 + 4 = 6.
+;
+; CHECK: LV: Checking a loop in "fun0"
+; CHECK: LV: Found an estimated cost of 6 for VF 4 For instruction: %ld0 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld1 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld2 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %ld3 = load i16
+define void @fun0(i16 *%ptr, i16 *%dst) {
+entry:
+ br label %for.body
+
+for.body:
+ %ivptr = phi i16* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+ %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %inc = add i64 %iv, 4
+ %ptr0 = getelementptr inbounds i16, i16* %ivptr, i64 0
+ %ld0 = load i16, i16* %ptr0
+ %ptr1 = getelementptr inbounds i16, i16* %ivptr, i64 1
+ %ld1 = load i16, i16* %ptr1
+ %ptr2 = getelementptr inbounds i16, i16* %ivptr, i64 2
+ %ld2 = load i16, i16* %ptr2
+ %ptr3 = getelementptr inbounds i16, i16* %ivptr, i64 3
+ %ld3 = load i16, i16* %ptr3
+ %a1 = add i16 %ld0, %ld1
+ %a2 = add i16 %a1, %ld2
+ %a3 = add i16 %a2, %ld3
+ %dstptr = getelementptr inbounds i16, i16* %dst, i64 %iv
+ store i16 %a3, i16* %dstptr
+ %ptr.next = getelementptr inbounds i16, i16* %ivptr, i64 4
+ %cmp = icmp eq i64 %inc, 100
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; This loop loads one i8 value in a stride of 3. At VF=16, this means loading
+; 3 vector registers, and then constructing the vector value with two vperms,
+; which gives a cost of 5.
+;
+; CHECK: LV: Checking a loop in "fun1"
+; CHECK: LV: Found an estimated cost of 5 for VF 16 For instruction: %ld0 = load i8
+define void @fun1(i8 *%ptr, i8 *%dst) {
+entry:
+ br label %for.body
+
+for.body:
+ %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+ %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %inc = add i64 %iv, 4
+ %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+ %ld0 = load i8, i8* %ptr0
+ %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+ store i8 %ld0, i8* %dstptr
+ %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 3
+ %cmp = icmp eq i64 %inc, 100
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; This loop is loading 4 i8 values at indexes [0, 1, 2, 3], with a stride of
+; 32. At VF=2, this means loading 2 vector registers, and using 4 vperms to
+; produce the vector values, which gives a cost of 6.
+;
+; CHECK: LV: Checking a loop in "fun2"
+; CHECK: LV: Found an estimated cost of 6 for VF 2 For instruction: %ld0 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld1 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld2 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld3 = load i8
+define void @fun2(i8 *%ptr, i8 *%dst) {
+entry:
+ br label %for.body
+
+for.body:
+ %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+ %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %inc = add i64 %iv, 4
+ %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+ %ld0 = load i8, i8* %ptr0
+ %ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
+ %ld1 = load i8, i8* %ptr1
+ %ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
+ %ld2 = load i8, i8* %ptr2
+ %ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
+ %ld3 = load i8, i8* %ptr3
+ %a1 = add i8 %ld0, %ld1
+ %a2 = add i8 %a1, %ld2
+ %a3 = add i8 %a2, %ld3
+ %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+ store i8 %a3, i8* %dstptr
+ %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 32
+ %cmp = icmp eq i64 %inc, 100
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; This loop is loading 4 i8 values at indexes [0, 1, 2, 3], with a stride of
+; 30. At VF=2, this means loading 3 vector registers, and using 4 vperms to
+; produce the vector values, which gives a cost of 7. This is the same loop
+; as in fun2, except the stride makes the second iterations values overlap a
+; vector register boundary.
+;
+; CHECK: LV: Checking a loop in "fun3"
+; CHECK: LV: Found an estimated cost of 7 for VF 2 For instruction: %ld0 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld1 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld2 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %ld3 = load i8
+define void @fun3(i8 *%ptr, i8 *%dst) {
+entry:
+ br label %for.body
+
+for.body:
+ %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+ %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %inc = add i64 %iv, 4
+ %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+ %ld0 = load i8, i8* %ptr0
+ %ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
+ %ld1 = load i8, i8* %ptr1
+ %ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
+ %ld2 = load i8, i8* %ptr2
+ %ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
+ %ld3 = load i8, i8* %ptr3
+ %a1 = add i8 %ld0, %ld1
+ %a2 = add i8 %a1, %ld2
+ %a3 = add i8 %a2, %ld3
+ %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+ store i8 %a3, i8* %dstptr
+ %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 30
+ %cmp = icmp eq i64 %inc, 100
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll
new file mode 100644
index 00000000000..4e04a3423ed
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll
@@ -0,0 +1,70 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize,vectorutils \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+;
+; Check that the loop vectorizer performs memory interleaving with accurate
+; cost estimations.
+
+
+; Simple case where just the load is interleaved, because the store group
+; would have gaps.
+define void @fun0(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %tmp2 = add i32 %tmp1, 1
+ store i32 %tmp2, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Creating an interleave group with: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 3 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; (vl; vl; vperm)
+}
+
+; Interleaving of both load and stores.
+define void @fun1(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %i_1 = add i64 %i, 1
+ %tmp2 = getelementptr inbounds i32, i32* %data, i64 %i_1
+ %tmp3 = load i32, i32* %tmp2, align 4
+ store i32 %tmp1, i32* %tmp2, align 4
+ store i32 %tmp3, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Creating an interleave group with: store i32 %tmp3, i32* %tmp0, align 4
+; CHECK: LV: Inserted: store i32 %tmp1, i32* %tmp2, align 4
+; CHECK: into the interleave group with store i32 %tmp3, i32* %tmp0, align 4
+; CHECK: LV: Creating an interleave group with: %tmp3 = load i32, i32* %tmp2, align 4
+; CHECK: LV: Inserted: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: into the interleave group with %tmp3 = load i32, i32* %tmp2, align 4
+
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i32, i32* %tmp2, align 4
+; (vl; vl; vperm, vpkg)
+
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: store i32 %tmp1, i32* %tmp2, align 4
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: store i32 %tmp3, i32* %tmp0, align 4
+; (vmrlf; vmrhf; vst; vst)
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr38110.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr38110.ll
new file mode 100644
index 00000000000..6c8fef9fc91
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr38110.ll
@@ -0,0 +1,50 @@
+; RUN: opt -passes='loop-vectorize' -mcpu=z13 -force-vector-width=2 -S < %s | FileCheck %s
+;
+; Forcing VF=2 to trigger vector code gen
+;
+; This is a test case to exercise more cases in truncateToMinimalBitWidths().
+; Test passes if vector code is generated w/o hitting llvm_unreachable().
+;
+; Performing minimal check in the output to ensure the loop is actually
+; vectorized.
+;
+; CHECK: vector.body
+
+target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
+target triple = "s390x-ibm-linux"
+
+define void @test(i32 zeroext %width, i8* nocapture %row, i16 zeroext %src, i16* nocapture readonly %dst) {
+entry:
+ %cmp10 = icmp eq i32 %width, 0
+ br i1 %cmp10, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %conv1 = zext i16 %src to i32
+ br label %for.body
+
+for.body: ; preds = %for.inc, %for.body.lr.ph
+ %i.012 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
+ %sp.011 = phi i8* [ %row, %for.body.lr.ph ], [ %incdec.ptr, %for.inc ]
+ %0 = load i8, i8* %sp.011, align 1
+ %conv = zext i8 %0 to i32
+ %cmp2 = icmp eq i32 %conv, %conv1
+ br i1 %cmp2, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i16, i16* %dst, align 2
+ %conv4 = trunc i16 %1 to i8
+ store i8 %conv4, i8* %sp.011, align 1
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %inc = add nuw i32 %i.012, 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %sp.011, i64 1
+ %exitcond = icmp eq i32 %inc, %width
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.inc
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
OpenPOWER on IntegriCloud