 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp         |  5
 llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll | 55
 2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index fd20c7507e3..3784b95a76b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1227,8 +1227,10 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
   if (SI) {
     assert(!Legal->isUniform(SI->getPointerOperand()) &&
            "We do not allow storing to uniform addresses");
 
+    // We don't want to update the value in the map as it might be used in
+    // another expression. So don't use a reference type for "StoredVal".
+    VectorParts StoredVal = getVectorValue(SI->getValueOperand());
-    VectorParts &StoredVal = getVectorValue(SI->getValueOperand());
     for (unsigned Part = 0; Part < UF; ++Part) {
       // Calculate the pointer for the specific unroll-part.
       Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
@@ -1246,6 +1248,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
       Value *VecPtr = Builder.CreateBitCast(PartPtr,
                                             DataTy->getPointerTo(AddressSpace));
       Builder.CreateStore(StoredVal[Part], VecPtr)->setAlignment(Alignment);
     }
+    return;
   }
   for (unsigned Part = 0; Part < UF; ++Part) {
diff --git a/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll b/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll
new file mode 100644
index 00000000000..42a5ce183ed
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll
@@ -0,0 +1,55 @@
+; RUN: opt -S -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@uf = common global [100 x i32] zeroinitializer, align 16
+@xi = common global [100 x i32] zeroinitializer, align 16
+@q = common global [100 x i32] zeroinitializer, align 16
+
+; PR16455
+
+
+; Due to a bug in the way we handled reverse induction stores we would generate
+; a shuffle too many.
+
+define void @t() {
+entry:
+  br label %for.body
+
+; CHECK: @t
+; CHECK: vector.body:
+; CHECK: load <4 x i32>
+; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = shufflevector
+; CHECK: load <4 x i32>
+; CHECK: [[VAR2:%[a-zA-Z0-9]+]] = shufflevector
+; CHECK: [[VAR3:%[a-zA-Z0-9]+]] = add nsw <4 x i32> [[VAR2]], [[VAR1]]
+; CHECK: [[VAR4:%[a-zA-Z0-9]+]] = shufflevector <4 x i32> [[VAR3]]
+; CHECK: store <4 x i32> [[VAR4]]
+; CHECK: load <4 x i32>
+; CHECK: [[VAR5:%[a-zA-Z0-9]+]] = shufflevector
+; CHECK-NOT: add nsw <4 x i32> [[VAR4]], [[VAR5]]
+; CHECK-NOT: add nsw <4 x i32> [[VAR5]], [[VAR4]]
+; CHECK: add nsw <4 x i32> [[VAR3]], [[VAR5]]
+
+for.body:
+  %indvars.iv = phi i64 [ 93, %entry ], [ %indvars.iv.next, %for.body ]
+  %0 = add i64 %indvars.iv, 1
+  %arrayidx = getelementptr inbounds [100 x i32]* @uf, i64 0, i64 %0
+  %arrayidx3 = getelementptr inbounds [100 x i32]* @xi, i64 0, i64 %0
+  %1 = load i32* %arrayidx3, align 4
+  %2 = load i32* %arrayidx, align 4
+  %add4 = add nsw i32 %2, %1
+  store i32 %add4, i32* %arrayidx, align 4
+  %arrayidx7 = getelementptr inbounds [100 x i32]* @q, i64 0, i64 %0
+  %3 = load i32* %arrayidx7, align 4
+  %add8 = add nsw i32 %add4, %3
+  store i32 %add8, i32* %arrayidx, align 4
+  %indvars.iv.next = add i64 %indvars.iv, -1
+  %4 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp ugt i32 %4, 2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
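
The new comment names the root cause: getVectorValue() hands back a reference into the vectorizer's widened-value map, so when a reverse-consecutive store reverses StoredVal[Part] it also rewrites the cached entry that a later expression (here %add4 feeding %add8) looks up again, producing the extra shufflevector the test guards against. Below is a minimal standalone C++ sketch of that aliasing pitfall; it is not LLVM code, and WidenedValues plus the string-based VectorParts are illustrative stand-ins for the real map and IR values.

// aliasing-sketch.cpp -- illustrative only; widened values are modeled as
// strings instead of LLVM IR. Build with: c++ -std=c++11 aliasing-sketch.cpp
#include <cassert>
#include <map>
#include <string>
#include <vector>

using VectorParts = std::vector<std::string>;  // one widened value per unroll part

static std::map<std::string, VectorParts> WidenedValues;

// Returns a reference to the cached widened value, creating it on demand.
static VectorParts &getVectorValue(const std::string &Scalar) {
  VectorParts &Parts = WidenedValues[Scalar];
  if (Parts.empty())
    Parts.push_back(Scalar + ".vec");
  return Parts;
}

int main() {
  // Buggy pattern: the reference aliases the map entry, so reversing the
  // value for a reverse-consecutive store also rewrites the cached value
  // that a later expression will fetch again.
  VectorParts &ByRef = getVectorValue("add4");
  ByRef[0] = "reverse(" + ByRef[0] + ")";
  assert(getVectorValue("add4")[0] == "reverse(add4.vec)"); // cache polluted

  // Fixed pattern (what the patch does): copy the parts, mutate the copy,
  // and leave the cached value untouched for other users.
  VectorParts ByVal = getVectorValue("add8");
  ByVal[0] = "reverse(" + ByVal[0] + ")";
  assert(getVectorValue("add8")[0] == "add8.vec");          // cache intact
  return 0;
}

Copying is cheap here because VectorParts only holds one widened value per unroll part, and the added return; makes the store path exit early instead of falling through to the loop that follows.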