diff options
author | Alina Sbirlea <asbirlea@google.com> | 2016-07-01 21:44:12 +0000 |
---|---|---|
committer | Alina Sbirlea <asbirlea@google.com> | 2016-07-01 21:44:12 +0000 |
commit | 8d8aa5dd6ce3c427fa0cf057acfc7ad4aef193dd (patch) | |
tree | 4a6daaa31e920236e7e11832a7022bf2f694dd27 /llvm/test/Transforms/LoadStoreVectorizer | |
parent | c7bb34f6ec9714ee717ca2166e06aae2c34e9041 (diff) | |
download | bcm5719-llvm-8d8aa5dd6ce3c427fa0cf057acfc7ad4aef193dd.tar.gz bcm5719-llvm-8d8aa5dd6ce3c427fa0cf057acfc7ad4aef193dd.zip |
Address two correctness issues in LoadStoreVectorizer
Summary:
GetBoundaryInstruction returns the instruction which follows the last instruction, or end(). Otherwise the last instruction in the boundary set is not being tested by isVectorizable().
Partially solve reordering of instructions. More extensive solution to follow.
Reviewers: tstellarAMD, llvm-commits, jlebar
Subscribers: escha, arsenm, mzolotukhin
Differential Revision: http://reviews.llvm.org/D21934
llvm-svn: 274389
Diffstat (limited to 'llvm/test/Transforms/LoadStoreVectorizer')
4 files changed, 56 insertions, 3 deletions
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll index 8b8019c7482..4d6240a9aa9 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll @@ -2,11 +2,11 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" -; This is OK to vectorize the load as long as the may alias store -; occurs before the vector load. +; This is NOT OK to vectorize, as either load may alias either store. +; CHECK: load double ; CHECK: store double 0.000000e+00, double addrspace(1)* %a, -; CHECK: load <2 x double> +; CHECK: load double ; CHECK: store double 0.000000e+00, double addrspace(1)* %a.idx.1 define void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 { entry: diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg b/llvm/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg new file mode 100644 index 00000000000..e71f3cc4c41 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg @@ -0,0 +1,3 @@ +if not 'X86' in config.root.targets: + config.unsupported = True + diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll new file mode 100644 index 00000000000..29d1e6a1cf3 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll @@ -0,0 +1,25 @@ +; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s + +target datalayout = 
"e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" + +%struct.buffer_t = type { i32, i8* } + +; Check an i32 and i8* get vectorized, and that +; the two accesses (load into buff.val and store to buff.p) preserve their order. + +; CHECK-LABEL: @preserve_order_32( +; CHECK: load <2 x i32> +; CHECK: %buff.val = load i8 +; CHECK: store i8 0 +define void @preserve_order_32(%struct.buffer_t* noalias %buff) #0 { +entry: + %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 1 + %buff.p = load i8*, i8** %tmp1, align 8 + %buff.val = load i8, i8* %buff.p, align 8 + store i8 0, i8* %buff.p, align 8 + %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 0 + %buff.int = load i32, i32* %tmp0, align 8 + ret void +} + +attributes #0 = { nounwind } diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll new file mode 100644 index 00000000000..303ead9a603 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll @@ -0,0 +1,25 @@ +; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" + +%struct.buffer_t = type { i64, i8* } + +; Check an i64 and i8* get vectorized, and that +; the two accesses (load into buff.val and store to buff.p) preserve their order. 
+ +; CHECK-LABEL: @preserve_order_64( +; CHECK: load <2 x i64> +; CHECK: %buff.val = load i8 +; CHECK: store i8 0 +define void @preserve_order_64(%struct.buffer_t* noalias %buff) #0 { +entry: + %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 1 + %buff.p = load i8*, i8** %tmp1, align 8 + %buff.val = load i8, i8* %buff.p, align 8 + store i8 0, i8* %buff.p, align 8 + %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 0 + %buff.int = load i64, i64* %tmp0, align 8 + ret void +} + +attributes #0 = { nounwind } |