Diffstat (limited to 'llvm/test/Transforms/LoadStoreVectorizer')
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll    |  6
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll         | 26
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll      |  7
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll      | 56
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll  | 91
5 files changed, 179 insertions(+), 7 deletions(-)
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
index c6bde5dfe15..64a0480d8d3 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
@@ -2,13 +2,13 @@
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
-; Check relative position of the inserted vector load relative to the
-; existing adds.
+; Check the position of the inserted vector load relative to the existing adds.
+; Vectorized loads should be inserted at the position of the first load.
; CHECK-LABEL: @insert_load_point(
; CHECK: %z = add i32 %x, 4
-; CHECK: %w = add i32 %y, 9
; CHECK: load <2 x float>
+; CHECK: %w = add i32 %y, 9
; CHECK: %foo = add i32 %z, %w
define void @insert_load_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
entry:
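
The updated CHECK lines pin the new insertion point: the vectorized load now appears between %z and %w, i.e. at the position of the first scalar load it replaces. A hand-written sketch of output with that shape (not taken from the commit; %c.gep and all value names are illustrative):

  %z = add i32 %x, 4
  %0 = bitcast float addrspace(1)* %c.gep to <2 x float> addrspace(1)*   ; %c.gep stands in for the real address
  %wide = load <2 x float>, <2 x float> addrspace(1)* %0, align 4        ; inserted where the first scalar load was
  %val0 = extractelement <2 x float> %wide, i32 0
  %val1 = extractelement <2 x float> %wide, i32 1
  %w = add i32 %y, 9
  %foo = add i32 %z, %w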
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll
new file mode 100644
index 00000000000..b98014e76cb
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll
@@ -0,0 +1,26 @@
+; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: @correct_order(
+; CHECK: bitcast i32*
+; CHECK: load <2 x i32>
+; CHECK: load i32
+; CHECK: bitcast i32*
+; CHECK: store <2 x i32>
+; CHECK: load i32
+define void @correct_order(i32* noalias %ptr) {
+ %next.gep = getelementptr i32, i32* %ptr, i64 0
+ %next.gep1 = getelementptr i32, i32* %ptr, i64 1
+ %next.gep2 = getelementptr i32, i32* %ptr, i64 2
+
+ %l1 = load i32, i32* %next.gep1, align 4
+ %l2 = load i32, i32* %next.gep, align 4
+ store i32 0, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep, align 4
+ %l3 = load i32, i32* %next.gep1, align 4
+ %l4 = load i32, i32* %next.gep2, align 4
+
+ ret void
+}
+
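For @correct_order, the CHECK lines above only anchor the order of the interesting instructions. A minimal sketch of what the pass is expected to emit for the first two loads and the two stores (value names are illustrative, the surviving scalar loads are omitted, and the exact output may differ):

  %p = bitcast i32* %next.gep to <2 x i32>*
  %wide = load <2 x i32>, <2 x i32>* %p, align 4            ; replaces %l2 (lane 0) and %l1 (lane 1)
  %l2.v = extractelement <2 x i32> %wide, i32 0
  %l1.v = extractelement <2 x i32> %wide, i32 1
  %q = bitcast i32* %next.gep to <2 x i32>*
  store <2 x i32> zeroinitializer, <2 x i32>* %q, align 4   ; replaces the two scalar stores of 0
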
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
index 29d1e6a1cf3..9a7b294e4ce 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
@@ -4,8 +4,11 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
%struct.buffer_t = type { i32, i8* }
-; Check an i32 and i8* get vectorized, and that
-; the two accesses (load into buff.val and store to buff.p) preserve their order.
+; Check an i32 and i8* get vectorized, and that the two accesses
+; (load into buff.val and store to buff.p) preserve their order.
+; Vectorized loads should be inserted at the position of the first load, and
+; instructions that were between the first and last load should be reordered,
+; preserving their relative order as much as possible.
; CHECK-LABEL: @preserve_order_32(
; CHECK: load <2 x i32>
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
index 303ead9a603..23c43863015 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
@@ -3,9 +3,13 @@
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
%struct.buffer_t = type { i64, i8* }
+%struct.nested.buffer = type { %struct.buffer_t, %struct.buffer_t }
-; Check an i64 and i8* get vectorized, and that
-; the two accesses (load into buff.val and store to buff.p) preserve their order.
+; Check an i64 and i8* get vectorized, and that the two accesses
+; (load into buff.val and store to buff.p) preserve their order.
+; Vectorized loads should be inserted at the position of the first load, and
+; instructions that were between the first and last load should be reordered,
+; preserving their relative order as much as possible.
; CHECK-LABEL: @preserve_order_64(
; CHECK: load <2 x i64>
@@ -22,4 +26,52 @@ entry:
ret void
}
+; Check that reordering recurses correctly.
+
+; CHECK-LABEL: @transitive_reorder(
+; CHECK: load <2 x i64>
+; CHECK: %buff.val = load i8
+; CHECK: store i8 0
+define void @transitive_reorder(%struct.buffer_t* noalias %buff, %struct.nested.buffer* noalias %nest) #0 {
+entry:
+ %nest0_0 = getelementptr inbounds %struct.nested.buffer, %struct.nested.buffer* %nest, i64 0, i32 0
+ %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %nest0_0, i64 0, i32 1
+ %buff.p = load i8*, i8** %tmp1, align 8
+ %buff.val = load i8, i8* %buff.p, align 8
+ store i8 0, i8* %buff.p, align 8
+ %nest1_0 = getelementptr inbounds %struct.nested.buffer, %struct.nested.buffer* %nest, i64 0, i32 0
+ %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %nest1_0, i64 0, i32 0
+ %buff.int = load i64, i64* %tmp0, align 8
+ ret void
+}
+
+; Check that there is no vectorization across a phi node.
+
+; CHECK-LABEL: @no_vect_phi(
+; CHECK: load i8*
+; CHECK: load i8
+; CHECK: store i8 0
+; CHECK: load i64
+define void @no_vect_phi(i32* noalias %ptr, %struct.buffer_t* noalias %buff) {
+entry:
+ %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 1
+ %buff.p = load i8*, i8** %tmp1, align 8
+ %buff.val = load i8, i8* %buff.p, align 8
+ store i8 0, i8* %buff.p, align 8
+ br label %"for something"
+
+"for something":
+ %index = phi i64 [ 0, %entry ], [ %index.next, %"for something" ]
+
+ %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 0
+ %buff.int = load i64, i64* %tmp0, align 8
+
+ %index.next = add i64 %index, 8
+ %cmp_res = icmp eq i64 %index.next, 8
+ br i1 %cmp_res, label %ending, label %"for something"
+
+ending:
+ ret void
+}
+
attributes #0 = { nounwind }
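
In @transitive_reorder, the two 64-bit fields at offsets 0 and 8 of the outer buffer are loaded as one <2 x i64>, and everything that depends on %buff.p (the i8 load and the i8 store) has to be moved below the new vector load; that chain of dependent users is what the recursive reordering handles. One plausible shape of the output, matching the CHECK lines (value names and the pointer reconstruction are illustrative, not taken from the commit):

  entry:
    %0 = bitcast i64* %tmp0 to <2 x i64>*
    %wide = load <2 x i64>, <2 x i64>* %0, align 8        ; inserted at the position of the first load
    %buff.int = extractelement <2 x i64> %wide, i32 0
    %buff.p.i = extractelement <2 x i64> %wide, i32 1
    %buff.p = inttoptr i64 %buff.p.i to i8*
    %buff.val = load i8, i8* %buff.p, align 8             ; users of %buff.p follow it after reordering
    store i8 0, i8* %buff.p, align 8
    ret void

In @no_vect_phi the second 64-bit load sits in a different basic block, behind the phi, so both accesses stay scalar, as the CHECK lines require.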
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll
new file mode 100644
index 00000000000..e50cd4d0156
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll
@@ -0,0 +1,91 @@
+; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Check that subsets of the load/store chains are vectorized in the presence
+; of interleaved loads/stores.
+
+; CHECK-LABEL: @interleave_2L_2S(
+; CHECK: load <2 x i32>
+; CHECK: load i32
+; CHECK: store <2 x i32>
+; CHECK: load i32
+define void @interleave_2L_2S(i32* noalias %ptr) {
+ %next.gep = getelementptr i32, i32* %ptr, i64 0
+ %next.gep1 = getelementptr i32, i32* %ptr, i64 1
+ %next.gep2 = getelementptr i32, i32* %ptr, i64 2
+
+ %l1 = load i32, i32* %next.gep1, align 4
+ %l2 = load i32, i32* %next.gep, align 4
+ store i32 0, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep, align 4
+ %l3 = load i32, i32* %next.gep1, align 4
+ %l4 = load i32, i32* %next.gep2, align 4
+
+ ret void
+}
+
+; CHECK-LABEL: @interleave_3L_2S_1L(
+; CHECK: load <3 x i32>
+; CHECK: store <2 x i32>
+; CHECK: load i32
+
+define void @interleave_3L_2S_1L(i32* noalias %ptr) {
+ %next.gep = getelementptr i32, i32* %ptr, i64 0
+ %next.gep1 = getelementptr i32, i32* %ptr, i64 1
+ %next.gep2 = getelementptr i32, i32* %ptr, i64 2
+
+ %l2 = load i32, i32* %next.gep, align 4
+ %l1 = load i32, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep, align 4
+ %l3 = load i32, i32* %next.gep1, align 4
+ %l4 = load i32, i32* %next.gep2, align 4
+
+ ret void
+}
+
+; CHECK-LABEL: @chain_suffix(
+; CHECK: load i32
+; CHECK: store <2 x i32>
+; CHECK: load i32
+; CHECK: load i32
+define void @chain_suffix(i32* noalias %ptr) {
+ %next.gep = getelementptr i32, i32* %ptr, i64 0
+ %next.gep1 = getelementptr i32, i32* %ptr, i64 1
+ %next.gep2 = getelementptr i32, i32* %ptr, i64 2
+
+ %l2 = load i32, i32* %next.gep, align 4
+ store i32 0, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep, align 4
+ %l3 = load i32, i32* %next.gep1, align 4
+ %l4 = load i32, i32* %next.gep2, align 4
+
+ ret void
+}
+
+
+; CHECK-LABEL: @chain_prefix_suffix(
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: store <2 x i32>
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: load i32
+define void @chain_prefix_suffix(i32* noalias %ptr) {
+ %next.gep = getelementptr i32, i32* %ptr, i64 0
+ %next.gep1 = getelementptr i32, i32* %ptr, i64 1
+ %next.gep2 = getelementptr i32, i32* %ptr, i64 2
+ %next.gep3 = getelementptr i32, i32* %ptr, i64 3
+
+ %l1 = load i32, i32* %next.gep, align 4
+ %l2 = load i32, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep1, align 4
+ store i32 0, i32* %next.gep2, align 4
+ %l3 = load i32, i32* %next.gep1, align 4
+ %l4 = load i32, i32* %next.gep2, align 4
+ %l5 = load i32, i32* %next.gep3, align 4
+
+ ret void
+}
+
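In @interleave_3L_2S_1L, %l4 does not alias either store, so it can join %l2 and %l1 in a single <3 x i32> load, while %l3 has to stay behind the store to %next.gep1 and remains scalar. A rough sketch of output consistent with the CHECK lines (value names are illustrative and the exact form may differ):

  %p = bitcast i32* %next.gep to <3 x i32>*
  %wide = load <3 x i32>, <3 x i32>* %p, align 4            ; lanes 0..2 replace %l2, %l1 and the hoisted %l4
  %l2.v = extractelement <3 x i32> %wide, i32 0
  %l1.v = extractelement <3 x i32> %wide, i32 1
  %l4.v = extractelement <3 x i32> %wide, i32 2
  %q = bitcast i32* %next.gep to <2 x i32>*
  store <2 x i32> zeroinitializer, <2 x i32>* %q, align 4   ; the two scalar stores of 0
  %l3 = load i32, i32* %next.gep1, align 4                  ; must stay after the aliasing store

The remaining functions, @chain_suffix and @chain_prefix_suffix, expect only the store pair to be combined; their CHECK lines require the loads around the interleaved stores to stay scalar.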