path: root/llvm/test/CodeGen/AArch64
author    Hao Liu <Hao.Liu@arm.com>  2015-06-11 09:05:02 +0000
committer Hao Liu <Hao.Liu@arm.com>  2015-06-11 09:05:02 +0000
commit    4566d18e8926e2a255ad2ed13e54e2fad28afb40 (patch)
tree      ae6baf0fce7a02c95b3bbfcfb7da5bd2295da149 /llvm/test/CodeGen/AArch64
parent    229628b39e1eaedc9870824786ee32ca8cd0c5c2 (diff)
[AArch64] Match interleaved memory accesses into ldN/stN instructions.
Add a pass, AArch64InterleavedAccess, to identify and match interleaved
memory accesses. This pass transforms an interleaved load/store into an
ldN/stN intrinsic. As the Loop Vectorizer keeps its interleaved-access
optimization disabled by default, this optimization is disabled by default
as well; it can be enabled with "-aarch64-interleaved-access-opt=true".

E.g. Transform an interleaved load (Factor = 2):
      %wide.vec = load <8 x i32>, <8 x i32>* %ptr
      %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
      %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
Into:
      %ld2 = { <4 x i32>, <4 x i32> } call aarch64.neon.ld2(%ptr)
      %v0 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 0
      %v1 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 1

E.g. Transform an interleaved store (Factor = 2):
      %i.vec = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>  ; Interleaved vec
      store <8 x i32> %i.vec, <8 x i32>* %ptr
Into:
      %v0 = shuffle %i.vec, undef, <0, 1, 2, 3>
      %v1 = shuffle %i.vec, undef, <4, 5, 6, 7>
      call void aarch64.neon.st2(%v0, %v1, %ptr)

llvm-svn: 239514
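The RUN line of the new test shows how the pass is exercised: llc with
-march=aarch64 and the opt-in flag. A minimal sketch of such an invocation
(the input file name here is hypothetical):

      llc -march=aarch64 -aarch64-interleaved-access-opt=true interleaved.ll -o -

With the flag left at its default of false, the pass never fires and the wide
load/store plus shufflevector sequences are lowered by the generic backend
instead of being matched to ldN/stN.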
Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-interleaved-accesses.ll | 197
1 file changed, 197 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-interleaved-accesses.ll b/llvm/test/CodeGen/AArch64/aarch64-interleaved-accesses.ll
new file mode 100644
index 00000000000..e651be97569
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-interleaved-accesses.ll
@@ -0,0 +1,197 @@
+; RUN: llc -march=aarch64 -aarch64-interleaved-access-opt=true < %s | FileCheck %s
+
+; CHECK-LABEL: load_factor2:
+; CHECK: ld2 { v0.8b, v1.8b }, [x0]
+define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
+ %wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
+ %strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %strided.v1 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %add = add nsw <8 x i8> %strided.v0, %strided.v1
+ ret <8 x i8> %add
+}
+
+; CHECK-LABEL: load_factor3:
+; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+define <4 x i32> @load_factor3(i32* %ptr) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
+ %strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+ %strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %add = add nsw <4 x i32> %strided.v2, %strided.v1
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_factor4:
+; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define <4 x i32> @load_factor4(i32* %ptr) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+ %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+ %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v2
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_factor2:
+; CHECK: st2 { v0.8b, v1.8b }, [x0]
+define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
+ %interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_factor3:
+; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+ store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_factor4:
+; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+ ret void
+}
+
+; The following cases test that interleaved accesses of pointer vectors can
+; be matched to ldN/stN instructions.
+
+; CHECK-LABEL: load_ptrvec_factor2:
+; CHECK: ld2 { v0.2d, v1.2d }, [x0]
+define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
+ %base = bitcast i32** %ptr to <4 x i32*>*
+ %wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
+ %strided.v0 = shufflevector <4 x i32*> %wide.vec, <4 x i32*> undef, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32*> %strided.v0
+}
+
+; CHECK-LABEL: load_ptrvec_factor3:
+; CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
+define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+ %base = bitcast i32** %ptr to <6 x i32*>*
+ %wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
+ %strided.v2 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 2, i32 5>
+ store <2 x i32*> %strided.v2, <2 x i32*>* %ptr1
+ %strided.v1 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 1, i32 4>
+ store <2 x i32*> %strided.v1, <2 x i32*>* %ptr2
+ ret void
+}
+
+; CHECK-LABEL: load_ptrvec_factor4:
+; CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+ %base = bitcast i32** %ptr to <8 x i32*>*
+ %wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
+ %strided.v1 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 1, i32 5>
+ %strided.v3 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 3, i32 7>
+ store <2 x i32*> %strided.v1, <2 x i32*>* %ptr1
+ store <2 x i32*> %strided.v3, <2 x i32*>* %ptr2
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor2:
+; CHECK: st2 { v0.2d, v1.2d }, [x0]
+define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
+ %base = bitcast i32** %ptr to <4 x i32*>*
+ %interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ store <4 x i32*> %interleaved.vec, <4 x i32*>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor3:
+; CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0]
+define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
+ %base = bitcast i32** %ptr to <6 x i32*>*
+ %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v2_u = shufflevector <2 x i32*> %v2, <2 x i32*> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_u, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
+ store <6 x i32*> %interleaved.vec, <6 x i32*>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor4:
+; CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+define void @store_ptrvec_factor4(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
+  %base = bitcast i32** %ptr to <8 x i32*>*
+ %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v2_v3 = shufflevector <2 x i32*> %v2, <2 x i32*> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_v3, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+ store <8 x i32*> %interleaved.vec, <8 x i32*>* %base, align 4
+ ret void
+}
+
+; The following cases check that shuffle masks with undef indices can be
+; matched into ldN/stN instructions.
+
+; CHECK-LABEL: load_undef_mask_factor2:
+; CHECK: ld2 { v0.4s, v1.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
+ %base = bitcast i32* %ptr to <8 x i32>*
+ %wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
+ %strided.v0 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 6>
+ %strided.v1 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 7>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v1
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor3:
+; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
+ %strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
+ %strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %add = add nsw <4 x i32> %strided.v2, %strided.v1
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor4:
+; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+ %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 undef, i32 undef>
+ %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 undef, i32 undef>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v2
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_undef_mask_factor2:
+; CHECK: st2 { v0.4s, v1.4s }, [x0]
+define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
+ %base = bitcast i32* %ptr to <8 x i32>*
+ %interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i32> %interleaved.vec, <8 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor3:
+; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 undef, i32 1, i32 undef, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+ store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor4:
+; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 undef, i32 undef, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+ ret void
+}