author    Chad Rosier <mcrosier@codeaurora.org>  2016-04-15 18:09:10 +0000
committer Chad Rosier <mcrosier@codeaurora.org>  2016-04-15 18:09:10 +0000
commit    1fbe9bcab479522e7704716f0f57c7c09d27900a (patch)
tree      0b74a578f22352c4fa3135e4d7962dc512c5f24d /llvm/test/CodeGen
parent    74cba6427a8a3f793ac28b690b26e75a23f47c2b (diff)
download  bcm5719-llvm-1fbe9bcab479522e7704716f0f57c7c09d27900a.tar.gz
          bcm5719-llvm-1fbe9bcab479522e7704716f0f57c7c09d27900a.zip
[AArch64] Add load/store pair instructions to getMemOpBaseRegImmOfsWidth().
This improves AA in the MI scheduler when reasoning about paired instructions.

Phabricator Revision: http://reviews.llvm.org/D17098
PR26358
llvm-svn: 266462
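The gist of the change is to teach getMemOpBaseRegImmOfsWidth() to decompose AArch64 paired loads/stores (LDP/STP) into a base register, an immediate offset, and an access width, so the machine scheduler's alias analysis can compare a pair against other memory operations. Below is a minimal standalone C++ sketch of that decomposition; the stand-in PairedMemOp type, the opcode list, and the scale values are illustrative assumptions, while the real logic lives in AArch64InstrInfo.cpp and operates on MachineInstr:

#include <cstdint>
#include <cstdio>

// Stand-in for MachineInstr and the AArch64 opcode enum (illustrative only).
enum Opcode { STPXi, LDPXi, STPQi, LDPQi, Other };
struct PairedMemOp {
  Opcode Opc;
  unsigned BaseReg; // base-address register of the pair
  int64_t Imm;      // scaled 7-bit immediate offset
};

// Decompose a paired load/store into (BaseReg, Offset, Width) so alias
// analysis can reason about exactly which bytes the pair touches.
bool getMemOpBaseRegImmOfsWidth(const PairedMemOp &LdSt, unsigned &BaseReg,
                                int64_t &Offset, unsigned &Width) {
  unsigned Scale;
  switch (LdSt.Opc) {
  case STPXi: case LDPXi: Scale = 8;  break; // pair of 64-bit registers
  case STPQi: case LDPQi: Scale = 16; break; // pair of 128-bit registers
  default: return false;                     // not a recognized paired op
  }
  Width = Scale * 2;         // a pair accesses two adjacent elements
  BaseReg = LdSt.BaseReg;
  Offset = LdSt.Imm * Scale; // the imm7 field is scaled by the element size
  return true;
}

int main() {
  // Models "stp q2, q0, [x0, #32]": imm7 = 2, so byte offset = 2 * 16 = 32.
  PairedMemOp Stp{STPQi, /*x0=*/0, /*imm=*/2};
  unsigned Base, Width;
  int64_t Off;
  if (getMemOpBaseRegImmOfsWidth(Stp, Base, Off, Width))
    std::printf("base=x%u offset=%lld width=%u\n", Base, (long long)Off, Width);
}

With the (base, offset, width) triple known for both stores in the test below, the scheduler can prove that, e.g., [x0, #0..#31] and [x0, #32..#63] do not overlap and reorder the accesses safely.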
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-stp-aa.ll                 | 34
2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
index bf2d2cfa606..71bf2039eaa 100644
--- a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=false -enable-post-misched=false | FileCheck %s
; rdar://12713765
; Make sure we are not creating stack objects that are assumed to be 64-byte
diff --git a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
index 82d343d976b..2a45745fedb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
@@ -109,3 +109,37 @@ define double @stp_double_aa_after(double %d0, double %a, double %b, double* noc
store double %b, double* %add.ptr, align 8
ret double %tmp
}
+
+; Check that the stores %c and %d are paired after the fadd instruction,
+; and then the stores %a and %b are paired after proving that they do not
+; depend on the (%c, %d) pair.
+;
+; CHECK-LABEL: st1:
+; CHECK: stp q0, q1, [x{{[0-9]+}}]
+; CHECK: fadd
+; CHECK: stp q2, q0, [x{{[0-9]+}}, #32]
+define void @st1(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* %base, i64 %index) {
+entry:
+ %a0 = getelementptr inbounds float, float* %base, i64 %index
+ %b0 = getelementptr float, float* %a0, i64 4
+ %c0 = getelementptr float, float* %a0, i64 8
+ %d0 = getelementptr float, float* %a0, i64 12
+
+ %a1 = bitcast float* %a0 to <4 x float>*
+ %b1 = bitcast float* %b0 to <4 x float>*
+ %c1 = bitcast float* %c0 to <4 x float>*
+ %d1 = bitcast float* %d0 to <4 x float>*
+
+ store <4 x float> %c, <4 x float>* %c1, align 4
+ store <4 x float> %a, <4 x float>* %a1, align 4
+
+ ; This fadd forces the compiler to pair the stores of %c and %e after the
+ ; fadd, leaving the stores of %a and %b separated by that stp. The dependence
+ ; analysis then needs to prove it is safe to move %b past the stp to pair it with %a.
+ %e = fadd fast <4 x float> %d, %a
+
+ store <4 x float> %e, <4 x float>* %d1, align 4
+ store <4 x float> %b, <4 x float>* %b1, align 4
+
+ ret void
+}
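To reproduce the checked output by hand, the test IR can be fed through llc and piped to FileCheck. The invocation below is a plausible stand-in; the RUN line at the top of arm64-stp-aa.ll (outside this hunk) is authoritative for the exact flags:

llc -mtriple=arm64 < llvm/test/CodeGen/AArch64/arm64-stp-aa.ll | FileCheck llvm/test/CodeGen/AArch64/arm64-stp-aa.ll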