author    David Greene <greened@obbligato.org>  2018-10-25 21:10:39 +0000
committer David Greene <greened@obbligato.org>  2018-10-25 21:10:39 +0000
commit    53e869da7d6672c7ad7e118b8969905733806afe (patch)
tree      21fe5f3696911bfd13c8de4746e4c8834e291d06
parent    117b1fa19af43aff5d4ef0e5f35b80c9c27c5cf1 (diff)
[AArch64] Create proper memoperand for multi-vector stores
Include all of the store's source vector operands when creating the MachineMemOperand. Previously, we were missing the first operand, making the store size seem smaller than it really is.

Differential Revision: https://reviews.llvm.org/D52816
llvm-svn: 345315
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp      |   2
-rw-r--r--  llvm/test/CodeGen/AArch64/multi-vector-store-size.ll | 164
2 files changed, 165 insertions(+), 1 deletion(-)
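To make the off-by-one described in the commit message concrete, here is a small standalone C++ sketch of the size arithmetic (an illustrative model only, not the LLVM implementation): each <4 x float> source operand occupies 16 bytes, so a loop that starts counting at operand 1 misses the first vector and under-reports, for example, an st4 store as 48 bytes instead of 64.

// Illustrative model of the conservative memoperand-size calculation;
// not the LLVM code itself. Each <4 x float> source operand is 16 bytes.
#include <cstdio>

// Sum the bytes of NumVecs vector operands, starting at index FirstArg
// (1 models the old off-by-one behaviour, 0 models the fix).
static unsigned conservativeStoreBytes(unsigned NumVecs, unsigned FirstArg) {
  unsigned Bytes = 0;
  for (unsigned ArgI = FirstArg; ArgI < NumVecs; ++ArgI)
    Bytes += 16; // sizeof(<4 x float>)
  return Bytes;
}

int main() {
  for (unsigned NumVecs = 2; NumVecs <= 4; ++NumVecs)
    std::printf("st%u: old = %2u bytes, fixed = %2u bytes\n", NumVecs,
                conservativeStoreBytes(NumVecs, 1),
                conservativeStoreBytes(NumVecs, 0));
  // st2: old = 16, fixed = 32
  // st3: old = 32, fixed = 48
  // st4: old = 48, fixed = 64 -- matching the (store 32/48/64)
  // memoperands checked in the new test below.
  return 0;
}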
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a7a1b0a5feb..2a42d2db75d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7972,7 +7972,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.opc = ISD::INTRINSIC_VOID;
     // Conservatively set memVT to the entire set of vectors stored.
     unsigned NumElts = 0;
-    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
+    for (unsigned ArgI = 0, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
       Type *ArgTy = I.getArgOperand(ArgI)->getType();
       if (!ArgTy->isVectorTy())
         break;
diff --git a/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll b/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
new file mode 100644
index 00000000000..9627556168a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
@@ -0,0 +1,164 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -stop-after=isel < %s | FileCheck %s
+
+declare void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+
+declare void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*)
+
+define void @addstx(float* %res, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
+ %al = load <4 x float>, <4 x float>* %a
+ %bl = load <4 x float>, <4 x float>* %b
+ %cl = load <4 x float>, <4 x float>* %c
+ %dl = load <4 x float>, <4 x float>* %d
+
+ %ar = fadd <4 x float> %al, %bl
+ %br = fadd <4 x float> %bl, %cl
+ %cr = fadd <4 x float> %cl, %dl
+ %dr = fadd <4 x float> %dl, %al
+
+; The sizes below are conservative: AArch64TargetLowering
+; assumes the entire set of vectors is stored.
+ tail call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, float* %res)
+; CHECK: ST2Twov4s {{.*}} :: (store 32 {{.*}})
+ tail call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, float* %res)
+; CHECK: ST3Threev4s {{.*}} :: (store 48 {{.*}})
+ tail call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, float* %res)
+; CHECK: ST4Fourv4s {{.*}} :: (store 64 {{.*}})
+
+ ret void
+}
+
+define void @addst1x(float* %res, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
+ %al = load <4 x float>, <4 x float>* %a
+ %bl = load <4 x float>, <4 x float>* %b
+ %cl = load <4 x float>, <4 x float>* %c
+ %dl = load <4 x float>, <4 x float>* %d
+
+ %ar = fadd <4 x float> %al, %bl
+ %br = fadd <4 x float> %bl, %cl
+ %cr = fadd <4 x float> %cl, %dl
+ %dr = fadd <4 x float> %dl, %al
+
+; The sizes below are conservative: AArch64TargetLowering
+; assumes the entire set of vectors is stored.
+ tail call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, float* %res)
+; CHECK: ST1Twov4s {{.*}} :: (store 32 {{.*}})
+ tail call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, float* %res)
+; CHECK: ST1Threev4s {{.*}} :: (store 48 {{.*}})
+ tail call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, float* %res)
+; CHECK: ST1Fourv4s {{.*}} :: (store 64 {{.*}})
+
+ ret void
+}
+
+define void @addstxlane(float* %res, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
+ %al = load <4 x float>, <4 x float>* %a
+ %bl = load <4 x float>, <4 x float>* %b
+ %cl = load <4 x float>, <4 x float>* %c
+ %dl = load <4 x float>, <4 x float>* %d
+
+ %ar = fadd <4 x float> %al, %bl
+ %br = fadd <4 x float> %bl, %cl
+ %cr = fadd <4 x float> %cl, %dl
+ %dr = fadd <4 x float> %dl, %al
+
+; The sizes below are conservative: AArch64TargetLowering
+; assumes the entire set of vectors is stored.
+ tail call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, i64 1, float* %res)
+; CHECK: ST2i32 {{.*}} :: (store 32 {{.*}})
+ tail call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, i64 1, float* %res)
+; CHECK: ST3i32 {{.*}} :: (store 48 {{.*}})
+ tail call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, i64 1, float* %res)
+; CHECK: ST4i32 {{.*}} :: (store 64 {{.*}})
+
+ ret void
+}