author     Ahmed Bougacha <ahmed.bougacha@gmail.com>   2014-12-23 06:07:31 +0000
committer  Ahmed Bougacha <ahmed.bougacha@gmail.com>   2014-12-23 06:07:31 +0000
commit     4553bff4126068ce5b9f451b75f71d367320cec7 (patch)
tree       d591daa8e5565f9550132450c5966ff1b49f31c6 /llvm/test/CodeGen
parent     279663c1b4bfcec7f849dd2becf207841c45c0cf (diff)
[ARM] Don't break alignment when combining base updates into load/stores.
r223862/r224203 tried to also combine base-updating load/stores.

There was a mistake there: the alignment was added as-is as an operand to the ARMISD::VLD/VST node. However, the VLD/VST selection logic doesn't care about less-than-standard alignment attributes. For example, no matter the alignment of a v2i64 load (say 1), SelectVLD picks VLD1q64 (because of the memory type). But VLD1q64 ("vld1.64 {dXX, dYY}") is 8-aligned, per ARMARMv7a 3.2.1. For the 1-aligned load, what we really want is VLD1q8.

This commit introduces bitcasts if necessary, and changes the vld/vst type to one whose standard alignment matches the original load/store alignment.

Differential Revision: http://reviews.llvm.org/D6759

llvm-svn: 224754
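As a minimal illustration of the intended lowering (this mirrors the load_v2i64_update_aligned* tests added to vector-load.ll below; the function name here is only illustrative), a base-updating load of a 16-byte vector with IR alignment 2 should now select a 16-bit-element vld1 instead of the 8-aligned vld1.64:

define <2 x i64> @example_load_update_align2(<2 x i64>** %ptr) {
; expected, per the tests added below: vld1.16 {dXX, dYY}, [rN]!
 %A = load <2 x i64>** %ptr
 %lA = load <2 x i64>* %A, align 2             ; under-aligned relative to the natural 16-byte alignment
 %inc = getelementptr <2 x i64>* %A, i32 1     ; base update folded into the post-incrementing vld1
 store <2 x i64>* %inc, <2 x i64>** %ptr
 ret <2 x i64> %lA
}

More generally, the element size now tracks the IR alignment: align 1 selects vld1.8/vst1.8, align 2 selects vld1.16/vst1.16, align 4 selects vld1.32/vst1.32, and align 8 or 16 keeps vld1.64/vst1.64 with a :64 or :128 address-alignment hint.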
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/ARM/memcpy-inline.ll  |  10
-rw-r--r--  llvm/test/CodeGen/ARM/vector-load.ll    |  59
-rw-r--r--  llvm/test/CodeGen/ARM/vector-store.ll   |  60
3 files changed, 105 insertions, 24 deletions
diff --git a/llvm/test/CodeGen/ARM/memcpy-inline.ll b/llvm/test/CodeGen/ARM/memcpy-inline.ll
index dca2eb9f26f..33ac4e12563 100644
--- a/llvm/test/CodeGen/ARM/memcpy-inline.ll
+++ b/llvm/test/CodeGen/ARM/memcpy-inline.ll
@@ -46,8 +46,8 @@ entry:
; CHECK: movw [[REG2:r[0-9]+]], #16716
; CHECK: movt [[REG2:r[0-9]+]], #72
; CHECK: str [[REG2]], [r0, #32]
-; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
@@ -57,8 +57,8 @@ entry:
define void @t3(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t3:
-; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
; CHECK: vld1.8 {d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}}, [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
@@ -69,7 +69,7 @@ define void @t4(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t4:
; CHECK: vld1.8 {[[REG3:d[0-9]+]], [[REG4:d[0-9]+]]}, [r1]
-; CHECK: vst1.64 {[[REG3]], [[REG4]]}, [r0]!
+; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]!
; CHECK: strh [[REG5:r[0-9]+]], [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
ret void
diff --git a/llvm/test/CodeGen/ARM/vector-load.ll b/llvm/test/CodeGen/ARM/vector-load.ll
index 0a018d833fb..008bd1f6f8c 100644
--- a/llvm/test/CodeGen/ARM/vector-load.ll
+++ b/llvm/test/CodeGen/ARM/vector-load.ll
@@ -31,7 +31,7 @@ define <4 x i16> @load_v4i16(<4 x i16>** %ptr) {
define <4 x i16> @load_v4i16_update(<4 x i16>** %ptr) {
;CHECK-LABEL: load_v4i16_update:
-;CHECK: vld1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i16>** %ptr
%lA = load <4 x i16>* %A, align 1
%inc = getelementptr <4 x i16>* %A, i34 1
@@ -49,7 +49,7 @@ define <2 x i32> @load_v2i32(<2 x i32>** %ptr) {
define <2 x i32> @load_v2i32_update(<2 x i32>** %ptr) {
;CHECK-LABEL: load_v2i32_update:
-;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i32>** %ptr
%lA = load <2 x i32>* %A, align 1
%inc = getelementptr <2 x i32>* %A, i32 1
@@ -67,7 +67,7 @@ define <2 x float> @load_v2f32(<2 x float>** %ptr) {
define <2 x float> @load_v2f32_update(<2 x float>** %ptr) {
;CHECK-LABEL: load_v2f32_update:
-;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x float>** %ptr
%lA = load <2 x float>* %A, align 1
%inc = getelementptr <2 x float>* %A, i32 1
@@ -85,7 +85,7 @@ define <1 x i64> @load_v1i64(<1 x i64>** %ptr) {
define <1 x i64> @load_v1i64_update(<1 x i64>** %ptr) {
;CHECK-LABEL: load_v1i64_update:
-;CHECK: vld1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <1 x i64>** %ptr
%lA = load <1 x i64>* %A, align 1
%inc = getelementptr <1 x i64>* %A, i31 1
@@ -121,7 +121,7 @@ define <8 x i16> @load_v8i16(<8 x i16>** %ptr) {
define <8 x i16> @load_v8i16_update(<8 x i16>** %ptr) {
;CHECK-LABEL: load_v8i16_update:
-;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i16>** %ptr
%lA = load <8 x i16>* %A, align 1
%inc = getelementptr <8 x i16>* %A, i38 1
@@ -139,7 +139,7 @@ define <4 x i32> @load_v4i32(<4 x i32>** %ptr) {
define <4 x i32> @load_v4i32_update(<4 x i32>** %ptr) {
;CHECK-LABEL: load_v4i32_update:
-;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i32>** %ptr
%lA = load <4 x i32>* %A, align 1
%inc = getelementptr <4 x i32>* %A, i34 1
@@ -157,7 +157,7 @@ define <4 x float> @load_v4f32(<4 x float>** %ptr) {
define <4 x float> @load_v4f32_update(<4 x float>** %ptr) {
;CHECK-LABEL: load_v4f32_update:
-;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x float>** %ptr
%lA = load <4 x float>* %A, align 1
%inc = getelementptr <4 x float>* %A, i34 1
@@ -175,7 +175,7 @@ define <2 x i64> @load_v2i64(<2 x i64>** %ptr) {
define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
;CHECK-LABEL: load_v2i64_update:
-;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 1
%inc = getelementptr <2 x i64>* %A, i32 1
@@ -183,6 +183,47 @@ define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
ret <2 x i64> %lA
}
+; Make sure we change the type to match alignment if necessary.
+define <2 x i64> @load_v2i64_update_aligned2(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned2:
+;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 2
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned4(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned4:
+;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 4
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned8(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned8:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:64]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 8
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned16(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned16:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 16
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
; Make sure we don't break smaller-than-dreg extloads.
define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
;CHECK-LABEL: zextload_v8i8tov8i32:
@@ -190,7 +231,7 @@ define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
;CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
;CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
%A = load <4 x i8>** %ptr
- %lA = load <4 x i8>* %A, align 1
+ %lA = load <4 x i8>* %A, align 4
%zlA = zext <4 x i8> %lA to <4 x i32>
ret <4 x i32> %zlA
}
diff --git a/llvm/test/CodeGen/ARM/vector-store.ll b/llvm/test/CodeGen/ARM/vector-store.ll
index 17994118241..9036a31d141 100644
--- a/llvm/test/CodeGen/ARM/vector-store.ll
+++ b/llvm/test/CodeGen/ARM/vector-store.ll
@@ -31,7 +31,7 @@ define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
;CHECK-LABEL: store_v4i16_update:
-;CHECK: vst1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i16>** %ptr
store <4 x i16> %val, <4 x i16>* %A, align 1
%inc = getelementptr <4 x i16>* %A, i34 1
@@ -49,7 +49,7 @@ define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
;CHECK-LABEL: store_v2i32_update:
-;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i32>** %ptr
store <2 x i32> %val, <2 x i32>* %A, align 1
%inc = getelementptr <2 x i32>* %A, i32 1
@@ -67,7 +67,7 @@ define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
;CHECK-LABEL: store_v2f32_update:
-;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x float>** %ptr
store <2 x float> %val, <2 x float>* %A, align 1
%inc = getelementptr <2 x float>* %A, i32 1
@@ -85,7 +85,7 @@ define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
;CHECK-LABEL: store_v1i64_update:
-;CHECK: vst1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <1 x i64>** %ptr
store <1 x i64> %val, <1 x i64>* %A, align 1
%inc = getelementptr <1 x i64>* %A, i31 1
@@ -121,7 +121,7 @@ define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
;CHECK-LABEL: store_v8i16_update:
-;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i16>** %ptr
store <8 x i16> %val, <8 x i16>* %A, align 1
%inc = getelementptr <8 x i16>* %A, i38 1
@@ -139,7 +139,7 @@ define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
;CHECK-LABEL: store_v4i32_update:
-;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i32>** %ptr
store <4 x i32> %val, <4 x i32>* %A, align 1
%inc = getelementptr <4 x i32>* %A, i34 1
@@ -157,7 +157,7 @@ define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
;CHECK-LABEL: store_v4f32_update:
-;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x float>** %ptr
store <4 x float> %val, <4 x float>* %A, align 1
%inc = getelementptr <4 x float>* %A, i34 1
@@ -175,7 +175,7 @@ define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
;CHECK-LABEL: store_v2i64_update:
-;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 1
%inc = getelementptr <2 x i64>* %A, i32 1
@@ -183,6 +183,46 @@ define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
ret void
}
+define void @store_v2i64_update_aligned2(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned2:
+;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 2
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned4(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned4:
+;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 4
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned8(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned8:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:64]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 8
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned16(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned16:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 16
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
;CHECK-LABEL: truncstore_v4i32tov4i8:
;CHECK: ldr.w r9, [sp]
@@ -191,10 +231,10 @@ define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
;CHECK: vmovn.i32 [[VECLO:d[0-9]+]], {{q[0-9]+}}
;CHECK: vuzp.8 [[VECLO]], {{d[0-9]+}}
;CHECK: ldr r[[PTRREG:[0-9]+]], [r0]
-;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32]
%A = load <4 x i8>** %ptr
%trunc = trunc <4 x i32> %val to <4 x i8>
- store <4 x i8> %trunc, <4 x i8>* %A, align 1
+ store <4 x i8> %trunc, <4 x i8>* %A, align 4
ret void
}