author     Tim Northover <tnorthover@apple.com>  2014-05-15 11:16:32 +0000
committer  Tim Northover <tnorthover@apple.com>  2014-05-15 11:16:32 +0000
commit     d8d65a69cfb608724e071048350b9616228dfe34 (patch)
tree       009f769115178cfbebd034aa7481523152978316 /llvm/test/CodeGen/ARM64
parent     dd8fca513682f6899d36b7161e48659fa216bba2 (diff)
TableGen/ARM64: print aliases even if they have syntax variants.
To get at least one use of the change (and some actual tests) in with its commit, I've enabled the AArch64 & ARM64 NEON mov aliases.

llvm-svn: 208867
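For context (not part of the commit message): in the AArch64 ISA, the whole-register vector copy "mov" is defined as an alias of "orr" with both source operands equal, so the test updates below simply switch from matching the canonical orr spelling to the mov alias that the printer now emits. A minimal sketch of the equivalence, assuming GNU-style AArch64 assembly syntax:

    // mov Vd.T, Vn.T is the architectural alias of orr Vd.T, Vn.T, Vn.T
    mov v2.16b, v1.16b          // preferred (alias) form, printed after this change
    orr v2.16b, v1.16b, v1.16b  // equivalent canonical form, printed before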
Diffstat (limited to 'llvm/test/CodeGen/ARM64')
-rw-r--r--  llvm/test/CodeGen/ARM64/aarch64-neon-copyPhysReg-tuple.ll  18
-rw-r--r--  llvm/test/CodeGen/ARM64/copy-tuple.ll                       36
-rw-r--r--  llvm/test/CodeGen/ARM64/fp128.ll                             4
3 files changed, 29 insertions, 29 deletions
diff --git a/llvm/test/CodeGen/ARM64/aarch64-neon-copyPhysReg-tuple.ll b/llvm/test/CodeGen/ARM64/aarch64-neon-copyPhysReg-tuple.ll
index 76e704736b7..f24392bb8fc 100644
--- a/llvm/test/CodeGen/ARM64/aarch64-neon-copyPhysReg-tuple.ll
+++ b/llvm/test/CodeGen/ARM64/aarch64-neon-copyPhysReg-tuple.ll
@@ -3,8 +3,8 @@
define <4 x i32> @copyTuple.QPair(i32* %a, i32* %b) {
; CHECK-LABEL: copyTuple.QPair:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
entry:
%vld = tail call { <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, i32* %a)
@@ -16,9 +16,9 @@ entry:
define <4 x i32> @copyTuple.QTriple(i32* %a, i32* %b, <4 x i32> %c) {
; CHECK-LABEL: copyTuple.QTriple:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
; CHECK: ld3 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
entry:
%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld3lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
@@ -30,10 +30,10 @@ entry:
define <4 x i32> @copyTuple.QQuad(i32* %a, i32* %b, <4 x i32> %c) {
; CHECK-LABEL: copyTuple.QQuad:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
; CHECK: ld4 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
entry:
%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld4lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
diff --git a/llvm/test/CodeGen/ARM64/copy-tuple.ll b/llvm/test/CodeGen/ARM64/copy-tuple.ll
index 6325c3f8550..f9981931246 100644
--- a/llvm/test/CodeGen/ARM64/copy-tuple.ll
+++ b/llvm/test/CodeGen/ARM64/copy-tuple.ll
@@ -9,8 +9,8 @@
define void @test_D1D2_from_D0D1(i8* %addr) #0 {
; CHECK-LABEL: test_D1D2_from_D0D1:
-; CHECK: orr.8b v2, v1
-; CHECK: orr.8b v1, v0
+; CHECK: mov.8b v2, v1
+; CHECK: mov.8b v1, v0
entry:
%addr_v8i8 = bitcast i8* %addr to <8 x i8>*
%vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -26,8 +26,8 @@ entry:
define void @test_D0D1_from_D1D2(i8* %addr) #0 {
; CHECK-LABEL: test_D0D1_from_D1D2:
-; CHECK: orr.8b v0, v1
-; CHECK: orr.8b v1, v2
+; CHECK: mov.8b v0, v1
+; CHECK: mov.8b v1, v2
entry:
%addr_v8i8 = bitcast i8* %addr to <8 x i8>*
%vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -43,8 +43,8 @@ entry:
define void @test_D0D1_from_D31D0(i8* %addr) #0 {
; CHECK-LABEL: test_D0D1_from_D31D0:
-; CHECK: orr.8b v1, v0
-; CHECK: orr.8b v0, v31
+; CHECK: mov.8b v1, v0
+; CHECK: mov.8b v0, v31
entry:
%addr_v8i8 = bitcast i8* %addr to <8 x i8>*
%vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -60,8 +60,8 @@ entry:
define void @test_D31D0_from_D0D1(i8* %addr) #0 {
; CHECK-LABEL: test_D31D0_from_D0D1:
-; CHECK: orr.8b v31, v0
-; CHECK: orr.8b v0, v1
+; CHECK: mov.8b v31, v0
+; CHECK: mov.8b v0, v1
entry:
%addr_v8i8 = bitcast i8* %addr to <8 x i8>*
%vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -77,9 +77,9 @@ entry:
define void @test_D2D3D4_from_D0D1D2(i8* %addr) #0 {
; CHECK-LABEL: test_D2D3D4_from_D0D1D2:
-; CHECK: orr.8b v4, v2
-; CHECK: orr.8b v3, v1
-; CHECK: orr.8b v2, v0
+; CHECK: mov.8b v4, v2
+; CHECK: mov.8b v3, v1
+; CHECK: mov.8b v2, v0
entry:
%addr_v8i8 = bitcast i8* %addr to <8 x i8>*
%vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -97,9 +97,9 @@ entry:
define void @test_Q0Q1Q2_from_Q1Q2Q3(i8* %addr) #0 {
; CHECK-LABEL: test_Q0Q1Q2_from_Q1Q2Q3:
-; CHECK: orr.16b v0, v1
-; CHECK: orr.16b v1, v2
-; CHECK: orr.16b v2, v3
+; CHECK: mov.16b v0, v1
+; CHECK: mov.16b v1, v2
+; CHECK: mov.16b v2, v3
entry:
%addr_v16i8 = bitcast i8* %addr to <16 x i8>*
%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
@@ -116,10 +116,10 @@ entry:
define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(i8* %addr) #0 {
; CHECK-LABEL: test_Q1Q2Q3Q4_from_Q30Q31Q0Q1:
-; CHECK: orr.16b v4, v1
-; CHECK: orr.16b v3, v0
-; CHECK: orr.16b v2, v31
-; CHECK: orr.16b v1, v30
+; CHECK: mov.16b v4, v1
+; CHECK: mov.16b v3, v0
+; CHECK: mov.16b v2, v31
+; CHECK: mov.16b v1, v30
%addr_v16i8 = bitcast i8* %addr to <16 x i8>*
%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm64.neon.ld4.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
%vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
diff --git a/llvm/test/CodeGen/ARM64/fp128.ll b/llvm/test/CodeGen/ARM64/fp128.ll
index 6aef6f5e485..57bbb93e12b 100644
--- a/llvm/test/CodeGen/ARM64/fp128.ll
+++ b/llvm/test/CodeGen/ARM64/fp128.ll
@@ -205,7 +205,7 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
; CHECK: tst w0, #0x1
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: BB#
-; CHECK-NEXT: orr v[[VAL:[0-9]+]].16b, v0.16b, v0.16b
+; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
; CHECK-NEXT: [[IFFALSE]]:
; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
ret void
@@ -264,7 +264,7 @@ define fp128 @test_neg(fp128 %in) {
; Could in principle be optimized to fneg which we can't select, this makes
; sure that doesn't happen.
%ret = fsub fp128 0xL00000000000000008000000000000000, %in
-; CHECK: orr v1.16b, v0.16b, v0.16b
+; CHECK: mov v1.16b, v0.16b
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:[[MINUS0]]]
; CHECK: bl __subtf3