author     Tim Northover <tnorthover@apple.com>    2014-05-15 12:11:02 +0000
committer  Tim Northover <tnorthover@apple.com>    2014-05-15 12:11:02 +0000
commit     2509a3fc640587d7de5e75ef5e2088637264b9f0 (patch)
tree       0fb1812e13096374b9b7bacc9aae585831c03160 /llvm/test/CodeGen/ARM64
parent     a357badc582bdc7c6cb091e1a1c9575dc00471e4 (diff)
ARM64: print correct aliases for NEON mov & mvn instructions
In all cases, if a "mov" alias exists, it is the canonical form of the instruction. Now that TableGen can support aliases containing syntax variants, we can enable them and improve the quality of the asm output.

llvm-svn: 208874
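For context, the TableGen mechanism referred to above is InstAlias. A minimal sketch of the pattern, with record and operand names that are illustrative rather than the actual ARM64 .td definitions:

    // Hypothetical alias (names illustrative): maps the canonical "mov"
    // spelling onto the underlying UMOV lane-extract instruction, so the
    // AsmParser still accepts "umov" while the AsmWriter prints "mov".
    def : InstAlias<"mov $dst, $src.s$idx",
                    (UMOVvi32 GPR32:$dst, V128:$src, VectorIndexS:$idx)>;

With such an alias in place, both spellings assemble to the same encoding, and the asm printer emits the canonical "mov" form that the updated tests below check for.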
Diffstat (limited to 'llvm/test/CodeGen/ARM64')
-rw-r--r--  llvm/test/CodeGen/ARM64/aarch64-neon-copy.ll          10
-rw-r--r--  llvm/test/CodeGen/ARM64/neon-compare-instructions.ll  28
-rw-r--r--  llvm/test/CodeGen/ARM64/umov.ll                        8
-rw-r--r--  llvm/test/CodeGen/ARM64/vcmp.ll                        2
4 files changed, 24 insertions, 24 deletions
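Each updated test follows the usual llc + FileCheck pattern. A minimal standalone example of the new expectation (the RUN line, triple, and function name are assumptions for illustration, not copied from the tests):

    ; RUN: llc < %s -mtriple=arm64-none-linux-gnu | FileCheck %s
    define i32 @extract_lane(<2 x i32> %v) {
    ; CHECK-LABEL: extract_lane:
    ; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
      %e = extractelement <2 x i32> %v, i32 1
      ret i32 %e
    }

Before this commit, the CHECK line here would have matched "umov" instead of "mov"; the diff below makes exactly that substitution throughout.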
diff --git a/llvm/test/CodeGen/ARM64/aarch64-neon-copy.ll b/llvm/test/CodeGen/ARM64/aarch64-neon-copy.ll
index 3b4cc6cc4f7..9493cad3345 100644
--- a/llvm/test/CodeGen/ARM64/aarch64-neon-copy.ll
+++ b/llvm/test/CodeGen/ARM64/aarch64-neon-copy.ll
@@ -260,21 +260,21 @@ define i32 @umovw8h(<8 x i16> %tmp1) {
define i32 @umovw4s(<4 x i32> %tmp1) {
; CHECK-LABEL: umovw4s:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
%tmp3 = extractelement <4 x i32> %tmp1, i32 2
ret i32 %tmp3
}
define i64 @umovx2d(<2 x i64> %tmp1) {
; CHECK-LABEL: umovx2d:
-; CHECK: umov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK: mov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp3 = extractelement <2 x i64> %tmp1, i32 1
ret i64 %tmp3
}
define i32 @umovw8b(<8 x i8> %tmp1) {
; CHECK-LABEL: umovw8b:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
%tmp3 = extractelement <8 x i8> %tmp1, i32 7
%tmp4 = zext i8 %tmp3 to i32
ret i32 %tmp4
@@ -282,7 +282,7 @@ define i32 @umovw8b(<8 x i8> %tmp1) {
define i32 @umovw4h(<4 x i16> %tmp1) {
; CHECK-LABEL: umovw4h:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
%tmp3 = extractelement <4 x i16> %tmp1, i32 2
%tmp4 = zext i16 %tmp3 to i32
ret i32 %tmp4
@@ -290,7 +290,7 @@ define i32 @umovw4h(<4 x i16> %tmp1) {
define i32 @umovw2s(<2 x i32> %tmp1) {
; CHECK-LABEL: umovw2s:
-; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp3 = extractelement <2 x i32> %tmp1, i32 1
ret i32 %tmp3
}
diff --git a/llvm/test/CodeGen/ARM64/neon-compare-instructions.ll b/llvm/test/CodeGen/ARM64/neon-compare-instructions.ll
index 55f7b99cd6d..cba81ef99b9 100644
--- a/llvm/test/CodeGen/ARM64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/ARM64/neon-compare-instructions.ll
@@ -51,7 +51,7 @@ define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@@ -59,7 +59,7 @@ define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@@ -67,7 +67,7 @@ define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@@ -75,7 +75,7 @@ define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@@ -83,7 +83,7 @@ define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@@ -91,7 +91,7 @@ define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@@ -99,7 +99,7 @@ define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -803,7 +803,7 @@ define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@@ -811,7 +811,7 @@ define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@@ -819,7 +819,7 @@ define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@@ -827,7 +827,7 @@ define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@@ -835,7 +835,7 @@ define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@@ -843,7 +843,7 @@ define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@@ -851,7 +851,7 @@ define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
diff --git a/llvm/test/CodeGen/ARM64/umov.ll b/llvm/test/CodeGen/ARM64/umov.ll
index 770187448fa..19fd91b6c3d 100644
--- a/llvm/test/CodeGen/ARM64/umov.ll
+++ b/llvm/test/CodeGen/ARM64/umov.ll
@@ -2,7 +2,7 @@
define zeroext i8 @f1(<16 x i8> %a) {
; CHECK-LABEL: f1:
-; CHECK: umov.b w0, v0[3]
+; CHECK: mov.b w0, v0[3]
; CHECK-NEXT: ret
%vecext = extractelement <16 x i8> %a, i32 3
ret i8 %vecext
@@ -10,7 +10,7 @@ define zeroext i8 @f1(<16 x i8> %a) {
define zeroext i16 @f2(<4 x i16> %a) {
; CHECK-LABEL: f2:
-; CHECK: umov.h w0, v0[2]
+; CHECK: mov.h w0, v0[2]
; CHECK-NEXT: ret
%vecext = extractelement <4 x i16> %a, i32 2
ret i16 %vecext
@@ -18,7 +18,7 @@ define zeroext i16 @f2(<4 x i16> %a) {
define i32 @f3(<2 x i32> %a) {
; CHECK-LABEL: f3:
-; CHECK: umov.s w0, v0[1]
+; CHECK: mov.s w0, v0[1]
; CHECK-NEXT: ret
%vecext = extractelement <2 x i32> %a, i32 1
ret i32 %vecext
@@ -26,7 +26,7 @@ define i32 @f3(<2 x i32> %a) {
define i64 @f4(<2 x i64> %a) {
; CHECK-LABEL: f4:
-; CHECK: umov.d x0, v0[1]
+; CHECK: mov.d x0, v0[1]
; CHECK-NEXT: ret
%vecext = extractelement <2 x i64> %a, i32 1
ret i64 %vecext
diff --git a/llvm/test/CodeGen/ARM64/vcmp.ll b/llvm/test/CodeGen/ARM64/vcmp.ll
index 16ff177a054..56153f08f35 100644
--- a/llvm/test/CodeGen/ARM64/vcmp.ll
+++ b/llvm/test/CodeGen/ARM64/vcmp.ll
@@ -229,7 +229,7 @@ define <1 x i64> @fcmlt_d(<1 x double> %A, <1 x double> %B) nounwind {
define <1 x i64> @cmnez_d(<1 x i64> %A) nounwind {
; CHECK-LABEL: cmnez_d:
; CHECK: cmeq d[[EQ:[0-9]+]], d0, #0
-; CHECK: not.8b v0, v[[EQ]]
+; CHECK: mvn.8b v0, v[[EQ]]
%tst = icmp ne <1 x i64> %A, zeroinitializer
%mask = sext <1 x i1> %tst to <1 x i64>
ret <1 x i64> %mask