author    Owen Anderson <resistor@mac.com>  2010-11-01 18:13:11 +0000
committer Owen Anderson <resistor@mac.com>  2010-11-01 18:13:11 +0000
commit    e5271a935a4121fbbf5c6083cd0945d338a7f64c (patch)
tree      cc4f70b84481ac14b8ab6672067cf80e1f4d12fc /llvm/test
parent    7d45c101e5ecb13f8c6e16f09e0697c89a703e93 (diff)
download  bcm5719-llvm-e5271a935a4121fbbf5c6083cd0945d338a7f64c.tar.gz
          bcm5719-llvm-e5271a935a4121fbbf5c6083cd0945d338a7f64c.zip
Convert this test to .s form.
llvm-svn: 117937
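
This change replaces an XFAIL'd IR test, which exercised the NEON encodings indirectly through llc, with a test that feeds ARM assembly directly to llvm-mc. For comparison, the old and new RUN lines, both reproduced from the diff below:

    old (.ll): llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
    new (.s):  llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s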
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/MC/ARM/neon-shuffle-encoding.ll  215
-rw-r--r--  llvm/test/MC/ARM/neon-shuffle-encoding.s    46
2 files changed, 46 insertions, 215 deletions
diff --git a/llvm/test/MC/ARM/neon-shuffle-encoding.ll b/llvm/test/MC/ARM/neon-shuffle-encoding.ll
deleted file mode 100644
index 98ccf425c6d..00000000000
--- a/llvm/test/MC/ARM/neon-shuffle-encoding.ll
+++ /dev/null
@@ -1,215 +0,0 @@
-; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
-
-; XFAIL: *
-
-define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
-; CHECK: vext.8 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
- ret <8 x i8> %tmp3
-}
-
-define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
-; CHECK: vext.8 d16, d17, d16, #5 @ encoding: [0xa0,0x05,0xf1,0xf2]
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
-; CHECK: vext.8 q8, q8, q9, #3 @ encoding: [0xe2,0x03,0xf0,0xf2]
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
- ret <16 x i8> %tmp3
-}
-
-define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
-; CHECK: vext.8 q8, q8, q9, #7 @ encoding: [0xe2,0x07,0xf0,0xf2]
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
-; CHECK: vext.16 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
- ret <4 x i16> %tmp3
-}
-
-define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
-; CHECK: vext.32 q8, q8, q9, #3 @ encoding: [0xe2,0x03,0xf0,0xf2]
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
- ret <4 x i32> %tmp3
-}
-
-define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
-; CHECK: vtrn.8 d17, d16 @ encoding: [0xa0,0x10,0xf2,0xf3]
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
-; CHECK: vtrn.16 d17, d16 @ encoding: [0xa0,0x10,0xf6,0xf3]
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
-; CHECK: vtrn.32 d17, d16 @ encoding: [0xa0,0x10,0xfa,0xf3]
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = add <2 x i32> %tmp3, %tmp4
- ret <2 x i32> %tmp5
-}
-
-define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
-; CHECK: vadd.i8 q8, q9, q8 @ encoding: [0xe0,0x08,0x42,0xf2]
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
-; CHECK: vadd.i16 q8, q9, q8 @ encoding: [0xe0,0x08,0x52,0xf2]
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
-; CHECK: vtrn.32 q9, q8 @ encoding: [0xe0,0x20,0xfa,0xf3]
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
-; CHECK: vuzp.8 d17, d16 @ encoding: [0x20,0x11,0xf2,0xf3]
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
-; CHECK: vuzp.16 d17, d16 @ encoding: [0x20,0x11,0xf6,0xf3]
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
-; CHECK: vuzp.8 q9, q8 @ encoding: [0x60,0x21,0xf2,0xf3]
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
-; CHECK: vuzp.16 q9, q8 @ encoding: [0x60,0x21,0xf6,0xf3]
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
-; CHECK: vuzp.32 q9, q8 @ encoding: [0x60,0x21,0xfa,0xf3]
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
-; CHECK: vzip.8 d17, d16 @ encoding: [0xa0,0x11,0xf2,0xf3]
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
-; CHECK: vzip.16 d17, d16 @ encoding: [0xa0,0x11,0xf6,0xf3]
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
-; CHECK: vzip.8 q9, q8 @ encoding: [0xe0,0x21,0xf2,0xf3]
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
-; CHECK: vzip.16 q9, q8 @ encoding: [0xe0,0x21,0xf6,0xf3]
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
-; CHECK: vzip.32 q9, q8 @ encoding: [0xe0,0x21,0xfa,0xf3]
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
diff --git a/llvm/test/MC/ARM/neon-shuffle-encoding.s b/llvm/test/MC/ARM/neon-shuffle-encoding.s
new file mode 100644
index 00000000000..0fcd6e14bb2
--- /dev/null
+++ b/llvm/test/MC/ARM/neon-shuffle-encoding.s
@@ -0,0 +1,46 @@
+@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s
+
+@ CHECK: vext.8 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
+ vext.8 d16, d17, d16, #3
+@ CHECK: vext.8 d16, d17, d16, #5 @ encoding: [0xa0,0x05,0xf1,0xf2]
+ vext.8 d16, d17, d16, #5
+@ CHECK: vext.8 q8, q9, q8, #3 @ encoding: [0xe0,0x03,0xf2,0xf2]
+ vext.8 q8, q9, q8, #3
+@ CHECK: vext.8 q8, q9, q8, #7 @ encoding: [0xe0,0x07,0xf2,0xf2]
+ vext.8 q8, q9, q8, #7
+@ CHECK: vext.16 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
+ vext.16 d16, d17, d16, #3
+@ CHECK: vext.32 q8, q9, q8, #3 @ encoding: [0xe0,0x03,0xf2,0xf2]
+ vext.32 q8, q9, q8, #3
+@ CHECK: vtrn.8 d17, d16 @ encoding: [0xa0,0x10,0xf2,0xf3]
+ vtrn.8 d17, d16
+@ CHECK: vtrn.16 d17, d16 @ encoding: [0xa0,0x10,0xf6,0xf3]
+ vtrn.16 d17, d16
+@ CHECK: vtrn.32 d17, d16 @ encoding: [0xa0,0x10,0xfa,0xf3]
+ vtrn.32 d17, d16
+@ CHECK: vtrn.8 q9, q8 @ encoding: [0xe0,0x20,0xf2,0xf3]
+ vtrn.8 q9, q8
+@ CHECK: vtrn.16 q9, q8 @ encoding: [0xe0,0x20,0xf6,0xf3]
+ vtrn.16 q9, q8
+@ CHECK: vtrn.32 q9, q8 @ encoding: [0xe0,0x20,0xfa,0xf3]
+ vtrn.32 q9, q8
+@ CHECK: vuzp.8 d17, d16 @ encoding: [0x20,0x11,0xf2,0xf3]
+ vuzp.8 d17, d16
+@ CHECK: vuzp.16 d17, d16 @ encoding: [0x20,0x11,0xf6,0xf3]
+ vuzp.16 d17, d16
+@ CHECK: vuzp.8 q9, q8 @ encoding: [0x60,0x21,0xf2,0xf3]
+ vuzp.8 q9, q8
+@ CHECK: vuzp.16 q9, q8 @ encoding: [0x60,0x21,0xf6,0xf3]
+ vuzp.16 q9, q8
+@ CHECK: vuzp.32 q9, q8 @ encoding: [0x60,0x21,0xfa,0xf3]
+ vuzp.32 q9, q8
+@ CHECK: vzip.8 d17, d16 @ encoding: [0xa0,0x11,0xf2,0xf3]
+ vzip.8 d17, d16
+@ CHECK: vzip.16 d17, d16 @ encoding: [0xa0,0x11,0xf6,0xf3]
+ vzip.16 d17, d16
+@ CHECK: vzip.8 q9, q8 @ encoding: [0xe0,0x21,0xf2,0xf3]
+ vzip.8 q9, q8
+@ CHECK: vzip.16 q9, q8 @ encoding: [0xe0,0x21,0xf6,0xf3]
+ vzip.16 q9, q8
+@ CHECK: vzip.32 q9, q8 @ encoding: [0xe0,0x21,0xfa,0xf3]
+ vzip.32 q9, q8