author     Sjoerd Meijer <sjoerd.meijer@arm.com>   2018-08-08 14:42:11 +0000
committer  Sjoerd Meijer <sjoerd.meijer@arm.com>   2018-08-08 14:42:11 +0000
commit     1919ecfd0bca69d353719bf57f93217036184fb5 (patch)
tree       2b456f6d07c0904f44faf2c8085baf415e041c64
parent     4a68586d9cee9b7137ebebf9fdf72d635e8c2fb6 (diff)
download   bcm5719-llvm-1919ecfd0bca69d353719bf57f93217036184fb5.tar.gz
           bcm5719-llvm-1919ecfd0bca69d353719bf57f93217036184fb5.zip
[ARM][NFC] Replaced tab-characters in test file vtrn.ll
llvm-svn: 339251
-rw-r--r--  llvm/test/CodeGen/ARM/vtrn.ll  200
1 file changed, 100 insertions(+), 100 deletions(-)
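
Note: the change below is a pure whitespace cleanup, replacing tab characters in the test body with two-space indentation while leaving the IR itself untouched (hence the NFC tag). A cleanup like this could be scripted; the following is only an illustrative Python sketch, not the method recorded in the commit, and the untabify helper name and the two-space width are assumptions.

    from pathlib import Path

    def untabify(path: str, spaces_per_tab: int = 2) -> None:
        # Read the file, expand every tab to the given number of spaces,
        # and write the result back in place.
        p = Path(path)
        text = p.read_text()
        p.write_text(text.replace("\t", " " * spaces_per_tab))

    if __name__ == "__main__":
        # Hypothetical invocation; the path is the file touched by this commit.
        untabify("llvm/test/CodeGen/ARM/vtrn.ll")
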
diff --git a/llvm/test/CodeGen/ARM/vtrn.ll b/llvm/test/CodeGen/ARM/vtrn.ll
index 12cb504eda7..6b200176e1f 100644
--- a/llvm/test/CodeGen/ARM/vtrn.ll
+++ b/llvm/test/CodeGen/ARM/vtrn.ll
@@ -9,12 +9,12 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
}
define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -26,10 +26,10 @@ define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- ret <16 x i8> %tmp3
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <16 x i8> %tmp3
}
define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -41,12 +41,12 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: vadd.i16 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i16>, <4 x i16>* %A
- %tmp2 = load <4 x i16>, <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
}
define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -58,10 +58,10 @@ define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i16>, <4 x i16>* %A
- %tmp2 = load <4 x i16>, <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x i16> %tmp3
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x i16> %tmp3
}
define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -73,12 +73,12 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-NEXT: vmul.i32 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x i32>, <2 x i32>* %A
- %tmp2 = load <2 x i32>, <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = mul <2 x i32> %tmp3, %tmp4
- ret <2 x i32> %tmp5
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = mul <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
}
define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -90,10 +90,10 @@ define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x i32>, <2 x i32>* %A
- %tmp2 = load <2 x i32>, <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x i32> %tmp3
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x i32> %tmp3
}
define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
@@ -105,12 +105,12 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-NEXT: vadd.f32 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = fadd <2 x float> %tmp3, %tmp4
- ret <2 x float> %tmp5
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = fadd <2 x float> %tmp3, %tmp4
+ ret <2 x float> %tmp5
}
define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
@@ -122,10 +122,10 @@ define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x float> %tmp3
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x float> %tmp3
}
define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
@@ -138,12 +138,12 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
}
define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
@@ -155,10 +155,10 @@ define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-NEXT: vst1.8 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <32 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- ret <32 x i8> %tmp3
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <32 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <32 x i8> %tmp3
}
define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -171,12 +171,12 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
}
define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -188,10 +188,10 @@ define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vst1.16 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- ret <16 x i16> %tmp3
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <16 x i16> %tmp3
}
define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
@@ -204,12 +204,12 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <4 x i32>, <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
}
define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
@@ -221,10 +221,10 @@ define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-NEXT: vst1.32 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <4 x i32>, <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x i32> %tmp3
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x i32> %tmp3
}
define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
@@ -237,12 +237,12 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x float>, <4 x float>* %A
- %tmp2 = load <4 x float>, <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = fadd <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = fadd <4 x float> %tmp3, %tmp4
+ ret <4 x float> %tmp5
}
define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
@@ -254,10 +254,10 @@ define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: vst1.32 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x float>, <4 x float>* %A
- %tmp2 = load <4 x float>, <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x float> %tmp3
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x float> %tmp3
}
@@ -270,12 +270,12 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
}
define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -287,10 +287,10 @@ define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
- ret <16 x i8> %tmp3
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -303,12 +303,12 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
}
define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -320,10 +320,10 @@ define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vst1.16 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14, i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
- ret <16 x i16> %tmp3
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14, i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ ret <16 x i16> %tmp3
}
define <8 x i16> @vtrn_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {