Diffstat (limited to 'llvm/test/CodeGen/ARM/vtbl.ll')
-rw-r--r-- | llvm/test/CodeGen/ARM/vtbl.ll | 66
1 file changed, 33 insertions, 33 deletions
diff --git a/llvm/test/CodeGen/ARM/vtbl.ll b/llvm/test/CodeGen/ARM/vtbl.ll
index 89653b097ac..926498739e8 100644
--- a/llvm/test/CodeGen/ARM/vtbl.ll
+++ b/llvm/test/CodeGen/ARM/vtbl.ll
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v8qi3 = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v8qi4 = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
 
 define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vtbl1:
@@ -13,38 +13,38 @@ define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 	ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B) nounwind {
+define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B) nounwind {
 ;CHECK: vtbl2:
 ;CHECK: vtbl.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi2* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+	%tmp2 = load %struct.__neon_int8x8x2_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
 	%tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
 	ret <8 x i8> %tmp5
 }
 
-define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B) nounwind {
+define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B) nounwind {
 ;CHECK: vtbl3:
 ;CHECK: vtbl.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi3* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
-	%tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+	%tmp2 = load %struct.__neon_int8x8x3_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
 	%tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
 	ret <8 x i8> %tmp6
 }
 
-define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B) nounwind {
+define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B) nounwind {
 ;CHECK: vtbl4:
 ;CHECK: vtbl.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi4* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
-	%tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
-	%tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+	%tmp2 = load %struct.__neon_int8x8x4_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+	%tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
 	%tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
 	ret <8 x i8> %tmp7
 }
@@ -59,40 +59,40 @@ define <8 x i8> @vtbx1(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 	ret <8 x i8> %tmp4
 }
 
-define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK: vtbx2:
 ;CHECK: vtbx.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi2* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+	%tmp2 = load %struct.__neon_int8x8x2_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
 	%tmp5 = load <8 x i8>* %C
 	%tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
 	ret <8 x i8> %tmp6
 }
 
-define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK: vtbx3:
 ;CHECK: vtbx.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi3* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
-	%tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+	%tmp2 = load %struct.__neon_int8x8x3_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
 	%tmp6 = load <8 x i8>* %C
 	%tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
 	ret <8 x i8> %tmp7
 }
 
-define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK: vtbx4:
 ;CHECK: vtbx.8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__builtin_neon_v8qi4* %B
-	%tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
-	%tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
-	%tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
-	%tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+	%tmp2 = load %struct.__neon_int8x8x4_t* %B
+	%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+	%tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
 	%tmp7 = load <8 x i8>* %C
 	%tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
 	ret <8 x i8> %tmp8