Diffstat (limited to 'llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll')
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll | 370
1 file changed, 370 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
new file mode 100644
index 00000000000..deb3be28ddb
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
@@ -0,0 +1,370 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat %s -o - | FileCheck %s
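+;
+; These tests cover masked gathers of i8 elements addressed by unscaled
+; (byte) offsets from a common base. Only the v16i8 gather with
+; zero-extended i8 offsets is selected as a single MVE vector gather load;
+; the remaining cases below are expanded into per-lane scalar loads.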
+
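+; v16i8 gather, i8 offsets zero-extended to i32: matches the vldrb.u8
+; gather form directly.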
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8(i8* %base, <16 x i8>* %offptr) {
+; CHECK-LABEL: unscaled_v16i8_i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldrb.u8 q1, [r1]
+; CHECK-NEXT: vldrb.u8 q0, [r0, q1]
+; CHECK-NEXT: bx lr
+entry:
+ %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+ %offs.zext = zext <16 x i8> %offs to <16 x i32>
+ %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
+ %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ ret <16 x i8> %gather
+}
+
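+; v8i8 gather, i8 offsets zero-extended: no matching vector gather form,
+; so each lane is loaded with a scalar ldrb.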
+define arm_aapcs_vfpcc <8 x i8> @unscaled_v8i8_i8(i8* %base, <8 x i8>* %offptr) {
+; CHECK-LABEL: unscaled_v8i8_i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: vldrb.u32 q0, [r1]
+; CHECK-NEXT: vldrb.u32 q1, [r1, #4]
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vadd.i32 q1, q1, r0
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, s3
+; CHECK-NEXT: vmov r5, s1
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s5
+; CHECK-NEXT: vmov r4, s7
+; CHECK-NEXT: ldrb.w r12, [r2]
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: ldrb.w lr, [r3]
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: ldrb r1, [r1]
+; CHECK-NEXT: ldrb r4, [r4]
+; CHECK-NEXT: ldrb r2, [r2]
+; CHECK-NEXT: ldrb r3, [r3]
+; CHECK-NEXT: vmov.16 q0[0], r2
+; CHECK-NEXT: vmov.16 q0[1], r5
+; CHECK-NEXT: vmov.16 q0[2], r12
+; CHECK-NEXT: vmov.16 q0[3], lr
+; CHECK-NEXT: vmov.16 q0[4], r0
+; CHECK-NEXT: vmov.16 q0[5], r1
+; CHECK-NEXT: vmov.16 q0[6], r3
+; CHECK-NEXT: vmov.16 q0[7], r4
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+entry:
+ %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+ %offs.zext = zext <8 x i8> %offs to <8 x i32>
+ %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
+ %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+ ret <8 x i8> %gather
+}
+
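+; v2i8 gather: the offsets are masked to 8 bits with vand and the two
+; lanes are loaded individually.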
+define arm_aapcs_vfpcc <2 x i8> @unscaled_v2i8_i8(i8* %base, <2 x i8>* %offptr) {
+; CHECK-LABEL: unscaled_v2i8_i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: ldrb r2, [r1]
+; CHECK-NEXT: vmov.i32 q0, #0xff
+; CHECK-NEXT: ldrb r1, [r1, #1]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: ldrb r1, [r0, r1]
+; CHECK-NEXT: ldrb r0, [r0, r2]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: bx lr
+entry:
+ %offs = load <2 x i8>, <2 x i8>* %offptr, align 1
+ %offs.zext = zext <2 x i8> %offs to <2 x i32>
+ %ptrs = getelementptr inbounds i8, i8* %base, <2 x i32> %offs.zext
+ %gather = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> undef)
+ ret <2 x i8> %gather
+}
+
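+; Sign-extended i8 offsets cannot use the unsigned-offset vldrb gather,
+; so the gather is scalarized.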
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_sext(i8* %base, <16 x i8>* %offptr) {
+; CHECK-LABEL: unscaled_v16i8_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vldrb.s32 q0, [r1, #8]
+; CHECK-NEXT: vldrb.s32 q2, [r1, #4]
+; CHECK-NEXT: vadd.i32 q1, q0, r0
+; CHECK-NEXT: vldrb.s32 q0, [r1, #12]
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vadd.i32 q2, q2, r0
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov r6, s4
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: vmov r5, s7
+; CHECK-NEXT: ldrb.w r12, [r2]
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: ldrb r6, [r6]
+; CHECK-NEXT: ldrb r3, [r3]
+; CHECK-NEXT: ldrb r4, [r4]
+; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb.w lr, [r2]
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: vldrb.s32 q0, [r1]
+; CHECK-NEXT: vadd.i32 q3, q0, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: ldrb r2, [r2]
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[0], r0
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[1], r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[2], r0
+; CHECK-NEXT: vmov r0, s15
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[3], r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[4], r0
+; CHECK-NEXT: vmov r0, s9
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[5], r0
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[6], r0
+; CHECK-NEXT: vmov r0, s11
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[7], r0
+; CHECK-NEXT: vmov r0, s5
+; CHECK-NEXT: vmov.8 q0[8], r6
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[9], r0
+; CHECK-NEXT: vmov.8 q0[10], r12
+; CHECK-NEXT: vmov.8 q0[11], r5
+; CHECK-NEXT: vmov.8 q0[12], lr
+; CHECK-NEXT: vmov.8 q0[13], r2
+; CHECK-NEXT: vmov.8 q0[14], r3
+; CHECK-NEXT: vmov.8 q0[15], r4
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+entry:
+ %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+ %offs.sext = sext <16 x i8> %offs to <16 x i32>
+ %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
+ %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ ret <16 x i8> %gather
+}
+
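+; i16 offsets sign-extended to i32: scalarized in the same way.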
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i16(i8* %base, <16 x i16>* %offptr) {
+; CHECK-LABEL: unscaled_v16i8_i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vldrh.s32 q0, [r1, #16]
+; CHECK-NEXT: vldrh.s32 q2, [r1, #8]
+; CHECK-NEXT: vadd.i32 q1, q0, r0
+; CHECK-NEXT: vldrh.s32 q0, [r1, #24]
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vadd.i32 q2, q2, r0
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov r6, s4
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: vmov r5, s7
+; CHECK-NEXT: ldrb.w r12, [r2]
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: ldrb r6, [r6]
+; CHECK-NEXT: ldrb r3, [r3]
+; CHECK-NEXT: ldrb r4, [r4]
+; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb.w lr, [r2]
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: vldrh.s32 q0, [r1]
+; CHECK-NEXT: vadd.i32 q3, q0, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: ldrb r2, [r2]
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[0], r0
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[1], r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[2], r0
+; CHECK-NEXT: vmov r0, s15
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[3], r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[4], r0
+; CHECK-NEXT: vmov r0, s9
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[5], r0
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[6], r0
+; CHECK-NEXT: vmov r0, s11
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[7], r0
+; CHECK-NEXT: vmov r0, s5
+; CHECK-NEXT: vmov.8 q0[8], r6
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[9], r0
+; CHECK-NEXT: vmov.8 q0[10], r12
+; CHECK-NEXT: vmov.8 q0[11], r5
+; CHECK-NEXT: vmov.8 q0[12], lr
+; CHECK-NEXT: vmov.8 q0[13], r2
+; CHECK-NEXT: vmov.8 q0[14], r3
+; CHECK-NEXT: vmov.8 q0[15], r4
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+entry:
+ %offs = load <16 x i16>, <16 x i16>* %offptr, align 2
+ %offs.sext = sext <16 x i16> %offs to <16 x i32>
+ %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
+ %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ ret <16 x i8> %gather
+}
+
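+; i8 offsets applied through an i32 GEP, so effectively scaled by 4: the
+; offsets are shifted left by 2 with vshl and the gather is scalarized.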
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_scaled(i32* %base, <16 x i8>* %offptr) {
+; CHECK-LABEL: unscaled_v16i8_scaled:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vldrb.u32 q0, [r1, #8]
+; CHECK-NEXT: vldrb.u32 q2, [r1, #4]
+; CHECK-NEXT: vshl.i32 q0, q0, #2
+; CHECK-NEXT: vshl.i32 q2, q2, #2
+; CHECK-NEXT: vadd.i32 q1, q0, r0
+; CHECK-NEXT: vldrb.u32 q0, [r1, #12]
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vadd.i32 q2, q2, r0
+; CHECK-NEXT: vshl.i32 q0, q0, #2
+; CHECK-NEXT: vmov r6, s4
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov r5, s7
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: ldrb.w r12, [r2]
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: ldrb r6, [r6]
+; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb r3, [r3]
+; CHECK-NEXT: ldrb r4, [r4]
+; CHECK-NEXT: ldrb.w lr, [r2]
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: vldrb.u32 q0, [r1]
+; CHECK-NEXT: vshl.i32 q0, q0, #2
+; CHECK-NEXT: vadd.i32 q3, q0, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: ldrb r2, [r2]
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[0], r0
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[1], r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[2], r0
+; CHECK-NEXT: vmov r0, s15
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[3], r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[4], r0
+; CHECK-NEXT: vmov r0, s9
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[5], r0
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[6], r0
+; CHECK-NEXT: vmov r0, s11
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[7], r0
+; CHECK-NEXT: vmov r0, s5
+; CHECK-NEXT: vmov.8 q0[8], r6
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[9], r0
+; CHECK-NEXT: vmov.8 q0[10], r12
+; CHECK-NEXT: vmov.8 q0[11], r5
+; CHECK-NEXT: vmov.8 q0[12], lr
+; CHECK-NEXT: vmov.8 q0[13], r2
+; CHECK-NEXT: vmov.8 q0[14], r3
+; CHECK-NEXT: vmov.8 q0[15], r4
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+entry:
+ %offs = load <16 x i8>, <16 x i8>* %offptr, align 4
+ %offs.zext = zext <16 x i8> %offs to <16 x i32>
+ %ptrs32 = getelementptr inbounds i32, i32* %base, <16 x i32> %offs.zext
+ %ptrs = bitcast <16 x i32*> %ptrs32 to <16 x i8*>
+ %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ ret <16 x i8> %gather
+}
+
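+; Offsets loaded directly as i32 with no extension needed: still
+; scalarized.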
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_next(i8* %base, <16 x i32>* %offptr) {
+; CHECK-LABEL: unscaled_v16i8_i8_next:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vldrw.u32 q0, [r1, #32]
+; CHECK-NEXT: vldrw.u32 q2, [r1, #16]
+; CHECK-NEXT: vadd.i32 q1, q0, r0
+; CHECK-NEXT: vldrw.u32 q0, [r1, #48]
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vadd.i32 q2, q2, r0
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov r6, s4
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: vmov r5, s7
+; CHECK-NEXT: ldrb.w r12, [r2]
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: ldrb r6, [r6]
+; CHECK-NEXT: ldrb r3, [r3]
+; CHECK-NEXT: ldrb r4, [r4]
+; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb.w lr, [r2]
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: vadd.i32 q3, q0, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: ldrb r2, [r2]
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[0], r0
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[1], r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[2], r0
+; CHECK-NEXT: vmov r0, s15
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[3], r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[4], r0
+; CHECK-NEXT: vmov r0, s9
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[5], r0
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[6], r0
+; CHECK-NEXT: vmov r0, s11
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[7], r0
+; CHECK-NEXT: vmov r0, s5
+; CHECK-NEXT: vmov.8 q0[8], r6
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: vmov.8 q0[9], r0
+; CHECK-NEXT: vmov.8 q0[10], r12
+; CHECK-NEXT: vmov.8 q0[11], r5
+; CHECK-NEXT: vmov.8 q0[12], lr
+; CHECK-NEXT: vmov.8 q0[13], r2
+; CHECK-NEXT: vmov.8 q0[14], r3
+; CHECK-NEXT: vmov.8 q0[15], r4
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+entry:
+ %offs = load <16 x i32>, <16 x i32>* %offptr, align 4
+ %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs
+ %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ ret <16 x i8> %gather
+}
+
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
+declare <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*>, i32, <2 x i1>, <2 x i8>)