Diffstat (limited to 'llvm'): 21 files changed, 1305 insertions, 241 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td index 7861ce464f2..8995f969e2e 100644 --- a/llvm/include/llvm/IR/IntrinsicsARM.td +++ b/llvm/include/llvm/IR/IntrinsicsARM.td @@ -848,11 +848,11 @@ def int_arm_mve_qrdmulh_predicated: Intrinsic<[llvm_anyvector_ty], [IntrNoMem]>; def int_arm_mve_mull_int_predicated: Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty, - LLVMMatchType<1>], + LLVMMatchType<0>], [IntrNoMem]>; def int_arm_mve_mull_poly_predicated: Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty, - LLVMMatchType<1>], + LLVMMatchType<0>], [IntrNoMem]>; def int_arm_mve_qadd_predicated: Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>], diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td index 2a063e4ebde..a1a8614e4a7 100644 --- a/llvm/lib/Target/ARM/ARMInstrMVE.td +++ b/llvm/lib/Target/ARM/ARMInstrMVE.td @@ -3831,11 +3831,11 @@ multiclass MVE_VMULL_m<MVEVectorVTInfo VTI, // Predicated multiply def : Pat<(VTI.DblVec (pred_int (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), (i32 Top), - (VTI.Pred VCCR:$mask), (VTI.Vec MQPR:$inactive))), + (VTI.Pred VCCR:$mask), (VTI.DblVec MQPR:$inactive))), (VTI.DblVec (!cast<Instruction>(NAME) (VTI.Vec MQPR:$Qm), (VTI.Vec MQPR:$Qn), ARMVCCThen, (VTI.Pred VCCR:$mask), - (VTI.Vec MQPR:$inactive)))>; + (VTI.DblVec MQPR:$inactive)))>; } } diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vabdq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vabdq.ll index bafff00ea1d..fcd57664a16 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vabdq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vabdq.ll @@ -1,31 +1,61 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define arm_aapcs_vfpcc <4 x i32> @test_vabdq_u32(<4 x i32> %a, <4 x i32> %b) { +define arm_aapcs_vfpcc <16 x i8> @test_vabdq_s8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_s8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vabd.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <16 x i8> @llvm.arm.mve.vabd.v16i8(<16 x i8> %a, <16 x i8> %b) + ret <16 x i8> %0 +} + +declare <16 x i8> @llvm.arm.mve.vabd.v16i8(<16 x i8>, <16 x i8>) #1 + +define arm_aapcs_vfpcc <4 x i32> @test_vabdq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vabdq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vabd.s32 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = tail call <4 x i32> @llvm.arm.mve.vabd.v4i32(<4 x i32>%a, <4 x i32>%b) + %0 = tail call <4 x i32> @llvm.arm.mve.vabd.v4i32(<4 x i32> %a, <4 x i32> %b) ret <4 x i32> %0 } -declare <4 x i32> @llvm.arm.mve.vabd.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.arm.mve.vabd.v4i32(<4 x i32>, <4 x i32>) #1 -define arm_aapcs_vfpcc <4 x float> @test_vabdq_f32(<4 x float> %a, <4 x float> %b) { +define arm_aapcs_vfpcc <8 x half> @test_vabdq_f32(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vabdq_f32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vabd.f32 q0, q0, q1 +; CHECK-NEXT: vabd.f16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = tail call <4 x float> @llvm.arm.mve.vabd.v4f32(<4 x float>%a, <4 x float>%b) - ret <4 x float> %0 + %0 = tail call <8 x half> @llvm.arm.mve.vabd.v8f16(<8 x half> %a, <8 x half> %b) + ret <8 x half> %0 } -declare <4 x 
float> @llvm.arm.mve.vabd.v4f32(<4 x float>, <4 x float>) +declare <8 x half> @llvm.arm.mve.vabd.v8f16(<8 x half>, <8 x half>) #1 -define arm_aapcs_vfpcc <16 x i8> @test_vabdq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) { +define arm_aapcs_vfpcc <8 x i16> @test_vabdq_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_m_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vabdt.s16 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.abd.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> %inactive) + ret <8 x i16> %2 +} + +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #1 + +declare <8 x i16> @llvm.arm.mve.abd.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #1 + +define arm_aapcs_vfpcc <16 x i8> @test_vabdq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vabdq_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -39,24 +69,71 @@ entry: ret <16 x i8> %2 } -declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #1 + +declare <16 x i8> @llvm.arm.mve.abd.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #1 + +define arm_aapcs_vfpcc <4 x float> @test_vabdq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vabdt.f32 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.abd.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> %inactive) + ret <4 x float> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 + +declare <4 x float> @llvm.arm.mve.abd.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #1 + +define arm_aapcs_vfpcc <8 x i16> @test_vabdq_x_u16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_x_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vabdt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.abd.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vabdq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vabdt.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.abd.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} -declare <16 x i8> @llvm.arm.mve.abd.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) +declare <4 x i32> @llvm.arm.mve.abd.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #1 -define arm_aapcs_vfpcc <8 x half> @test_vabdq_m_f16(<8 x half> %inactive, <8 x 
half> %a, <8 x half> %b, i16 zeroext %p) { -; CHECK-LABEL: test_vabdq_m_f16: +define arm_aapcs_vfpcc <8 x half> @test_vabdq_x_f16(<8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vabdq_x_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vabdt.f16 q0, q1, q2 +; CHECK-NEXT: vabdt.f16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) - %2 = tail call <8 x half> @llvm.arm.mve.abd.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> %inactive) + %2 = tail call <8 x half> @llvm.arm.mve.abd.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> undef) ret <8 x half> %2 } -declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <8 x half> @llvm.arm.mve.abd.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #1 -declare <8 x half> @llvm.arm.mve.abd.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll index a3cb91c21bc..b7becf6dd2c 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define arm_aapcs_vfpcc <4 x i32> @test_vaddq_u32(<4 x i32> %a, <4 x i32> %b) { +define arm_aapcs_vfpcc <4 x i32> @test_vaddq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vaddq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vadd.i32 q0, q1, q0 @@ -11,37 +11,17 @@ entry: ret <4 x i32> %0 } -define arm_aapcs_vfpcc <4 x float> @test_vaddq_f32(<4 x float> %a, <4 x float> %b) { -; CHECK-LABEL: test_vaddq_f32: +define arm_aapcs_vfpcc <8 x half> @test_vaddq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vaddq_f16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vadd.f32 q0, q1, q0 +; CHECK-NEXT: vadd.f16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = fadd <4 x float> %b, %a - ret <4 x float> %0 -} - -define arm_aapcs_vfpcc <8 x half> @test_vsubq_f16(<8 x half> %a, <8 x half> %b) { -; CHECK-LABEL: test_vsubq_f16: -; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vsub.f16 q0, q0, q1 -; CHECK-NEXT: bx lr -entry: - %0 = fsub <8 x half> %a, %b + %0 = fadd <8 x half> %a, %b ret <8 x half> %0 } -define arm_aapcs_vfpcc <8 x i16> @test_vsubq_s16(<8 x i16> %a, <8 x i16> %b) { -; CHECK-LABEL: test_vsubq_s16: -; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vsub.i16 q0, q0, q1 -; CHECK-NEXT: bx lr -entry: - %0 = sub <8 x i16> %a, %b - ret <8 x i16> %0 -} - -define arm_aapcs_vfpcc <16 x i8> @test_vaddq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) { +define arm_aapcs_vfpcc <16 x i8> @test_vaddq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vaddq_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -55,58 +35,59 @@ entry: ret <16 x i8> %2 } -declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #2 -declare <16 x i8> @llvm.arm.mve.add.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) +declare <16 x i8> @llvm.arm.mve.add.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 -define arm_aapcs_vfpcc <8 x half> @test_vaddq_m_f16(<8 x 
half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) { -; CHECK-LABEL: test_vaddq_m_f16: +define arm_aapcs_vfpcc <4 x float> @test_vaddq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vaddq_m_f32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vaddt.f16 q0, q1, q2 +; CHECK-NEXT: vaddt.f32 q0, q1, q2 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 - %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) - %2 = tail call <8 x half> @llvm.arm.mve.add.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> %inactive) - ret <8 x half> %2 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> %inactive) + ret <4 x float> %2 } -declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 -declare <8 x half> @llvm.arm.mve.add.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) +declare <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #2 -define arm_aapcs_vfpcc <4 x float> @test_vsubq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) { -; CHECK-LABEL: test_vsubq_m_f32: +define arm_aapcs_vfpcc <8 x i16> @test_vaddq_x_u16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vaddq_x_u16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vsubt.f32 q0, q1, q2 +; CHECK-NEXT: vaddt.i16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 - %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) - %2 = tail call <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> %inactive) - ret <4 x float> %2 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 -declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) +declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -define arm_aapcs_vfpcc <4 x i32> @test_vsubq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) { -; CHECK-LABEL: test_vsubq_m_u32: +define arm_aapcs_vfpcc <8 x half> @test_vaddq_x_f16(<8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vaddq_x_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vsubt.i32 q0, q1, q2 +; CHECK-NEXT: vaddt.f16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 - %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) - %2 = tail call <4 x i32> @llvm.arm.mve.sub.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) - ret <4 x i32> %2 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x half> @llvm.arm.mve.add.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> undef) + ret <8 x half> %2 } -declare <4 x i32> @llvm.arm.mve.sub.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) +declare <8 x half> 
@llvm.arm.mve.add.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #2 + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vandq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vandq.ll index 1b1d498bc37..651ac17234b 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vandq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vandq.ll @@ -11,24 +11,24 @@ entry: ret <16 x i8> %0 } -define arm_aapcs_vfpcc <4 x i32> @test_vandq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vandq_u32: +define arm_aapcs_vfpcc <8 x i16> @test_vandq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vandq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vand q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = and <4 x i32> %b, %a - ret <4 x i32> %0 + %0 = and <8 x i16> %b, %a + ret <8 x i16> %0 } -define arm_aapcs_vfpcc <8 x i16> @test_vandq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vandq_s16: +define arm_aapcs_vfpcc <4 x i32> @test_vandq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vandq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vand q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = and <8 x i16> %b, %a - ret <8 x i16> %0 + %0 = and <4 x i32> %b, %a + ret <4 x i32> %0 } define arm_aapcs_vfpcc <4 x float> @test_vandq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { @@ -80,25 +80,98 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.and.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -; Function Attrs: nounwind readnone -define arm_aapcs_vfpcc <8 x half> @test_vandq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vandq_m_f32: +define arm_aapcs_vfpcc <4 x i32> @test_vandq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_m_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vandt q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vandq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst ; CHECK-NEXT: vandt q0, q1, q2 ; CHECK-NEXT: bx lr entry: + %0 = bitcast <8 x half> %a to <8 x i16> + %1 = bitcast <8 x half> %b to <8 x i16> + %2 = zext i16 %p to i32 + %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2) + %4 = bitcast <8 x half> %inactive to <8 x i16> + %5 = tail call <8 x i16> @llvm.arm.mve.and.predicated.v8i16.v8i1(<8 x i16> %0, <8 x i16> %1, <8 x i1> %3, <8 x i16> %4) + %6 = bitcast <8 x i16> %5 to <8 x half> + ret <8 x half> %6 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vandq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vandt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = 
tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.and.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vandq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vandt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.and.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vandq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vandt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vandq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vandq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vandt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: %0 = bitcast <4 x float> %a to <4 x i32> %1 = bitcast <4 x float> %b to <4 x i32> %2 = zext i16 %p to i32 %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) - %4 = bitcast <4 x float> %inactive to <4 x i32> - %5 = tail call <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> %4) - %6 = bitcast <4 x i32> %5 to <8 x half> - ret <8 x half> %6 + %4 = tail call <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> undef) + %5 = bitcast <4 x i32> %4 to <4 x float> + ret <4 x float> %5 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 - -declare <4 x i32> @llvm.arm.mve.and.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vbicq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vbicq.ll index 47877a13cb9..97b7ac9fc34 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vbicq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vbicq.ll @@ -12,26 +12,26 @@ entry: ret <16 x i8> %1 } -define arm_aapcs_vfpcc <4 x i32> @test_vbicq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vbicq_u32: +define arm_aapcs_vfpcc <8 x i16> @test_vbicq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vbicq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vbic q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1> - %1 = and <4 x i32> %0, %a - ret <4 x i32> %1 + %0 = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> + %1 = and <8 x i16> %0, %a + ret <8 x i16> %1 } -define arm_aapcs_vfpcc <8 x i16> @test_vbicq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vbicq_s16: +define arm_aapcs_vfpcc <4 x i32> @test_vbicq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vbicq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vbic q0, q0, q1 ; CHECK-NEXT: bx 
lr entry: - %0 = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - %1 = and <8 x i16> %0, %a - ret <8 x i16> %1 + %0 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1> + %1 = and <4 x i32> %0, %a + ret <4 x i32> %1 } define arm_aapcs_vfpcc <4 x float> @test_vbicq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { @@ -84,25 +84,98 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -; Function Attrs: nounwind readnone -define arm_aapcs_vfpcc <8 x half> @test_vbicq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vbicq_m_f32: +define arm_aapcs_vfpcc <4 x i32> @test_vbicq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_m_s32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst ; CHECK-NEXT: vbict q0, q1, q2 ; CHECK-NEXT: bx lr entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vbicq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_m_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vbict q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = bitcast <8 x half> %a to <8 x i16> + %1 = bitcast <8 x half> %b to <8 x i16> + %2 = zext i16 %p to i32 + %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2) + %4 = bitcast <8 x half> %inactive to <8 x i16> + %5 = tail call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> %0, <8 x i16> %1, <8 x i1> %3, <8 x i16> %4) + %6 = bitcast <8 x i16> %5 to <8 x half> + ret <8 x half> %6 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vbicq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vbict q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.bic.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vbicq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vbict q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vbicq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vbict q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 
%p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vbicq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vbicq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vbict q0, q0, q1 +; CHECK-NEXT: bx lr +entry: %0 = bitcast <4 x float> %a to <4 x i32> %1 = bitcast <4 x float> %b to <4 x i32> %2 = zext i16 %p to i32 %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) - %4 = bitcast <4 x float> %inactive to <4 x i32> - %5 = tail call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> %4) - %6 = bitcast <4 x i32> %5 to <8 x half> - ret <8 x half> %6 + %4 = tail call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> undef) + %5 = bitcast <4 x i32> %4 to <4 x float> + ret <4 x float> %5 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 - -declare <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/veorq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/veorq.ll index 9b66f3656eb..6f70cafb013 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/veorq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/veorq.ll @@ -11,24 +11,24 @@ entry: ret <16 x i8> %0 } -define arm_aapcs_vfpcc <4 x i32> @test_veorq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_veorq_u32: +define arm_aapcs_vfpcc <8 x i16> @test_veorq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_veorq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: veor q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = xor <4 x i32> %b, %a - ret <4 x i32> %0 + %0 = xor <8 x i16> %b, %a + ret <8 x i16> %0 } -define arm_aapcs_vfpcc <8 x i16> @test_veorq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_veorq_s16: +define arm_aapcs_vfpcc <4 x i32> @test_veorq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_veorq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: veor q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = xor <8 x i16> %b, %a - ret <8 x i16> %0 + %0 = xor <4 x i32> %b, %a + ret <4 x i32> %0 } define arm_aapcs_vfpcc <4 x float> @test_veorq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { @@ -80,25 +80,98 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.eor.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -; Function Attrs: nounwind readnone -define arm_aapcs_vfpcc <8 x half> @test_veorq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_veorq_m_f32: +define arm_aapcs_vfpcc <4 x i32> @test_veorq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_m_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: veort q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> 
@llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_veorq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst ; CHECK-NEXT: veort q0, q1, q2 ; CHECK-NEXT: bx lr entry: + %0 = bitcast <8 x half> %a to <8 x i16> + %1 = bitcast <8 x half> %b to <8 x i16> + %2 = zext i16 %p to i32 + %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2) + %4 = bitcast <8 x half> %inactive to <8 x i16> + %5 = tail call <8 x i16> @llvm.arm.mve.eor.predicated.v8i16.v8i1(<8 x i16> %0, <8 x i16> %1, <8 x i1> %3, <8 x i16> %4) + %6 = bitcast <8 x i16> %5 to <8 x half> + ret <8 x half> %6 +} + +define arm_aapcs_vfpcc <16 x i8> @test_veorq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: veort q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.eor.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_veorq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: veort q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.eor.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_veorq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: veort q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_veorq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_veorq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: veort q0, q0, q1 +; CHECK-NEXT: bx lr +entry: %0 = bitcast <4 x float> %a to <4 x i32> %1 = bitcast <4 x float> %b to <4 x i32> %2 = zext i16 %p to i32 %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) - %4 = bitcast <4 x float> %inactive to <4 x i32> - %5 = tail call <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> %4) - %6 = bitcast <4 x i32> %5 to <8 x half> - ret <8 x half> %6 + %4 = tail call <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> undef) + %5 = bitcast <4 x i32> %4 to <4 x float> + ret <4 x float> %5 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 - -declare <4 x i32> @llvm.arm.mve.eor.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 diff --git 
a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vhaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vhaddq.ll index 11b21142427..82ecc062c08 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vhaddq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vhaddq.ll @@ -90,3 +90,46 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 declare <4 x i32> @llvm.arm.mve.hadd.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #1 + +define arm_aapcs_vfpcc <16 x i8> @test_vhaddq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vhaddq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vhaddt.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.hadd.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vhaddq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vhaddq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vhaddt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.hadd.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vhaddq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vhaddq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vhaddt.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.hadd.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxnmq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxnmq.ll index d89308bb594..54a140042bf 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxnmq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxnmq.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define dso_local arm_aapcs_vfpcc <8 x half> @test_vmaxnmq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x half> @test_vmaxnmq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmaxnmq_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmaxnm.f16 q0, q0, q1 @@ -13,7 +13,7 @@ entry: declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) #1 -define dso_local arm_aapcs_vfpcc <4 x float> @test_vmaxnmq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x float> @test_vmaxnmq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmaxnmq_f32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmaxnm.f32 q0, q0, q1 @@ -25,7 +25,7 @@ entry: declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) #1 -define dso_local arm_aapcs_vfpcc <8 x half> @test_vmaxnmq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x half> @test_vmaxnmq_m_f16(<8 x 
half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmaxnmq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -38,11 +38,12 @@ entry: %2 = tail call <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> %inactive) ret <8 x half> %2 } + declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #2 -define dso_local arm_aapcs_vfpcc <4 x float> @test_vmaxnmq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x float> @test_vmaxnmq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmaxnmq_m_f32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -59,3 +60,32 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 declare <4 x float> @llvm.arm.mve.max.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vmaxnmq_x_f16(<8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmaxnmq_x_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmaxnmt.f32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x half> @llvm.arm.mve.max.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> undef) + ret <8 x half> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vmaxnmq_x_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmaxnmq_x_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmaxnmt.f32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.max.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> undef) + ret <4 x float> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxq.ll index 09a7d60cd16..622aa1ccd33 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmaxq.ll @@ -1,89 +1,132 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define dso_local arm_aapcs_vfpcc <16 x i8> @test_vmaxq_u8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vmaxq_u8: +define arm_aapcs_vfpcc <16 x i8> @test_vmaxq_s8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmaxq_s8: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmin.u8 q0, q0, q1 +; CHECK-NEXT: vmax.s8 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = icmp ugt <16 x i8> %a, %b + %0 = icmp slt <16 x i8> %a, %b %1 = select <16 x i1> %0, <16 x i8> %b, <16 x i8> %a ret <16 x i8> %1 } -define dso_local arm_aapcs_vfpcc <8 x i16> @test_vmaxq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vmaxq_s16: +define arm_aapcs_vfpcc <8 x i16> @test_vmaxq_u16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmaxq_u16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmin.s16 q0, q0, q1 +; CHECK-NEXT: 
vmax.u16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = icmp sgt <8 x i16> %a, %b + %0 = icmp ult <8 x i16> %a, %b %1 = select <8 x i1> %0, <8 x i16> %b, <8 x i16> %a ret <8 x i16> %1 } -define dso_local arm_aapcs_vfpcc <4 x i32> @test_vmaxq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vmaxq_u32: +define arm_aapcs_vfpcc <4 x i32> @test_vmaxq_s32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmaxq_s32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmin.u32 q0, q0, q1 +; CHECK-NEXT: vmax.s32 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = icmp ugt <4 x i32> %a, %b + %0 = icmp slt <4 x i32> %a, %b %1 = select <4 x i1> %0, <4 x i32> %b, <4 x i32> %a ret <4 x i32> %1 } -define dso_local arm_aapcs_vfpcc <16 x i8> @test_vmaxq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vmaxq_m_s8: +define arm_aapcs_vfpcc <16 x i8> @test_vmaxq_m_u8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_m_u8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vmint.s8 q0, q1, q2 +; CHECK-NEXT: vmaxt.s8 q0, q1, q2 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = tail call <16 x i8> @llvm.arm.mve.min.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> %inactive) + %2 = tail call <16 x i8> @llvm.arm.mve.max.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> %inactive) ret <16 x i8> %2 } declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #2 -declare <16 x i8> @llvm.arm.mve.min.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 +declare <16 x i8> @llvm.arm.mve.max.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 -define dso_local arm_aapcs_vfpcc <8 x i16> @test_vmaxq_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vmaxq_m_u16: +define arm_aapcs_vfpcc <8 x i16> @test_vmaxq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_m_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vmint.s16 q0, q1, q2 +; CHECK-NEXT: vmaxt.s16 q0, q1, q2 ; CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) - %2 = tail call <8 x i16> @llvm.arm.mve.min.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> %inactive) + %2 = tail call <8 x i16> @llvm.arm.mve.max.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> %inactive) ret <8 x i16> %2 } declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 -declare <8 x i16> @llvm.arm.mve.min.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 +declare <8 x i16> @llvm.arm.mve.max.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -define dso_local arm_aapcs_vfpcc <4 x i32> @test_vmaxq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vmaxq_m_s32: +define arm_aapcs_vfpcc <4 x i32> @test_vmaxq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_m_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst -; CHECK-NEXT: vmint.s32 q0, q1, q2 +; CHECK-NEXT: vmaxt.s32 q0, q1, q2 ; 
CHECK-NEXT: bx lr entry: %0 = zext i16 %p to i32 %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) - %2 = tail call <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + %2 = tail call <4 x i32> @llvm.arm.mve.max.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) ret <4 x i32> %2 } declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 -declare <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 +declare <4 x i32> @llvm.arm.mve.max.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <16 x i8> @test_vmaxq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmaxt.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.max.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vmaxq_x_u16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_x_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmaxt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.max.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmaxq_x_s32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmaxq_x_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmaxt.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.max.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminnmq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminnmq.ll index 10cd674d39a..ae449099523 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminnmq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminnmq.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define dso_local arm_aapcs_vfpcc <8 x half> @test_vminnmq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x half> @test_vminnmq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminnmq_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vminnm.f16 q0, q0, q1 @@ -13,7 +13,7 @@ entry: declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) #1 -define dso_local arm_aapcs_vfpcc <4 x float> @test_vminnmq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x float> @test_vminnmq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminnmq_f32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vminnm.f32 q0, q0, q1 @@ -25,7 +25,7 @@ entry: declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) #1 -define dso_local 
arm_aapcs_vfpcc <8 x half> @test_vminnmq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x half> @test_vminnmq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminnmq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -43,7 +43,7 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x half> @llvm.arm.mve.min.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #2 -define dso_local arm_aapcs_vfpcc <4 x float> @test_vminnmq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x float> @test_vminnmq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminnmq_m_f32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -60,3 +60,32 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 declare <4 x float> @llvm.arm.mve.min.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vminnmq_x_f16(<8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vminnmq_x_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vminnmt.f32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x half> @llvm.arm.mve.min.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> undef) + ret <8 x half> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vminnmq_x_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vminnmq_x_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vminnmt.f32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.min.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> undef) + ret <4 x float> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminq.ll index 0cbef86c928..96bc2233f22 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminq.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define dso_local arm_aapcs_vfpcc <16 x i8> @test_vminq_u8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <16 x i8> @test_vminq_u8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminq_u8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmin.u8 q0, q0, q1 @@ -12,7 +12,7 @@ entry: ret <16 x i8> %1 } -define dso_local arm_aapcs_vfpcc <8 x i16> @test_vminq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x i16> @test_vminq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmin.s16 q0, q0, q1 @@ -23,7 +23,7 @@ entry: ret <8 x i16> %1 } -define dso_local arm_aapcs_vfpcc <4 x i32> @test_vminq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x i32> 
@test_vminq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vminq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmin.u32 q0, q0, q1 @@ -34,7 +34,7 @@ entry: ret <4 x i32> %1 } -define dso_local arm_aapcs_vfpcc <16 x i8> @test_vminq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +define arm_aapcs_vfpcc <16 x i8> @test_vminq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vminq_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -52,7 +52,7 @@ declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #2 declare <16 x i8> @llvm.arm.mve.min.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 -define dso_local arm_aapcs_vfpcc <8 x i16> @test_vminq_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +define arm_aapcs_vfpcc <8 x i16> @test_vminq_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vminq_m_u16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -70,7 +70,7 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.min.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -define dso_local arm_aapcs_vfpcc <4 x i32> @test_vminq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +define arm_aapcs_vfpcc <4 x i32> @test_vminq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vminq_m_s32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -87,3 +87,46 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 declare <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <16 x i8> @test_vminq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vminq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmint.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.min.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vminq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vminq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmint.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.min.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vminq_x_s32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vminq_x_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmint.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulhq.ll 
b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulhq.ll index 78ee17b5541..ba15af6c549 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulhq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulhq.ll @@ -90,3 +90,46 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 declare <4 x i32> @llvm.arm.mve.mulh.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #1 + +define arm_aapcs_vfpcc <16 x i8> @test_vmulhq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulhq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulht.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.mulh.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vmulhq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulhq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulht.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mulh.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmulhq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulhq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulht.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mulh.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmullbq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmullbq.ll index 78f24fa62c2..e443d54f020 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmullbq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmullbq.ll @@ -50,7 +50,7 @@ entry: declare <4 x i32> @llvm.arm.mve.vmull.poly.v4i32.v8i16(<8 x i16>, <8 x i16>, i32) #1 -define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_int_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_int_m_s8(<8 x i16> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmullbq_int_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -60,15 +60,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_int_m_s8(<16 x i8> %inactive, <16 entry: %0 = zext i16 %p to i32 %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = tail call <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 0, <16 x i1> %1, <16 x i8> %inactive) + %2 = tail call <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 0, <16 x i1> %1, <8 x i16> %inactive) ret <8 x i16> %2 } declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #1 -declare <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <16 x i8>) #1 +declare <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, 
<8 x i16>) #1 -define arm_aapcs_vfpcc <4 x i32> @test_vmullbq_int_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x i32> @test_vmullbq_int_m_u16(<4 x i32> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmullbq_int_m_u16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -78,15 +78,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vmullbq_int_m_u16(<8 x i16> %inactive, <8 entry: %0 = zext i16 %p to i32 %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) - %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 0, <8 x i1> %1, <8 x i16> %inactive) + %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 0, <8 x i1> %1, <4 x i32> %inactive) ret <4 x i32> %2 } declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #1 -declare <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <8 x i16>) #1 +declare <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <4 x i32>) #1 -define arm_aapcs_vfpcc <2 x i64> @test_vmullbq_int_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <2 x i64> @test_vmullbq_int_m_s32(<2 x i64> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmullbq_int_m_s32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -96,15 +96,15 @@ define arm_aapcs_vfpcc <2 x i64> @test_vmullbq_int_m_s32(<4 x i32> %inactive, <4 entry: %0 = zext i16 %p to i32 %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) - %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1, <4 x i32> %inactive) + %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1, <2 x i64> %inactive) ret <2 x i64> %2 } declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 -declare <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <4 x i32>) #1 +declare <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <2 x i64>) #1 -define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_poly_m_p8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_poly_m_p8(<8 x i16> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmullbq_poly_m_p8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -114,8 +114,68 @@ define arm_aapcs_vfpcc <8 x i16> @test_vmullbq_poly_m_p8(<16 x i8> %inactive, <1 entry: %0 = zext i16 %p to i32 %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = tail call <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 0, <16 x i1> %1, <16 x i8> %inactive) + %2 = tail call <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 0, <16 x i1> %1, <8 x i16> %inactive) ret <8 x i16> %2 } -declare <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <16 x i8>) #1 +declare <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <8 x i16>) #1 + +define 
arm_aapcs_vfpcc <8 x i16> @test_vmullbq_int_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmullbq_int_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmullbt.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 0, <16 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmullbq_int_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmullbq_int_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmullbt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 0, <8 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <2 x i64> @test_vmullbq_int_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmullbq_int_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmullbt.s32 q2, q0, q1 +; CHECK-NEXT: vmov q0, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1, <2 x i64> undef) + ret <2 x i64> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmullbq_poly_x_p16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmullbq_poly_x_p16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmullbt.p16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mull.poly.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 0, <8 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +declare <4 x i32> @llvm.arm.mve.mull.poly.predicated.v4i32.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <4 x i32>) #1 + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulltq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulltq.ll index 5dde90e298a..ca2aae6034a 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulltq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulltq.ll @@ -50,7 +50,7 @@ entry: declare <4 x i32> @llvm.arm.mve.vmull.poly.v4i32.v8i16(<8 x i16>, <8 x i16>, i32) #1 -define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_int_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_int_m_s8(<8 x i16> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulltq_int_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -60,15 +60,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_int_m_s8(<16 x i8> %inactive, <16 entry: %0 = zext i16 %p to i32 %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = tail call <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <16 x i8> %inactive) + %2 = tail call <8 x i16> 
@llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <8 x i16> %inactive) ret <8 x i16> %2 } declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #1 -declare <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <16 x i8>) #1 +declare <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <8 x i16>) #1 -define arm_aapcs_vfpcc <4 x i32> @test_vmulltq_int_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <4 x i32> @test_vmulltq_int_m_u16(<4 x i32> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulltq_int_m_u16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -78,15 +78,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vmulltq_int_m_u16(<8 x i16> %inactive, <8 entry: %0 = zext i16 %p to i32 %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) - %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 1, <8 x i1> %1, <8 x i16> %inactive) + %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 1, <8 x i1> %1, <4 x i32> %inactive) ret <4 x i32> %2 } declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #1 -declare <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <8 x i16>) #1 +declare <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <4 x i32>) #1 -define arm_aapcs_vfpcc <2 x i64> @test_vmulltq_int_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <2 x i64> @test_vmulltq_int_m_s32(<2 x i64> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulltq_int_m_s32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -96,15 +96,15 @@ define arm_aapcs_vfpcc <2 x i64> @test_vmulltq_int_m_s32(<4 x i32> %inactive, <4 entry: %0 = zext i16 %p to i32 %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) - %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 1, <4 x i1> %1, <4 x i32> %inactive) + %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 1, <4 x i1> %1, <2 x i64> %inactive) ret <2 x i64> %2 } declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 -declare <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <4 x i32>) #1 +declare <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <2 x i64>) #1 -define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_poly_m_p8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_poly_m_p8(<8 x i16> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulltq_poly_m_p8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -114,8 +114,66 @@ define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_poly_m_p8(<16 x i8> %inactive, <1 entry: %0 = zext i16 %p to i32 %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = tail call <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <16 x i8> %inactive) + %2 = 
tail call <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <8 x i16> %inactive) + ret <8 x i16> %2 +} + +declare <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <8 x i16>) #1 + +define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_int_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulltq_int_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulltt.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mull.int.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmulltq_int_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulltq_int_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulltt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mull.int.predicated.v4i32.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 1, <8 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <2 x i64> @test_vmulltq_int_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulltq_int_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulltt.s32 q2, q0, q1 +; CHECK-NEXT: vmov q0, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 1, <4 x i1> %1, <2 x i64> undef) + ret <2 x i64> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vmulltq_poly_x_p8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulltq_poly_x_p8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmulltt.p8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32 1, <16 x i1> %1, <8 x i16> undef) ret <8 x i16> %2 } -declare <8 x i16> @llvm.arm.mve.mull.poly.predicated.v8i16.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <16 x i8>) #1 diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll index 09d8e11a71a..19ed08a77a2 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll @@ -1,7 +1,27 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s -define arm_aapcs_vfpcc <4 x i32> @test_vmulq_u32(<4 x i32> %a, <4 x i32> %b) { +define arm_aapcs_vfpcc <16 x i8> @test_vmulq_u8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulq_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmul.i8 q0, q1, q0 +; CHECK-NEXT: bx lr +entry: + %0 = mul <16 x i8> %b, %a + ret <16 x i8> %0 +} + +define arm_aapcs_vfpcc <8 x i16> 
@test_vmulq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vmulq_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmul.i16 q0, q1, q0 +; CHECK-NEXT: bx lr +entry: + %0 = mul <8 x i16> %b, %a + ret <8 x i16> %0 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmulq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmul.i32 q0, q1, q0 @@ -11,17 +31,17 @@ entry: ret <4 x i32> %0 } -define arm_aapcs_vfpcc <4 x float> @test_vmulq_f32(<4 x float> %a, <4 x float> %b) { +define arm_aapcs_vfpcc <4 x float> @test_vmulq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { ; CHECK-LABEL: test_vmulq_f32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmul.f32 q0, q1, q0 +; CHECK-NEXT: vmul.f32 q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = fmul <4 x float> %b, %a + %0 = fmul <4 x float> %a, %b ret <4 x float> %0 } -define arm_aapcs_vfpcc <16 x i8> @test_vmulq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) { +define arm_aapcs_vfpcc <16 x i8> @test_vmulq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vmulq_m_s8: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -35,11 +55,47 @@ entry: ret <16 x i8> %2 } -declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #2 + +declare <16 x i8> @llvm.arm.mve.mul.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 + +define arm_aapcs_vfpcc <8 x i16> @test_vmulq_m_u16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_m_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.i16 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mul.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> %inactive) + ret <8 x i16> %2 +} + +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 -declare <16 x i8> @llvm.arm.mve.mul.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) +declare <8 x i16> @llvm.arm.mve.mul.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -define arm_aapcs_vfpcc <8 x half> @test_vmulq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) { +define arm_aapcs_vfpcc <4 x i32> @test_vmulq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_m_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.i32 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mul.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.mul.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vmulq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { ; CHECK-LABEL: test_vmulq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 @@ -53,6 +109,63 @@ entry: ret <8 x half> %2 } -declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <8 x half> 
@llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #2 + +define arm_aapcs_vfpcc <16 x i8> @test_vmulq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.i8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.mul.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vmulq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.i16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.mul.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vmulq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.i32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.mul.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vmulq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vmulq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vmult.f32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.mul.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> undef) + ret <4 x float> %2 +} + +declare <4 x float> @llvm.arm.mve.mul.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #2 -declare <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vornq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vornq.ll index 48f6a3cd23a..be4ad54dd13 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vornq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vornq.ll @@ -12,26 +12,26 @@ entry: ret <16 x i8> %1 } -define arm_aapcs_vfpcc <4 x i32> @test_vornq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vornq_u32: +define arm_aapcs_vfpcc <8 x i16> @test_vornq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vornq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vorn q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1> - %1 = or <4 x i32> %0, %a - ret <4 x i32> %1 + %0 = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> + %1 = or <8 x i16> %0, %a + ret <8 x i16> %1 } -define arm_aapcs_vfpcc <8 x i16> @test_vornq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vornq_s16: +define arm_aapcs_vfpcc <4 x i32> @test_vornq_u32(<4 x i32> %a, <4 x 
i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vornq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vorn q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %0 = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - %1 = or <8 x i16> %0, %a - ret <8 x i16> %1 + %0 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1> + %1 = or <4 x i32> %0, %a + ret <4 x i32> %1 } define arm_aapcs_vfpcc <4 x float> @test_vornq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { @@ -84,25 +84,98 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -; Function Attrs: nounwind readnone -define arm_aapcs_vfpcc <8 x half> @test_vornq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vornq_m_f32: +define arm_aapcs_vfpcc <4 x i32> @test_vornq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_m_s32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst ; CHECK-NEXT: vornt q0, q1, q2 ; CHECK-NEXT: bx lr entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vornq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_m_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vornt q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = bitcast <8 x half> %a to <8 x i16> + %1 = bitcast <8 x half> %b to <8 x i16> + %2 = zext i16 %p to i32 + %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2) + %4 = bitcast <8 x half> %inactive to <8 x i16> + %5 = tail call <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16> %0, <8 x i16> %1, <8 x i1> %3, <8 x i16> %4) + %6 = bitcast <8 x i16> %5 to <8 x half> + ret <8 x half> %6 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vornq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vornt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.orn.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vornq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vornt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vornq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_x_u32: +; CHECK: @ 
%bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vornt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vornq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vornq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vornt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: %0 = bitcast <4 x float> %a to <4 x i32> %1 = bitcast <4 x float> %b to <4 x i32> %2 = zext i16 %p to i32 %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) - %4 = bitcast <4 x float> %inactive to <4 x i32> - %5 = tail call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> %4) - %6 = bitcast <4 x i32> %5 to <8 x half> - ret <8 x half> %6 + %4 = tail call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> undef) + %5 = bitcast <4 x i32> %4 to <4 x float> + ret <4 x float> %5 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 - -declare <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vorrq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vorrq.ll index ccb511a85e5..168dde0806b 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vorrq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vorrq.ll @@ -11,24 +11,24 @@ entry: ret <16 x i8> %0 } -define arm_aapcs_vfpcc <4 x i32> @test_vorrq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vorrq_u32: +define arm_aapcs_vfpcc <8 x i16> @test_vorrq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vorrq_s16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = or <4 x i32> %b, %a - ret <4 x i32> %0 + %0 = or <8 x i16> %b, %a + ret <8 x i16> %0 } -define arm_aapcs_vfpcc <8 x i16> @test_vorrq_s16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr #0 { -; CHECK-LABEL: test_vorrq_s16: +define arm_aapcs_vfpcc <4 x i32> @test_vorrq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vorrq_u32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: bx lr entry: - %0 = or <8 x i16> %b, %a - ret <8 x i16> %0 + %0 = or <4 x i32> %b, %a + ret <4 x i32> %0 } define arm_aapcs_vfpcc <4 x float> @test_vorrq_f32(<4 x float> %a, <4 x float> %b) local_unnamed_addr #0 { @@ -80,25 +80,98 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 declare <8 x i16> @llvm.arm.mve.orr.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 -; Function Attrs: nounwind readnone -define arm_aapcs_vfpcc <8 x half> @test_vorrq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { -; CHECK-LABEL: test_vorrq_m_f32: +define arm_aapcs_vfpcc <4 x i32> @test_vorrq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_m_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vorrt q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> 
@llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> %inactive) + ret <4 x i32> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x i32> @llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vorrq_m_f16(<8 x half> %inactive, <8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_m_f16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmsr p0, r0 ; CHECK-NEXT: vpst ; CHECK-NEXT: vorrt q0, q1, q2 ; CHECK-NEXT: bx lr entry: + %0 = bitcast <8 x half> %a to <8 x i16> + %1 = bitcast <8 x half> %b to <8 x i16> + %2 = zext i16 %p to i32 + %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2) + %4 = bitcast <8 x half> %inactive to <8 x i16> + %5 = tail call <8 x i16> @llvm.arm.mve.orr.predicated.v8i16.v8i1(<8 x i16> %0, <8 x i16> %1, <8 x i1> %3, <8 x i16> %4) + %6 = bitcast <8 x i16> %5 to <8 x half> + ret <8 x half> %6 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vorrq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vorrt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.orr.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vorrq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vorrt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.orr.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vorrq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vorrt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <4 x float> @test_vorrq_m_f32(<4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vorrq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vorrt q0, q0, q1 +; CHECK-NEXT: bx lr +entry: %0 = bitcast <4 x float> %a to <4 x i32> %1 = bitcast <4 x float> %b to <4 x i32> %2 = zext i16 %p to i32 %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) - %4 = bitcast <4 x float> %inactive to <4 x i32> - %5 = tail call <4 x i32> @llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> %4) - %6 = bitcast <4 x i32> %5 to <8 x half> - ret <8 x half> %6 + %4 = tail call <4 x i32> @llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32> %0, <4 x i32> %1, <4 x i1> %3, <4 x i32> undef) + %5 = bitcast <4 x i32> %4 to <4 x float> + ret <4 x float> %5 } -declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 - -declare <4 x i32> 
@llvm.arm.mve.orr.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #2 diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrhaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrhaddq.ll index 0ba47befa37..15b23d5ed27 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrhaddq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrhaddq.ll @@ -90,3 +90,46 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 declare <4 x i32> @llvm.arm.mve.rhadd.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #1 + +define arm_aapcs_vfpcc <16 x i8> @test_vrhaddq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrhaddq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrhaddt.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.rhadd.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vrhaddq_x_u16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrhaddq_x_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrhaddt.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.rhadd.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vrhaddq_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrhaddq_x_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrhaddt.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.rhadd.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrmulhq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrmulhq.ll index 3975e4eca87..9418f581eb4 100644 --- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrmulhq.ll +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vrmulhq.ll @@ -90,3 +90,46 @@ entry: declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 declare <4 x i32> @llvm.arm.mve.rmulh.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>) #1 + +define arm_aapcs_vfpcc <16 x i8> @test_vrmulhq_x_u8(<16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrmulhq_x_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrmulht.s8 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.rmulh.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> undef) + ret <16 x i8> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vrmulhq_x_s16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrmulhq_x_s16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrmulht.s16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = 
tail call <8 x i16> @llvm.arm.mve.rmulh.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vrmulhq_m_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) local_unnamed_addr #0 { +; CHECK-LABEL: test_vrmulhq_m_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vrmulht.s32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.rmulh.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i1> %1, <4 x i32> undef) + ret <4 x i32> %2 +} + diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll new file mode 100644 index 00000000000..2959e61e0d8 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll @@ -0,0 +1,93 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s + +define arm_aapcs_vfpcc <4 x i32> @test_vsubq_u32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vsubq_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vsub.i32 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = sub <4 x i32> %a, %b + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x half> @test_vsubq_f16(<8 x half> %a, <8 x half> %b) local_unnamed_addr #0 { +; CHECK-LABEL: test_vsubq_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vsub.f16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = fsub <8 x half> %a, %b + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vsubq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vsubq_m_s8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vsubt.i8 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> %inactive) + ret <16 x i8> %2 +} + +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) #2 + +declare <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) #2 + +define arm_aapcs_vfpcc <4 x float> @test_vsubq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vsubq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vsubt.f32 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> %inactive) + ret <4 x float> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #2 + +declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #2 + +define arm_aapcs_vfpcc <8 x i16> @test_vsubq_x_u16(<8 x i16> %a, <8 x i16> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vsubq_x_u16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vsubt.i16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> 
@llvm.arm.mve.sub.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1> %1, <8 x i16> undef) + ret <8 x i16> %2 +} + +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) #2 + +declare <8 x i16> @llvm.arm.mve.sub.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #2 + +define arm_aapcs_vfpcc <8 x half> @test_vsubq_x_f16(<8 x half> %a, <8 x half> %b, i16 zeroext %p) local_unnamed_addr #1 { +; CHECK-LABEL: test_vsubq_x_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vsubt.f16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x half> @llvm.arm.mve.sub.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %1, <8 x half> undef) + ret <8 x half> %2 +} + +declare <8 x half> @llvm.arm.mve.sub.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>) #2 + |
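Note (not part of the diff above): a minimal standalone sketch of the operand shape the new "_x" tests exercise. The intrinsic names, types and calling convention are taken verbatim from the patch; only the function name @example_vmullb_x_u32 is hypothetical. The scalar predicate %p is widened to a vector predicate with llvm.arm.mve.pred.i2v, and the inactive operand of the widening multiply now has the double-width result type (<2 x i64>) rather than the input type, with undef marking the don't-care lanes.

; Sketch only, assuming the declarations introduced by this patch.
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
declare <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <2 x i64>)

define arm_aapcs_vfpcc <2 x i64> @example_vmullb_x_u32(<4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
entry:
  ; Widen the scalar predicate register value to a <4 x i1> mask.
  %0 = zext i16 %p to i32
  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  ; i32 0 selects the bottom ("vmullb") halves; <2 x i64> undef leaves the
  ; inactive lanes unspecified, which is what the _x intrinsic variants want.
  %2 = tail call <2 x i64> @llvm.arm.mve.mull.int.predicated.v2i64.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1, <2 x i64> undef)
  ret <2 x i64> %2
}

Fed through the same llc RUN line as the tests above, this should select a vmullbt.s32 inside a VPT block, as the CHECK lines for test_vmullbq_int_x_u32 show.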