diff options
author | David Green <david.green@arm.com> | 2019-10-10 13:05:04 +0000 |
---|---|---|
committer | David Green <david.green@arm.com> | 2019-10-10 13:05:04 +0000 |
commit | 39596ec2fee06d90907e9efce9a230b2da8eacd2 (patch) | |
tree | 7db45894680332bdfe4b24d9f6a37b28b8c55f6a | |
parent | f5b2b76008dca191c327c5c8c070e5f089608329 (diff) | |
download | bcm5719-llvm-39596ec2fee06d90907e9efce9a230b2da8eacd2.tar.gz bcm5719-llvm-39596ec2fee06d90907e9efce9a230b2da8eacd2.zip |
[ARM] VQADD instructions
This selects the MVE VQADD instruction from the vector llvm.sadd.sat and
llvm.uadd.sat intrinsics.
Differential Revision: https://reviews.llvm.org/D68566
llvm-svn: 374336
-rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Target/ARM/ARMInstrMVE.td | 54 | ||||
-rw-r--r-- | llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll | 60 |
3 files changed, 42 insertions, 74 deletions
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index c1365f58930..90b709867fe 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -265,6 +265,8 @@ void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) { setOperationAction(ISD::CTTZ, VT, Custom); setOperationAction(ISD::BITREVERSE, VT, Legal); setOperationAction(ISD::BSWAP, VT, Legal); + setOperationAction(ISD::SADDSAT, VT, Legal); + setOperationAction(ISD::UADDSAT, VT, Legal); // No native support for these. setOperationAction(ISD::UDIV, VT, Expand); diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td index 3ba23d9812c..28a32e1263d 100644 --- a/llvm/lib/Target/ARM/ARMInstrMVE.td +++ b/llvm/lib/Target/ARM/ARMInstrMVE.td @@ -1524,8 +1524,8 @@ let Predicates = [HasMVEInt] in { } class MVE_VQADDSUB<string iname, string suffix, bit U, bit subtract, - bits<2> size, list<dag> pattern=[]> - : MVE_int<iname, suffix, size, pattern> { + bits<2> size, ValueType vt> + : MVE_int<iname, suffix, size, []> { let Inst{28} = U; let Inst{25-23} = 0b110; @@ -1535,26 +1535,40 @@ class MVE_VQADDSUB<string iname, string suffix, bit U, bit subtract, let Inst{8} = 0b0; let Inst{4} = 0b1; let Inst{0} = 0b0; + + ValueType VT = vt; +} + +class MVE_VQADD<string suffix, bit U, bits<2> size, ValueType VT> + : MVE_VQADDSUB<"vqadd", suffix, U, 0b0, size, VT>; +class MVE_VQSUB<string suffix, bit U, bits<2> size, ValueType VT> + : MVE_VQADDSUB<"vqsub", suffix, U, 0b1, size, VT>; + +def MVE_VQADDs8 : MVE_VQADD<"s8", 0b0, 0b00, v16i8>; +def MVE_VQADDs16 : MVE_VQADD<"s16", 0b0, 0b01, v8i16>; +def MVE_VQADDs32 : MVE_VQADD<"s32", 0b0, 0b10, v4i32>; +def MVE_VQADDu8 : MVE_VQADD<"u8", 0b1, 0b00, v16i8>; +def MVE_VQADDu16 : MVE_VQADD<"u16", 0b1, 0b01, v8i16>; +def MVE_VQADDu32 : MVE_VQADD<"u32", 0b1, 0b10, v4i32>; + +def MVE_VQSUBs8 : MVE_VQSUB<"s8", 0b0, 0b00, v16i8>; +def MVE_VQSUBs16 : MVE_VQSUB<"s16", 0b0, 0b01, v8i16>; 
+def MVE_VQSUBs32 : MVE_VQSUB<"s32", 0b0, 0b10, v4i32>; +def MVE_VQSUBu8 : MVE_VQSUB<"u8", 0b1, 0b00, v16i8>; +def MVE_VQSUBu16 : MVE_VQSUB<"u16", 0b1, 0b01, v8i16>; +def MVE_VQSUBu32 : MVE_VQSUB<"u32", 0b1, 0b10, v4i32>; + +let Predicates = [HasMVEInt] in { + foreach instr = [MVE_VQADDu8, MVE_VQADDu16, MVE_VQADDu32] in + foreach VT = [instr.VT] in + def : Pat<(VT (uaddsat (VT MQPR:$Qm), (VT MQPR:$Qn))), + (VT (instr (VT MQPR:$Qm), (VT MQPR:$Qn)))>; + foreach instr = [MVE_VQADDs8, MVE_VQADDs16, MVE_VQADDs32] in + foreach VT = [instr.VT] in + def : Pat<(VT (saddsat (VT MQPR:$Qm), (VT MQPR:$Qn))), + (VT (instr (VT MQPR:$Qm), (VT MQPR:$Qn)))>; } -class MVE_VQADD<string suffix, bit U, bits<2> size, list<dag> pattern=[]> - : MVE_VQADDSUB<"vqadd", suffix, U, 0b0, size, pattern>; -class MVE_VQSUB<string suffix, bit U, bits<2> size, list<dag> pattern=[]> - : MVE_VQADDSUB<"vqsub", suffix, U, 0b1, size, pattern>; - -def MVE_VQADDs8 : MVE_VQADD<"s8", 0b0, 0b00>; -def MVE_VQADDs16 : MVE_VQADD<"s16", 0b0, 0b01>; -def MVE_VQADDs32 : MVE_VQADD<"s32", 0b0, 0b10>; -def MVE_VQADDu8 : MVE_VQADD<"u8", 0b1, 0b00>; -def MVE_VQADDu16 : MVE_VQADD<"u16", 0b1, 0b01>; -def MVE_VQADDu32 : MVE_VQADD<"u32", 0b1, 0b10>; - -def MVE_VQSUBs8 : MVE_VQSUB<"s8", 0b0, 0b00>; -def MVE_VQSUBs16 : MVE_VQSUB<"s16", 0b0, 0b01>; -def MVE_VQSUBs32 : MVE_VQSUB<"s32", 0b0, 0b10>; -def MVE_VQSUBu8 : MVE_VQSUB<"u8", 0b1, 0b00>; -def MVE_VQSUBu16 : MVE_VQSUB<"u16", 0b1, 0b01>; -def MVE_VQSUBu32 : MVE_VQSUB<"u32", 0b1, 0b10>; class MVE_VABD_int<string suffix, bit U, bits<2> size, list<dag> pattern=[]> : MVE_int<"vabd", suffix, size, pattern> { diff --git a/llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll b/llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll index f6a9af42f6a..2a775fe9543 100644 --- a/llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll +++ b/llvm/test/CodeGen/Thumb2/mve-saturating-arith.ll @@ -4,21 +4,7 @@ define arm_aapcs_vfpcc <16 x i8> @sadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) { ; CHECK-LABEL: 
sadd_int8_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vadd.i8 q2, q0, q1 -; CHECK-NEXT: vmov.i8 q3, #0x80 -; CHECK-NEXT: vcmp.s8 lt, q2, zr -; CHECK-NEXT: vmov.i8 q4, #0x7f -; CHECK-NEXT: vpsel q3, q4, q3 -; CHECK-NEXT: vcmp.s8 gt, q0, q2 -; CHECK-NEXT: vmrs r0, p0 -; CHECK-NEXT: vcmp.s8 lt, q1, zr -; CHECK-NEXT: vmrs r1, p0 -; CHECK-NEXT: eors r0, r1 -; CHECK-NEXT: vmsr p0, r0 -; CHECK-NEXT: vpsel q0, q3, q2 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vqadd.s8 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2) @@ -28,21 +14,7 @@ entry: define arm_aapcs_vfpcc <8 x i16> @sadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) { ; CHECK-LABEL: sadd_int16_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vadd.i16 q2, q0, q1 -; CHECK-NEXT: vmov.i16 q3, #0x8000 -; CHECK-NEXT: vcmp.s16 lt, q2, zr -; CHECK-NEXT: vmvn.i16 q4, #0x8000 -; CHECK-NEXT: vpsel q3, q4, q3 -; CHECK-NEXT: vcmp.s16 gt, q0, q2 -; CHECK-NEXT: vmrs r0, p0 -; CHECK-NEXT: vcmp.s16 lt, q1, zr -; CHECK-NEXT: vmrs r1, p0 -; CHECK-NEXT: eors r0, r1 -; CHECK-NEXT: vmsr p0, r0 -; CHECK-NEXT: vpsel q0, q3, q2 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vqadd.s16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2) @@ -52,21 +24,7 @@ entry: define arm_aapcs_vfpcc <4 x i32> @sadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) { ; CHECK-LABEL: sadd_int32_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vadd.i32 q2, q0, q1 -; CHECK-NEXT: vmov.i32 q3, #0x80000000 -; CHECK-NEXT: vcmp.s32 lt, q2, zr -; CHECK-NEXT: vmvn.i32 q4, #0x80000000 -; CHECK-NEXT: vpsel q3, q4, q3 -; CHECK-NEXT: vcmp.s32 gt, q0, q2 -; CHECK-NEXT: vmrs r0, p0 -; CHECK-NEXT: vcmp.s32 lt, q1, zr -; CHECK-NEXT: vmrs r1, p0 -; CHECK-NEXT: eors r0, r1 -; CHECK-NEXT: vmsr 
p0, r0 -; CHECK-NEXT: vpsel q0, q3, q2 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vqadd.s32 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2) @@ -156,9 +114,7 @@ entry: define arm_aapcs_vfpcc <16 x i8> @uadd_int8_t(<16 x i8> %src1, <16 x i8> %src2) { ; CHECK-LABEL: uadd_int8_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmvn q2, q1 -; CHECK-NEXT: vmin.u8 q0, q0, q2 -; CHECK-NEXT: vadd.i8 q0, q0, q1 +; CHECK-NEXT: vqadd.u8 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2) @@ -168,9 +124,7 @@ entry: define arm_aapcs_vfpcc <8 x i16> @uadd_int16_t(<8 x i16> %src1, <8 x i16> %src2) { ; CHECK-LABEL: uadd_int16_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmvn q2, q1 -; CHECK-NEXT: vmin.u16 q0, q0, q2 -; CHECK-NEXT: vadd.i16 q0, q0, q1 +; CHECK-NEXT: vqadd.u16 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2) @@ -180,9 +134,7 @@ entry: define arm_aapcs_vfpcc <4 x i32> @uadd_int32_t(<4 x i32> %src1, <4 x i32> %src2) { ; CHECK-LABEL: uadd_int32_t: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmvn q2, q1 -; CHECK-NEXT: vmin.u32 q0, q0, q2 -; CHECK-NEXT: vadd.i32 q0, q0, q1 +; CHECK-NEXT: vqadd.u32 q0, q0, q1 ; CHECK-NEXT: bx lr entry: %0 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2) |