author | Eli Friedman <efriedma@codeaurora.org> | 2017-01-11 19:33:38 +0000
---|---|---
committer | Eli Friedman <efriedma@codeaurora.org> | 2017-01-11 19:33:38 +0000
commit | 3a03742c37bf6d38d2b37400a327bb6a0df82517 (patch) |
tree | a22a4763261be445a3dba5257f4d0b0888dad849 /llvm/test |
parent | f69e64662bda7498fd29418a54b4c5b8fb1e3476 (diff) |
[ARM] More aggressive matching for vpadd and vpaddl.
The new matchers run after legalization, which keeps them simpler and avoids
blocking other optimizations.
Differential Revision: https://reviews.llvm.org/D27779
llvm-svn: 291693
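
For reference, the basic shape the new matcher recognizes is an even/odd lane deinterleave feeding a lane-wise add. A minimal IR sketch of that shape, distilled from the tests below (the function name is illustrative):

```llvm
; Deinterleave the even and odd lanes of a <16 x i8> and add them
; pairwise; after this patch the ARM backend selects a single vpadd.i8
; for this shape (see addCombineToVPADD_i8 in the diff below).
define <8 x i8> @vpadd_shape_sketch(<16 x i8> %v) {
  %even = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %odd = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %sum = add <8 x i8> %odd, %even
  ret <8 x i8> %sum
}
```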
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/ARM/vpadd.ll | 248
-rw-r--r-- | llvm/test/CodeGen/ARM/vtrn.ll | 4
2 files changed, 234 insertions, 18 deletions
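
The widening form works the same way, except the deinterleaved lanes go through a sign or zero extension before the add; the new matcher turns that into vpaddl. A minimal sketch mirroring addCombineToVPADDLq_s16 in the diff below (the function name is illustrative):

```llvm
; Even/odd deinterleave, sign-extend, then add; after this patch the ARM
; backend selects vpaddl.s16 for this shape.
define <4 x i32> @vpaddl_shape_sketch(<8 x i16> %v) {
  %even = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %odd = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %evenw = sext <4 x i16> %even to <4 x i32>
  %oddw = sext <4 x i16> %odd to <4 x i32>
  %sum = add <4 x i32> %oddw, %evenw
  ret <4 x i32> %sum
}
```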
diff --git a/llvm/test/CodeGen/ARM/vpadd.ll b/llvm/test/CodeGen/ARM/vpadd.ll
index 269223ac9f3..1aa23597cf4 100644
--- a/llvm/test/CodeGen/ARM/vpadd.ll
+++ b/llvm/test/CodeGen/ARM/vpadd.ll
@@ -214,14 +214,11 @@ define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
 }
 
 ; Combine vuzp+vadd->vpadd.
-; FIXME: Implement this optimization
-define void @addCombineToVPADD(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
-; CHECK-LABEL: addCombineToVPADD:
+define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vorr d18, d17, d17
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vadd.i8 d16, d18, d16
+; CHECK-NEXT: vpadd.i8 d16, d16, d17
 ; CHECK-NEXT: vstr d16, [r1]
 ; CHECK-NEXT: mov pc, lr
   %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -233,15 +230,44 @@ define void @addCombineToVPADD(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
   ret void
 }
 
+; Combine vuzp+vadd->vpadd.
+define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i16:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpadd.i16 d16, d16, d17
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %add = add <4 x i16> %tmp3, %tmp1
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vadd->vpadd.
+define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i32:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpadd.i32 d16, d16, d17
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %add = add <2 x i32> %tmp3, %tmp1
+  store <2 x i32> %add, <2 x i32>* %X, align 8
+  ret void
+}
+
 ; Combine vuzp+vaddl->vpaddl
-; FIXME: Implement this optimization.
-define void @addCombineToVPADDL_sext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
-; CHECK-LABEL: addCombineToVPADDL_sext:
+define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vorr d18, d17, d17
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vaddl.s8 q8, d18, d16
+; CHECK-NEXT: vpaddl.s8 q8, q8
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
   %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -254,10 +280,200 @@ define void @addCombineToVPADDL_sext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind s
   ret void
 }
 
-; Legalization produces a EXTRACT_VECTOR_ELT DAG node which performs an extend from
-; i16 to i32. In this case the input for the formed VPADDL needs to be a vector of i16s.
-define <2 x i16> @fromExtendingExtractVectorElt(<4 x i16> %in) {
-; CHECK-LABEL: fromExtendingExtractVectorElt:
+; Combine vuzp+vaddl->vpaddl
+; FIXME: Legalization butchers the shuffles.
+define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_s8:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmov.i16 d18, #0x8
+; CHECK-NEXT: vneg.s16 d18, d18
+; CHECK-NEXT: vext.8 d19, d16, d16, #1
+; CHECK-NEXT: vshl.i16 d16, d16, #8
+; CHECK-NEXT: vshl.i16 d17, d19, #8
+; CHECK-NEXT: vshl.s16 d16, d16, d18
+; CHECK-NEXT: vshl.s16 d17, d17, d18
+; CHECK-NEXT: vadd.i16 d16, d17, d16
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = sext <4 x i8> %tmp3 to <4 x i16>
+  %tmp5 = sext <4 x i8> %tmp1 to <4 x i16>
+  %add = add <4 x i16> %tmp4, %tmp5
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u8:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+  %tmp5 = zext <8 x i8> %tmp1 to <8 x i16>
+  %add = add <8 x i16> %tmp4, %tmp5
+  store <8 x i16> %add, <8 x i16>* %X, align 8
+  ret void
+}
+
+; In theory, it's possible to match this to vpaddl, but rearranging the
+; shuffle is awkward, so this doesn't match at the moment.
+define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u8_early_zext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmovl.u8 q9, d17
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vuzp.16 q8, q9
+; CHECK-NEXT: vadd.i16 q8, q8, q9
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = zext <16 x i8> %tmp to <16 x i16>
+  %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %add = add <8 x i16> %tmp2, %tmp3
+  store <8 x i16> %add, <8 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+; FIXME: Legalization butchers the shuffle.
+define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_u8:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vext.8 d18, d16, d16, #1
+; CHECK-NEXT: vbic.i16 d16, #0xff00
+; CHECK-NEXT: vbic.i16 d18, #0xff00
+; CHECK-NEXT: vadd.i16 d16, d18, d16
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = zext <4 x i8> %tmp3 to <4 x i16>
+  %tmp5 = zext <4 x i8> %tmp1 to <4 x i16>
+  %add = add <4 x i16> %tmp4, %tmp5
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Matching to vpaddl.8 requires matching shuffle(zext()).
+define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_u8_early_zext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vpadd.i16 d16, d16, d17
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = zext <16 x i8> %tmp to <16 x i16>
+  %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %add = add <4 x i16> %tmp2, %tmp3
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s16:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpaddl.s16 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = sext <4 x i16> %tmp3 to <4 x i32>
+  %tmp5 = sext <4 x i16> %tmp1 to <4 x i32>
+  %add = add <4 x i32> %tmp4, %tmp5
+  store <4 x i32> %add, <4 x i32>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u16:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+  %tmp5 = zext <4 x i16> %tmp1 to <4 x i32>
+  %add = add <4 x i32> %tmp4, %tmp5
+  store <4 x i32> %add, <4 x i32>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vaddl->vpaddl
+define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s32:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpaddl.s32 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %tmp4 = sext <2 x i32> %tmp3 to <2 x i64>
+  %tmp5 = sext <2 x i32> %tmp1 to <2 x i64>
+  %add = add <2 x i64> %tmp4, %tmp5
+  store <2 x i64> %add, <2 x i64>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vaddl->vpaddl
+define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u32:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vpaddl.u32 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+  %tmp5 = zext <2 x i32> %tmp1 to <2 x i64>
+  %add = add <2 x i64> %tmp4, %tmp5
+  store <2 x i64> %add, <2 x i64>* %X, align 8
+  ret void
+}
+
+; Legalization promotes the <4 x i8> to <4 x i16>.
+define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) {
+; CHECK-LABEL: fromExtendingExtractVectorElt_i8:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vpaddl.s8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+  %tmp1 = shufflevector <8 x i8> %in, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp2 = shufflevector <8 x i8> %in, <8 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %x = add <4 x i8> %tmp2, %tmp1
+  ret <4 x i8> %x
+}
+
+; Legalization promotes the <2 x i16> to <2 x i32>.
+define <2 x i16> @fromExtendingExtractVectorElt_i16(<4 x i16> %in) {
+; CHECK-LABEL: fromExtendingExtractVectorElt_i16:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vmov d16, r0, r1
 ; CHECK-NEXT: vpaddl.s16 d16, d16
diff --git a/llvm/test/CodeGen/ARM/vtrn.ll b/llvm/test/CodeGen/ARM/vtrn.ll
index 36731e933ba..df6336043fd 100644
--- a/llvm/test/CodeGen/ARM/vtrn.ll
+++ b/llvm/test/CodeGen/ARM/vtrn.ll
@@ -70,14 +70,14 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ; CHECK-NEXT: vldr d16, [r1]
 ; CHECK-NEXT: vldr d17, [r0]
 ; CHECK-NEXT: vtrn.32 d17, d16
-; CHECK-NEXT: vadd.i32 d16, d17, d16
+; CHECK-NEXT: vmul.i32 d16, d17, d16
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: mov pc, lr
   %tmp1 = load <2 x i32>, <2 x i32>* %A
   %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
   %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
-  %tmp5 = add <2 x i32> %tmp3, %tmp4
+  %tmp5 = mul <2 x i32> %tmp3, %tmp4
   ret <2 x i32> %tmp5
 }
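
A note on the vtrn.ll change: with the more aggressive matching, the shuffle+add pair in vtrni32 would itself now fold into vpadd.i32, so the test switches the add to a mul to keep exercising vtrn.32, as the updated CHECK lines confirm. A minimal sketch of the preserved pattern (the function name is illustrative):

```llvm
; With mul instead of add there is no pairwise NEON instruction to form,
; so the backend still emits vtrn.32 followed by vmul.i32, which is what
; the updated test checks for.
define <2 x i32> @vtrn_mul_sketch(<2 x i32> %a, <2 x i32> %b) {
  %lo = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
  %hi = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
  %p = mul <2 x i32> %lo, %hi
  ret <2 x i32> %p
}
```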