diff options
| author | Eli Friedman <efriedma@codeaurora.org> | 2018-08-16 18:39:39 +0000 |
|---|---|---|
| committer | Eli Friedman <efriedma@codeaurora.org> | 2018-08-16 18:39:39 +0000 |
| commit | 73e8a784e62f945a51363c8b5ec4eaedcf9f87e8 (patch) | |
| tree | 15afc4a44bf8511135900d38ebc78216dcb23b1b /llvm/test/CodeGen/Thumb2 | |
| parent | d1767dc56f5be75bdff23f3fe33e54428fed704f (diff) | |
| download | bcm5719-llvm-73e8a784e62f945a51363c8b5ec4eaedcf9f87e8.tar.gz bcm5719-llvm-73e8a784e62f945a51363c8b5ec4eaedcf9f87e8.zip | |
[SelectionDAG] Improve the legalisation lowering of UMULO.
There is no way in the universe that doing a full-width division in
software will be faster than doing overflowing multiplication in
software in the first place, especially given that this same full-width
multiplication needs to be done anyway.
This patch replaces the previous implementation with a direct lowering
into an overflowing multiplication algorithm based on half-width
operations.
Correctness of the algorithm was verified by exhaustively checking the
output of this algorithm for overflowing multiplication of 16 bit
integers against an obviously correct widening multiplication. Barring
any oversights introduced by porting the algorithm to DAG, confidence in
correctness of this algorithm is extremely high.
Following table shows the change in both t = runtime and s = space. The
change is expressed as a multiplier of original, so anything under 1 is
“better” and anything above 1 is worse.
+-------+-----------+-----------+-------------+-------------+
| Arch | u64*u64 t | u64*u64 s | u128*u128 t | u128*u128 s |
+-------+-----------+-----------+-------------+-------------+
| X64 | - | - | ~0.5 | ~0.64 |
| i686 | ~0.5 | ~0.6666 | ~0.05 | ~0.9 |
| armv7 | - | ~0.75 | - | ~1.4 |
+-------+-----------+-----------+-------------+-------------+
Performance numbers have been collected by running overflowing
multiplication in a loop under `perf` on two x86_64 (one Intel Haswell,
other AMD Ryzen) based machines. Size numbers have been collected by
looking at the size of function containing an overflowing multiply in
a loop.
All in all, it can be seen that both performance and size have improved
except in the case of armv7 where code size has regressed for 128-bit
multiply. u128*u128 overflowing multiply on 32-bit platforms seems to
benefit from this change a lot, taking only 5% of the time compared to
original algorithm to calculate the same thing.
The final benefit of this change is that LLVM is now capable of lowering
the overflowing unsigned multiply for integers of any bit-width as long
as the target is capable of lowering regular multiplication for the same
bit-width. Previously, 128-bit overflowing multiply was the widest
possible.
Patch by Simonas Kazlauskas!
Differential Revision: https://reviews.llvm.org/D50310
llvm-svn: 339922
Diffstat (limited to 'llvm/test/CodeGen/Thumb2')
| -rw-r--r-- | llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll | 122 | ||||
| -rw-r--r-- | llvm/test/CodeGen/Thumb2/umulo-64-legalisation-lowering.ll | 49 |
2 files changed, 171 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll new file mode 100644 index 00000000000..d639b7acbbc --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/umulo-128-legalisation-lowering.ll @@ -0,0 +1,122 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=thumbv7-unknown-none-gnueabi | FileCheck %s --check-prefixes=THUMBV7 + +define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 { +; THUMBV7-LABEL: muloti_test: +; THUMBV7: @ %bb.0: @ %start +; THUMBV7-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; THUMBV7-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; THUMBV7-NEXT: .pad #44 +; THUMBV7-NEXT: sub sp, #44 +; THUMBV7-NEXT: str r0, [sp, #40] @ 4-byte Spill +; THUMBV7-NEXT: movs r0, #0 +; THUMBV7-NEXT: ldrd r4, r7, [sp, #88] +; THUMBV7-NEXT: mov r5, r3 +; THUMBV7-NEXT: strd r4, r7, [sp] +; THUMBV7-NEXT: mov r1, r3 +; THUMBV7-NEXT: strd r0, r0, [sp, #8] +; THUMBV7-NEXT: mov r6, r2 +; THUMBV7-NEXT: mov r0, r2 +; THUMBV7-NEXT: movs r2, #0 +; THUMBV7-NEXT: movs r3, #0 +; THUMBV7-NEXT: bl __multi3 +; THUMBV7-NEXT: strd r1, r0, [sp, #32] +; THUMBV7-NEXT: strd r3, r2, [sp, #24] +; THUMBV7-NEXT: ldrd r2, r0, [sp, #96] +; THUMBV7-NEXT: ldr.w r9, [sp, #80] +; THUMBV7-NEXT: umull lr, r0, r0, r6 +; THUMBV7-NEXT: ldr.w r11, [sp, #84] +; THUMBV7-NEXT: umull r3, r1, r5, r2 +; THUMBV7-NEXT: umull r2, r12, r2, r6 +; THUMBV7-NEXT: add r3, lr +; THUMBV7-NEXT: umull r8, r10, r7, r9 +; THUMBV7-NEXT: str r2, [sp, #20] @ 4-byte Spill +; THUMBV7-NEXT: adds.w lr, r12, r3 +; THUMBV7-NEXT: umull r6, r9, r9, r4 +; THUMBV7-NEXT: mov.w r3, #0 +; THUMBV7-NEXT: adc r12, r3, #0 +; THUMBV7-NEXT: umull r2, r4, r11, r4 +; THUMBV7-NEXT: add r2, r8 +; THUMBV7-NEXT: mov.w r8, #0 +; THUMBV7-NEXT: adds.w r2, r2, r9 +; THUMBV7-NEXT: adc r9, r3, #0 +; THUMBV7-NEXT: ldr r3, [sp, #20] @ 4-byte Reload +; THUMBV7-NEXT: adds r3, r3, r6 +; THUMBV7-NEXT: ldr 
r6, [sp, #28] @ 4-byte Reload +; THUMBV7-NEXT: adc.w r2, r2, lr +; THUMBV7-NEXT: adds r3, r3, r6 +; THUMBV7-NEXT: ldr r6, [sp, #24] @ 4-byte Reload +; THUMBV7-NEXT: adcs r2, r6 +; THUMBV7-NEXT: ldrd r6, lr, [sp, #36] +; THUMBV7-NEXT: str.w r6, [lr] +; THUMBV7-NEXT: adc r8, r8, #0 +; THUMBV7-NEXT: ldr r6, [sp, #32] @ 4-byte Reload +; THUMBV7-NEXT: cmp r5, #0 +; THUMBV7-NEXT: strd r6, r3, [lr, #4] +; THUMBV7-NEXT: str.w r2, [lr, #12] +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r5, #1 +; THUMBV7-NEXT: ldr r2, [sp, #100] +; THUMBV7-NEXT: cmp r2, #0 +; THUMBV7-NEXT: mov r3, r2 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r3, #1 +; THUMBV7-NEXT: cmp r0, #0 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r0, #1 +; THUMBV7-NEXT: cmp r1, #0 +; THUMBV7-NEXT: and.w r3, r3, r5 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r1, #1 +; THUMBV7-NEXT: orrs r0, r3 +; THUMBV7-NEXT: cmp r7, #0 +; THUMBV7-NEXT: orr.w r0, r0, r1 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r7, #1 +; THUMBV7-NEXT: cmp.w r11, #0 +; THUMBV7-NEXT: mov r1, r11 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r1, #1 +; THUMBV7-NEXT: cmp r4, #0 +; THUMBV7-NEXT: ldr r3, [sp, #96] +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r4, #1 +; THUMBV7-NEXT: cmp.w r10, #0 +; THUMBV7-NEXT: and.w r1, r1, r7 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne.w r10, #1 +; THUMBV7-NEXT: orrs r3, r2 +; THUMBV7-NEXT: ldr r2, [sp, #80] +; THUMBV7-NEXT: orr.w r1, r1, r4 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r3, #1 +; THUMBV7-NEXT: orr.w r1, r1, r10 +; THUMBV7-NEXT: orrs.w r7, r2, r11 +; THUMBV7-NEXT: orr.w r1, r1, r9 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r7, #1 +; THUMBV7-NEXT: orr.w r0, r0, r12 +; THUMBV7-NEXT: ands r3, r7 +; THUMBV7-NEXT: orrs r1, r3 +; THUMBV7-NEXT: orrs r0, r1 +; THUMBV7-NEXT: orr.w r0, r0, r8 +; THUMBV7-NEXT: and r0, r0, #1 +; THUMBV7-NEXT: strb.w r0, [lr, #16] +; THUMBV7-NEXT: add sp, #44 +; THUMBV7-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +start: + %0 = tail call { i128, i1 } 
@llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2 + %1 = extractvalue { i128, i1 } %0, 0 + %2 = extractvalue { i128, i1 } %0, 1 + %3 = zext i1 %2 to i8 + %4 = insertvalue { i128, i8 } undef, i128 %1, 0 + %5 = insertvalue { i128, i8 } %4, i8 %3, 1 + ret { i128, i8 } %5 +} + +; Function Attrs: nounwind readnone speculatable +declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1 + +attributes #0 = { nounwind readnone uwtable } +attributes #1 = { nounwind readnone speculatable } +attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/Thumb2/umulo-64-legalisation-lowering.ll b/llvm/test/CodeGen/Thumb2/umulo-64-legalisation-lowering.ll new file mode 100644 index 00000000000..e47e88a6832 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/umulo-64-legalisation-lowering.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=thumbv7-unknown-none-gnueabi | FileCheck %s --check-prefixes=THUMBV7 + +define { i64, i8 } @mulodi_test(i64 %l, i64 %r) unnamed_addr #0 { +; THUMBV7-LABEL: mulodi_test: +; THUMBV7: @ %bb.0: @ %start +; THUMBV7-NEXT: .save {r4, r5, r6, lr} +; THUMBV7-NEXT: push {r4, r5, r6, lr} +; THUMBV7-NEXT: umull r12, lr, r3, r0 +; THUMBV7-NEXT: movs r6, #0 +; THUMBV7-NEXT: umull r4, r5, r1, r2 +; THUMBV7-NEXT: umull r0, r2, r0, r2 +; THUMBV7-NEXT: add r4, r12 +; THUMBV7-NEXT: adds.w r12, r2, r4 +; THUMBV7-NEXT: adc r2, r6, #0 +; THUMBV7-NEXT: cmp r3, #0 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r3, #1 +; THUMBV7-NEXT: cmp r1, #0 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r1, #1 +; THUMBV7-NEXT: cmp r5, #0 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne r5, #1 +; THUMBV7-NEXT: ands r1, r3 +; THUMBV7-NEXT: cmp.w lr, #0 +; THUMBV7-NEXT: orr.w r1, r1, r5 +; THUMBV7-NEXT: it ne +; THUMBV7-NEXT: movne.w lr, #1 +; THUMBV7-NEXT: orr.w r1, r1, lr +; THUMBV7-NEXT: orrs r2, r1 +; THUMBV7-NEXT: mov r1, r12 +; THUMBV7-NEXT: pop {r4, r5, r6, pc} +start: + %0 = tail call { i64, i1 } 
@llvm.umul.with.overflow.i64(i64 %l, i64 %r) #2 + %1 = extractvalue { i64, i1 } %0, 0 + %2 = extractvalue { i64, i1 } %0, 1 + %3 = zext i1 %2 to i8 + %4 = insertvalue { i64, i8 } undef, i64 %1, 0 + %5 = insertvalue { i64, i8 } %4, i8 %3, 1 + ret { i64, i8 } %5 +} + +; Function Attrs: nounwind readnone speculatable +declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #1 + +attributes #0 = { nounwind readnone uwtable } +attributes #1 = { nounwind readnone speculatable } +attributes #2 = { nounwind } |

