diff options
| author | David Green <david.green@arm.com> | 2019-10-18 09:47:48 +0000 |
|---|---|---|
| committer | David Green <david.green@arm.com> | 2019-10-18 09:47:48 +0000 |
| commit | e6f313b3807d23017d188aa7060b8cad09b3d095 (patch) | |
| tree | a5ee51474dbccbb28c4739edcc39dec640610a6d /llvm/test/CodeGen/X86/ssub_sat.ll | |
| parent | 0c7cc383e5b846bc9e9fcc599d3f342333f5c963 (diff) | |
| download | bcm5719-llvm-e6f313b3807d23017d188aa7060b8cad09b3d095.tar.gz bcm5719-llvm-e6f313b3807d23017d188aa7060b8cad09b3d095.zip | |
[Codegen] Alter the default promotion for saturating adds and subs
The default promotion for the add_sat/sub_sat nodes currently does:
1. ANY_EXTEND from iN to iM
2. SHL by M-N
3. [US][ADD|SUB]SAT
4. LSHR/ASHR by M-N
If the promoted add_sat or sub_sat node is not legal, this can produce code
that effectively does a lot of shifting (and requires large constants to be
materialised) just to use the overflow flag. It is simpler to just do the
saturation manually, using the higher bitwidth addition and a min/max against
the saturating bounds. That is what this patch attempts to do.
Differential Revision: https://reviews.llvm.org/D68926
llvm-svn: 375211
Diffstat (limited to 'llvm/test/CodeGen/X86/ssub_sat.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/ssub_sat.ll | 39 |
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/llvm/test/CodeGen/X86/ssub_sat.ll b/llvm/test/CodeGen/X86/ssub_sat.ll index a92905a975b..5e7a3f93506 100644 --- a/llvm/test/CodeGen/X86/ssub_sat.ll +++ b/llvm/test/CodeGen/X86/ssub_sat.ll @@ -147,34 +147,29 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind { define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind { ; X86-LABEL: func3: ; X86: # %bb.0: -; X86-NEXT: movb {{[0-9]+}}(%esp), %cl -; X86-NEXT: movb {{[0-9]+}}(%esp), %dl -; X86-NEXT: shlb $4, %dl -; X86-NEXT: shlb $4, %cl -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpb %dl, %cl -; X86-NEXT: setns %al -; X86-NEXT: addl $127, %eax -; X86-NEXT: subb %dl, %cl -; X86-NEXT: movzbl %cl, %ecx -; X86-NEXT: cmovol %eax, %ecx -; X86-NEXT: sarb $4, %cl +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: subb {{[0-9]+}}(%esp), %al +; X86-NEXT: movzbl %al, %ecx +; X86-NEXT: cmpb $7, %al +; X86-NEXT: movl $7, %eax +; X86-NEXT: cmovll %ecx, %eax +; X86-NEXT: cmpb $-8, %al +; X86-NEXT: movl $248, %ecx +; X86-NEXT: cmovgl %eax, %ecx ; X86-NEXT: movsbl %cl, %eax ; X86-NEXT: retl ; ; X64-LABEL: func3: ; X64: # %bb.0: -; X64-NEXT: shlb $4, %sil -; X64-NEXT: shlb $4, %dil -; X64-NEXT: xorl %eax, %eax -; X64-NEXT: cmpb %sil, %dil -; X64-NEXT: setns %al -; X64-NEXT: addl $127, %eax ; X64-NEXT: subb %sil, %dil -; X64-NEXT: movzbl %dil, %ecx -; X64-NEXT: cmovol %eax, %ecx -; X64-NEXT: sarb $4, %cl -; X64-NEXT: movsbl %cl, %eax +; X64-NEXT: movzbl %dil, %eax +; X64-NEXT: cmpb $7, %al +; X64-NEXT: movl $7, %ecx +; X64-NEXT: cmovll %eax, %ecx +; X64-NEXT: cmpb $-8, %cl +; X64-NEXT: movl $248, %eax +; X64-NEXT: cmovgl %ecx, %eax +; X64-NEXT: movsbl %al, %eax ; X64-NEXT: retq %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y) ret i4 %tmp |

