diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-26 20:13:44 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-26 20:13:44 +0000 |
| commit | b7a15acd38ec1302639e0fff752e01472620b5b8 (patch) | |
| tree | e49d205f04f4db4e815514c4ca15c83c72b02116 /llvm/test/CodeGen | |
| parent | 8fd74ebfc03317feac43f513cb9e7d45e9215d12 (diff) | |
| download | bcm5719-llvm-b7a15acd38ec1302639e0fff752e01472620b5b8.tar.gz bcm5719-llvm-b7a15acd38ec1302639e0fff752e01472620b5b8.zip | |
[X86] Fold X86ISD::SBB(ISD::SUB(X,Y),0) -> X86ISD::SBB(X,Y) (PR25858)
We often generate X86ISD::SBB(X, 0) for carry flag arithmetic.
I have tried to create test cases for the ADC equivalent (which often uses the same pattern) but haven't managed to find anything yet.
Differential Revision: https://reviews.llvm.org/D57169
llvm-svn: 352288
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-sbb.ll | 15 |
1 file changed, 6 insertions, 9 deletions
diff --git a/llvm/test/CodeGen/X86/combine-sbb.ll b/llvm/test/CodeGen/X86/combine-sbb.ll index 78a45379874..3d34153c296 100644 --- a/llvm/test/CodeGen/X86/combine-sbb.ll +++ b/llvm/test/CodeGen/X86/combine-sbb.ll @@ -13,11 +13,10 @@ define void @PR25858_i32(%WideUInt32* sret, %WideUInt32*, %WideUInt32*) nounwind ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl (%ecx), %esi ; X86-NEXT: movl 4(%ecx), %ecx -; X86-NEXT: subl 4(%edx), %ecx ; X86-NEXT: subl (%edx), %esi -; X86-NEXT: sbbl $0, %ecx -; X86-NEXT: movl %esi, (%eax) +; X86-NEXT: sbbl 4(%edx), %ecx ; X86-NEXT: movl %ecx, 4(%eax) +; X86-NEXT: movl %esi, (%eax) ; X86-NEXT: popl %esi ; X86-NEXT: retl $4 ; @@ -26,11 +25,10 @@ define void @PR25858_i32(%WideUInt32* sret, %WideUInt32*, %WideUInt32*) nounwind ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movl (%rsi), %ecx ; X64-NEXT: movl 4(%rsi), %esi -; X64-NEXT: subl 4(%rdx), %esi ; X64-NEXT: subl (%rdx), %ecx -; X64-NEXT: sbbl $0, %esi -; X64-NEXT: movl %ecx, (%rdi) +; X64-NEXT: sbbl 4(%rdx), %esi ; X64-NEXT: movl %esi, 4(%rdi) +; X64-NEXT: movl %ecx, (%rdi) ; X64-NEXT: retq top: %3 = bitcast %WideUInt32* %1 to i32* @@ -94,11 +92,10 @@ define void @PR25858_i64(%WideUInt64* sret, %WideUInt64*, %WideUInt64*) nounwind ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq (%rsi), %rcx ; X64-NEXT: movq 8(%rsi), %rsi -; X64-NEXT: subq 8(%rdx), %rsi ; X64-NEXT: subq (%rdx), %rcx -; X64-NEXT: sbbq $0, %rsi -; X64-NEXT: movq %rcx, (%rdi) +; X64-NEXT: sbbq 8(%rdx), %rsi ; X64-NEXT: movq %rsi, 8(%rdi) +; X64-NEXT: movq %rcx, (%rdi) ; X64-NEXT: retq top: %3 = bitcast %WideUInt64* %1 to i64* |

