Diffstat (limited to 'llvm/test')
 -rw-r--r--  llvm/test/CodeGen/X86/combine-adc.ll  92
 -rw-r--r--  llvm/test/CodeGen/X86/combine-sbb.ll  95
 2 files changed, 187 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/combine-adc.ll b/llvm/test/CodeGen/X86/combine-adc.ll
new file mode 100644
index 00000000000..0f11f3159f8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-adc.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+define i32 @PR40483_add1(i32*, i32) nounwind {
+; X86-LABEL: PR40483_add1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl (%edx), %esi
+; X86-NEXT: leal (%esi,%ecx), %eax
+; X86-NEXT: addl %ecx, %esi
+; X86-NEXT: movl %esi, (%edx)
+; X86-NEXT: jae .LBB0_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB0_1:
+; X86-NEXT: orl %eax, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: PR40483_add1:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: leal (%rcx,%rsi), %edx
+; X64-NEXT: orl %edx, %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: addl %esi, %ecx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: cmovael %edx, %eax
+; X64-NEXT: retq
+  %3 = load i32, i32* %0, align 8
+  %4 = tail call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %3, i32 %1)
+  %5 = extractvalue { i8, i32 } %4, 1
+  store i32 %5, i32* %0, align 8
+  %6 = extractvalue { i8, i32 } %4, 0
+  %7 = icmp eq i8 %6, 0
+  %8 = add i32 %1, %3
+  %9 = or i32 %5, %8
+  %10 = select i1 %7, i32 %9, i32 0
+  ret i32 %10
+}
+
+define i32 @PR40483_add2(i32*, i32) nounwind {
+; X86-LABEL: PR40483_add2:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %edi
+; X86-NEXT: leal (%edi,%edx), %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: addl %edx, %edi
+; X86-NEXT: movl %edi, (%esi)
+; X86-NEXT: jae .LBB1_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: orl %ecx, %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: .LBB1_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+;
+; X64-LABEL: PR40483_add2:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: leal (%rcx,%rsi), %edx
+; X64-NEXT: orl %edx, %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: addl %esi, %ecx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: cmovbl %edx, %eax
+; X64-NEXT: retq
+  %3 = load i32, i32* %0, align 8
+  %4 = tail call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %3, i32 %1)
+  %5 = extractvalue { i8, i32 } %4, 1
+  store i32 %5, i32* %0, align 8
+  %6 = extractvalue { i8, i32 } %4, 0
+  %7 = icmp eq i8 %6, 0
+  %8 = add i32 %3, %1
+  %9 = or i32 %5, %8
+  %10 = select i1 %7, i32 0, i32 %9
+  ret i32 %10
+}
+
+declare { i8, i32 } @llvm.x86.addcarry.32(i8, i32, i32)
diff --git a/llvm/test/CodeGen/X86/combine-sbb.ll b/llvm/test/CodeGen/X86/combine-sbb.ll
index 6ef26f0db0d..5390cf90de8 100644
--- a/llvm/test/CodeGen/X86/combine-sbb.ll
+++ b/llvm/test/CodeGen/X86/combine-sbb.ll
@@ -196,4 +196,99 @@ define i32 @PR40483_sub2(i32*, i32) nounwind {
ret i32 %7
 }

+define i32 @PR40483_sub3(i32*, i32) nounwind {
+; X86-LABEL: PR40483_sub3:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: movl %edx, %edi
+; X86-NEXT: subl %ecx, %edi
+; X86-NEXT: movl %edi, (%esi)
+; X86-NEXT: jae .LBB5_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: jmp .LBB5_3
+; X86-NEXT: .LBB5_1:
+; X86-NEXT: subl %edx, %ecx
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: .LBB5_3:
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+;
+; X64-LABEL: PR40483_sub3:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: subl %esi, %eax
+; X64-NEXT: movl %esi, %edx
+; X64-NEXT: subl %ecx, %edx
+; X64-NEXT: orl %eax, %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: subl %esi, %ecx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: cmovael %edx, %eax
+; X64-NEXT: retq
+  %3 = load i32, i32* %0, align 8
+  %4 = tail call { i8, i32 } @llvm.x86.subborrow.32(i8 0, i32 %3, i32 %1)
+  %5 = extractvalue { i8, i32 } %4, 1
+  store i32 %5, i32* %0, align 8
+  %6 = extractvalue { i8, i32 } %4, 0
+  %7 = icmp eq i8 %6, 0
+  %8 = sub i32 %1, %3
+  %9 = or i32 %5, %8
+  %10 = select i1 %7, i32 %9, i32 0
+  ret i32 %10
+}
+
+define i32 @PR40483_sub4(i32*, i32) nounwind {
+; X86-LABEL: PR40483_sub4:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %edi
+; X86-NEXT: movl %edi, %ecx
+; X86-NEXT: subl %edx, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: subl %edx, %edi
+; X86-NEXT: movl %edi, (%esi)
+; X86-NEXT: jae .LBB6_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: orl %ecx, %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: .LBB6_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+;
+; X64-LABEL: PR40483_sub4:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: movl %ecx, %edx
+; X64-NEXT: subl %esi, %edx
+; X64-NEXT: orl %edx, %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: subl %esi, %ecx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: cmovbl %edx, %eax
+; X64-NEXT: retq
+  %3 = load i32, i32* %0, align 8
+  %4 = tail call { i8, i32 } @llvm.x86.subborrow.32(i8 0, i32 %3, i32 %1)
+  %5 = extractvalue { i8, i32 } %4, 1
+  store i32 %5, i32* %0, align 8
+  %6 = extractvalue { i8, i32 } %4, 0
+  %7 = icmp eq i8 %6, 0
+  %8 = sub i32 %3, %1
+  %9 = or i32 %5, %8
+  %10 = select i1 %7, i32 0, i32 %9
+  ret i32 %10
+}
+
declare { i8, i32 } @llvm.x86.subborrow.32(i8, i32, i32)