| | | |
|---|---|---|
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-27 14:04:45 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-27 14:04:45 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-27 14:04:45 +0000 |
| commit | f6d7cfef396b38ac68b2f0e9913ac9120bf534d4 (patch) | |
| tree | d83db72014933d5dcf83a0d4c76f7216072af051 /llvm/test/CodeGen | |
| parent | adca82092799a1cb4f463f60f3aaa08a0fadc146 (diff) | |
| download | bcm5719-llvm-f6d7cfef396b38ac68b2f0e9913ac9120bf534d4.tar.gz bcm5719-llvm-f6d7cfef396b38ac68b2f0e9913ac9120bf534d4.zip | |
[X86] Add CGP tests for PR40486
llvm-svn: 352316
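Judging from the test names and the checks below, PR40486 is about getting CodeGenPrepare to fold the add-then-compare carry idiom from PR31754 into `@llvm.uadd.with.overflow.i64`, so that the X86 backend can select a plain `addq`/`adcq` pair (as the `test_2`..`test_4` checks already show) instead of materializing the carry with `sete`/`setne` and `addb $-1` (as in `test_1` and `test_18446744073709551615`). The sketch below shows that intended post-CGP form for the `test_1` pattern; it is hand-written, not part of this commit, and the name `@test_1_uaddo` is illustrative only.

```llvm
; Hand-written sketch (assumption, not from the commit): the uaddo form that
; CodeGenPrepare could produce from test_1's "add 1" + "icmp eq %sum, 0" pair.
define void @test_1_uaddo(i64* %alo, i64* %ahi) {
  %lo = load i64, i64* %alo, align 8
  ; one intrinsic yields both the low-word sum and the carry-out
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %lo, i64 1)
  %sum = extractvalue { i64, i1 } %pair, 0
  %carry = extractvalue { i64, i1 } %pair, 1
  store i64 %sum, i64* %alo, align 8
  ; feed the carry into the high-word add, as the tests below do
  %cf = zext i1 %carry to i8
  %hi = load i64, i64* %ahi, align 8
  %adc = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %cf, i64 %hi, i64 0)
  %hi.out = extractvalue { i8, i64 } %adc, 1
  store i64 %hi.out, i64* %ahi, align 8
  ret void
}

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)
declare { i8, i64 } @llvm.x86.addcarry.64(i8, i64, i64)
```

With the sum and the carry coming from a single node, instruction selection has what it needs to emit the memory-destination `addq`/`adcq` sequence that the constant-2/3/4 tests already check for.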
Diffstat (limited to 'llvm/test/CodeGen')
| | | |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/codegen-prepare-uaddo.ll | 254 |
1 files changed, 254 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/codegen-prepare-uaddo.ll b/llvm/test/CodeGen/X86/codegen-prepare-uaddo.ll
new file mode 100644
index 00000000000..52dfe1c8792
--- /dev/null
+++ b/llvm/test/CodeGen/X86/codegen-prepare-uaddo.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
+
+; PR31754
+;
+; #include <x86intrin.h>
+; using u64 = unsigned long long;
+;
+; template<u64 K>
+; void test(u64& alo, u64& ahi)
+; {
+; u64 blo = K;
+; u64 bhi = 0;
+; bool cf = (alo += blo) < blo;
+; _addcarry_u64(cf, ahi, bhi, &ahi);
+; }
+;
+; template void test<0ull>(u64&, u64&);
+; template void test<1ull>(u64&, u64&);
+; template void test<2ull>(u64&, u64&);
+; template void test<3ull>(u64&, u64&);
+; template void test<4ull>(u64&, u64&);
+; template void test<0x7fffffffffffffffull>(u64&, u64&);
+; template void test<0x8000000000000000ull>(u64&, u64&);
+; template void test<0x8000000000000001ull>(u64&, u64&);
+; template void test<0xffffffff80000000ull>(u64&, u64&);
+; template void test<0xfffffffffffffffdull>(u64&, u64&);
+; template void test<0xfffffffffffffffeull>(u64&, u64&);
+; template void test<0xffffffffffffffffull>(u64&, u64&);
+
+define void @test_0(i64*, i64*) {
+; CHECK-LABEL: test_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %1, align 8
+  %4 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 %3, i64 0)
+  %5 = extractvalue { i8, i64 } %4, 1
+  store i64 %5, i64* %1, align 8
+  ret void
+}
+
+define void @test_1(i64*, i64*) {
+; CHECK-LABEL: test_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    incq (%rdi)
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    addb $-1, %al
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, 1
+  store i64 %4, i64* %0, align 8
+  %5 = icmp eq i64 %4, 0
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_2(i64*, i64*) {
+; CHECK-LABEL: test_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $2, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, 2
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, 2
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_3(i64*, i64*) {
+; CHECK-LABEL: test_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $3, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, 3
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, 3
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_4(i64*, i64*) {
+; CHECK-LABEL: test_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $4, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, 4
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, 4
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_9223372036854775807(i64*, i64*) {
+; CHECK-LABEL: test_9223372036854775807:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
+; CHECK-NEXT:    addq %rax, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, 9223372036854775807
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, 9223372036854775807
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_9223372036854775808(i64*, i64*) {
+; CHECK-LABEL: test_9223372036854775808:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; CHECK-NEXT:    xorq %rax, %rcx
+; CHECK-NEXT:    movq %rcx, (%rdi)
+; CHECK-NEXT:    shrq $63, %rax
+; CHECK-NEXT:    addb $-1, %al
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = xor i64 %3, -9223372036854775808
+  store i64 %4, i64* %0, align 8
+  %5 = lshr i64 %3, 63
+  %6 = trunc i64 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_9223372036854775809(i64*, i64*) {
+; CHECK-LABEL: test_9223372036854775809:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movabsq $-9223372036854775807, %rax # imm = 0x8000000000000001
+; CHECK-NEXT:    addq %rax, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, -9223372036854775807
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, -9223372036854775807
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_18446744071562067968(i64*, i64*) {
+; CHECK-LABEL: test_18446744071562067968:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $-2147483648, (%rdi) # imm = 0x80000000
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, -2147483648
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, -2147483648
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_18446744073709551613(i64*, i64*) {
+; CHECK-LABEL: test_18446744073709551613:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $-3, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, -3
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, -3
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_18446744073709551614(i64*, i64*) {
+; CHECK-LABEL: test_18446744073709551614:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq $-2, (%rdi)
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, -2
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ult i64 %4, -2
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+define void @test_18446744073709551615(i64*, i64*) {
+; CHECK-LABEL: test_18446744073709551615:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    leaq -1(%rax), %rcx
+; CHECK-NEXT:    movq %rcx, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    addb $-1, %al
+; CHECK-NEXT:    adcq $0, (%rsi)
+; CHECK-NEXT:    retq
+  %3 = load i64, i64* %0, align 8
+  %4 = add i64 %3, -1
+  store i64 %4, i64* %0, align 8
+  %5 = icmp ne i64 %3, 0
+  %6 = zext i1 %5 to i8
+  %7 = load i64, i64* %1, align 8
+  %8 = tail call { i8, i64 } @llvm.x86.addcarry.64(i8 %6, i64 %7, i64 0)
+  %9 = extractvalue { i8, i64 } %8, 1
+  store i64 %9, i64* %1, align 8
+  ret void
+}
+
+declare { i8, i64 } @llvm.x86.addcarry.64(i8, i64, i64)
```

