| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-27 12:38:09 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-27 12:38:09 +0000 |
| commit | 7b980ad368452bc28a678d6c333eef429579427c (patch) | |
| tree | 0a0678a8000c325525680afa0539b9c58b0686d2 /llvm/test/CodeGen | |
| parent | 711bbdc8942df7b81d82a9517e9dc67068eaa642 (diff) | |
| download | bcm5719-llvm-7b980ad368452bc28a678d6c333eef429579427c.tar.gz bcm5719-llvm-7b980ad368452bc28a678d6c333eef429579427c.zip | |
[X86] Regenerate test to explicitly show branching and condition codes.
llvm-svn: 352313
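The NOTE line added at the top of the test credits utils/update_llc_test_checks.py with generating the new assertions. For reference, a regeneration like this is typically run as sketched below; the build-directory path and the PATH override are assumptions for illustration, not part of the commit.

```sh
# Put a freshly built llc first on PATH, then let the script re-run the
# test's RUN line and rewrite its CHECK lines in place.
# Run from the monorepo root; /path/to/llvm-build is a placeholder.
PATH=/path/to/llvm-build/bin:$PATH \
  python llvm/utils/update_llc_test_checks.py \
    llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
```

The script re-executes the codegen half of the RUN line (`llc < %s -mtriple=x86_64--`) and turns the resulting assembly into the CHECK-LABEL/CHECK-NEXT blocks seen in the diff, which is why the branch targets (.LBB0_2 and friends) and condition codes (je/jne/setne) now appear explicitly.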
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll | 127 |
1 file changed, 98 insertions, 29 deletions
diff --git a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
index c49d5c91f61..482c08632cb 100644
--- a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
+++ b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
@@ -1,12 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
 %struct.obj = type { i64 }
-; CHECK: _Z7releaseP3obj
 define void @_Z7releaseP3obj(%struct.obj* nocapture %o) nounwind uwtable ssp {
+; CHECK-LABEL: _Z7releaseP3obj:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: decq (%rdi)
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # %bb.1: # %return
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_2: # %if.end
+; CHECK-NEXT: jmp free # TAILCALL
 entry:
-; CHECK: decq (%{{rdi|rcx}})
-; CHECK-NEXT: je
 %refcnt = getelementptr inbounds %struct.obj, %struct.obj* %o, i64 0, i32 0
 %0 = load i64, i64* %refcnt, align 8
 %dec = add i64 %0, -1
@@ -28,11 +34,25 @@ return: ; preds = %entry, %if.end
 @.str = private unnamed_addr constant [5 x i8] c"%ld\0A\00", align 1
 @b = common global i32 0, align 4
-; CHECK: test
 define i32 @test() nounwind uwtable ssp {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq {{.*}}(%rip), %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: decq %rsi
+; CHECK-NEXT: movq %rsi, {{.*}}(%rip)
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: movl %eax, {{.*}}(%rip)
+; CHECK-NEXT: movl $.L.str, %edi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: callq printf
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
 entry:
-; CHECK: decq
-; CHECK-NOT: decq
 %0 = load i64, i64* @c, align 8
 %dec.i = add nsw i64 %0, -1
 store i64 %dec.i, i64* @c, align 8
@@ -43,10 +63,26 @@ store i32 %lor.ext.i, i32* @a, align 4
 ret i32 0
 }
-; CHECK: test2
 define i32 @test2() nounwind uwtable ssp {
+; CHECK-LABEL: test2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq {{.*}}(%rip), %rax
+; CHECK-NEXT: leaq -1(%rax), %rsi
+; CHECK-NEXT: movq %rsi, {{.*}}(%rip)
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: testq %rax, %rax
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: movl %ecx, {{.*}}(%rip)
+; CHECK-NEXT: movl $.L.str, %edi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: callq printf
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
 entry:
-; CHECK-NOT: decq ({{.*}})
 %0 = load i64, i64* @c, align 8
 %dec.i = add nsw i64 %0, -1
 store i64 %dec.i, i64* @c, align 8
@@ -65,14 +101,28 @@ declare void @free(i8* nocapture) nounwind
 declare void @other(%struct.obj2* ) nounwind;
-; CHECK: example_dec
 define void @example_dec(%struct.obj2* %o) nounwind uwtable ssp {
 ; 64 bit dec
+; CHECK-LABEL: example_dec:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: decq (%rdi)
+; CHECK-NEXT: jne .LBB3_4
+; CHECK-NEXT: # %bb.1: # %if.end
+; CHECK-NEXT: decl 8(%rdi)
+; CHECK-NEXT: jne .LBB3_4
+; CHECK-NEXT: # %bb.2: # %if.end1
+; CHECK-NEXT: decw 12(%rdi)
+; CHECK-NEXT: jne .LBB3_4
+; CHECK-NEXT: # %bb.3: # %if.end2
+; CHECK-NEXT: decb 14(%rdi)
+; CHECK-NEXT: je .LBB3_5
+; CHECK-NEXT: .LBB3_4: # %return
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB3_5: # %if.end4
+; CHECK-NEXT: jmp other # TAILCALL
 entry:
 %s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
-; CHECK-NOT: load
 %0 = load i64, i64* %s64, align 8
-; CHECK: decq ({{.*}})
 %dec = add i64 %0, -1
 store i64 %dec, i64* %s64, align 8
 %tobool = icmp eq i64 %dec, 0
@@ -81,9 +131,7 @@ entry:
 ; 32 bit dec
 if.end:
 %s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
-; CHECK-NOT: load
 %1 = load i32, i32* %s32, align 4
-; CHECK: decl {{[0-9][0-9]*}}({{.*}})
 %dec1 = add i32 %1, -1
 store i32 %dec1, i32* %s32, align 4
 %tobool2 = icmp eq i32 %dec1, 0
@@ -92,9 +140,7 @@ if.end:
 ; 16 bit dec
 if.end1:
 %s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
-; CHECK-NOT: load
 %2 = load i16, i16* %s16, align 2
-; CHECK: decw {{[0-9][0-9]*}}({{.*}})
 %dec2 = add i16 %2, -1
 store i16 %dec2, i16* %s16, align 2
 %tobool3 = icmp eq i16 %dec2, 0
@@ -103,9 +149,7 @@ if.end1:
 ; 8 bit dec
 if.end2:
 %s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
-; CHECK-NOT: load
 %3 = load i8, i8* %s8
-; CHECK: decb {{[0-9][0-9]*}}({{.*}})
 %dec3 = add i8 %3, -1
 store i8 %dec3, i8* %s8
 %tobool4 = icmp eq i8 %dec3, 0
@@ -115,18 +159,32 @@ if.end4:
 tail call void @other(%struct.obj2* %o) nounwind
 br label %return
-return: ; preds = %if.end4, %if.end, %entry
+return: ; preds = %if.end4, %if.end, %entry
 ret void
 }
-; CHECK: example_inc
 define void @example_inc(%struct.obj2* %o) nounwind uwtable ssp {
 ; 64 bit inc
+; CHECK-LABEL: example_inc:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: incq (%rdi)
+; CHECK-NEXT: jne .LBB4_4
+; CHECK-NEXT: # %bb.1: # %if.end
+; CHECK-NEXT: incl 8(%rdi)
+; CHECK-NEXT: jne .LBB4_4
+; CHECK-NEXT: # %bb.2: # %if.end1
+; CHECK-NEXT: incw 12(%rdi)
+; CHECK-NEXT: jne .LBB4_4
+; CHECK-NEXT: # %bb.3: # %if.end2
+; CHECK-NEXT: incb 14(%rdi)
+; CHECK-NEXT: je .LBB4_5
+; CHECK-NEXT: .LBB4_4: # %return
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB4_5: # %if.end4
+; CHECK-NEXT: jmp other # TAILCALL
 entry:
 %s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
-; CHECK-NOT: load
 %0 = load i64, i64* %s64, align 8
-; CHECK: incq ({{.*}})
 %inc = add i64 %0, 1
 store i64 %inc, i64* %s64, align 8
 %tobool = icmp eq i64 %inc, 0
@@ -135,9 +193,7 @@ entry:
 ; 32 bit inc
 if.end:
 %s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
-; CHECK-NOT: load
 %1 = load i32, i32* %s32, align 4
-; CHECK: incl {{[0-9][0-9]*}}({{.*}})
 %inc1 = add i32 %1, 1
 store i32 %inc1, i32* %s32, align 4
 %tobool2 = icmp eq i32 %inc1, 0
@@ -146,9 +202,7 @@ if.end:
 ; 16 bit inc
 if.end1:
 %s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
-; CHECK-NOT: load
 %2 = load i16, i16* %s16, align 2
-; CHECK: incw {{[0-9][0-9]*}}({{.*}})
 %inc2 = add i16 %2, 1
 store i16 %inc2, i16* %s16, align 2
 %tobool3 = icmp eq i16 %inc2, 0
@@ -157,9 +211,7 @@ if.end1:
 ; 8 bit inc
 if.end2:
 %s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
-; CHECK-NOT: load
 %3 = load i8, i8* %s8
-; CHECK: incb {{[0-9][0-9]*}}({{.*}})
 %inc3 = add i8 %3, 1
 store i8 %inc3, i8* %s8
 %tobool4 = icmp eq i8 %inc3, 0
@@ -178,9 +230,16 @@ return:
 @foo = external global i64*, align 8
 define void @test3() nounwind ssp {
-entry:
 ; CHECK-LABEL: test3:
-; CHECK: decq 16(%rax)
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq {{.*}}(%rip), %rax
+; CHECK-NEXT: decq 16(%rax)
+; CHECK-NEXT: je .LBB5_2
+; CHECK-NEXT: # %bb.1: # %if.end
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB5_2: # %if.then
+; CHECK-NEXT: jmp baz # TAILCALL
+entry:
 %0 = load i64*, i64** @foo, align 8
 %arrayidx = getelementptr inbounds i64, i64* %0, i64 2
 %1 = load i64, i64* %arrayidx, align 8
@@ -208,6 +267,16 @@ declare void @baz()
 @z = external global i32, align 4
 define void @test4() nounwind uwtable ssp {
+; CHECK-LABEL: test4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: decl {{.*}}(%rip)
+; CHECK-NEXT: je .LBB6_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: movl {{.*}}(%rip), %eax
+; CHECK-NEXT: .LBB6_2: # %entry
+; CHECK-NEXT: movl %eax, {{.*}}(%rip)
+; CHECK-NEXT: retq
 entry:
 %0 = load i32, i32* @x, align 4
 %1 = load i32, i32* @y, align 4
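To reproduce what the updated checks verify without going through lit, the test's RUN line can be executed by hand. This is only a sketch: it assumes llc and FileCheck from a local build are on PATH and that the command is run from the monorepo root.

```sh
# Manually perform the test's RUN line: codegen the IR with llc, then verify
# the emitted assembly against the CHECK lines embedded in the same file.
llc < llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll -mtriple=x86_64-- \
  | FileCheck llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
```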

