; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -o - -mtriple=i686-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X32
; RUN: llc -o - -mtriple=x86_64-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X64
;
; Test patterns that require preserving and restoring flags.

@b = common global i8 0, align 1
@c = common global i32 0, align 4
@a = common global i8 0, align 1
@d = common global i8 0, align 1
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1

declare void @external(i32)

; A test that re-uses flags in interesting ways due to volatile accesses.
; Specifically, the first increment's flags are reused for the branch despite
; being clobbered by the second increment.
define i32 @test1() nounwind {
; X32-LABEL: test1:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movb b, %cl
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    incb %al
; X32-NEXT:    movb %al, b
; X32-NEXT:    incl c
; X32-NEXT:    sete %dl
; X32-NEXT:    movb a, %ah
; X32-NEXT:    movb %ah, %ch
; X32-NEXT:    incb %ch
; X32-NEXT:    cmpb %cl, %ah
; X32-NEXT:    sete d
; X32-NEXT:    movb %ch, a
; X32-NEXT:    testb $-1, %dl
; X32-NEXT:    jne .LBB0_2
; X32-NEXT:  # %bb.1: # %if.then
; X32-NEXT:    movsbl %al, %eax
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll external
; X32-NEXT:    addl $4, %esp
; X32-NEXT:  .LBB0_2: # %if.end
; X32-NEXT:    xorl %eax, %eax
; X32-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movb {{.*}}(%rip), %dil
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    incb %al
; X64-NEXT:    movb %al, {{.*}}(%rip)
; X64-NEXT:    incl {{.*}}(%rip)
; X64-NEXT:    sete %sil
; X64-NEXT:    movb {{.*}}(%rip), %cl
; X64-NEXT:    movl %ecx, %edx
; X64-NEXT:    incb %dl
; X64-NEXT:    cmpb %dil, %cl
; X64-NEXT:    sete {{.*}}(%rip)
; X64-NEXT:    movb %dl, {{.*}}(%rip)
; X64-NEXT:    testb $-1, %sil
; X64-NEXT:    jne .LBB0_2
; X64-NEXT:  # %bb.1: # %if.then
; X64-NEXT:    pushq %rax
; X64-NEXT:    movsbl %al, %edi
; X64-NEXT:    callq external
; X64-NEXT:    addq $8, %rsp
; X64-NEXT:  .LBB0_2: # %if.end
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    retq
entry:
  %bval = load i8, i8* @b
  %inc = add i8 %bval, 1
  store volatile i8 %inc, i8* @b
  %cval = load volatile i32, i32* @c
  %inc1 = add nsw i32 %cval, 1
  store volatile i32 %inc1, i32* @c
  %aval = load volatile i8, i8* @a
  %inc2 = add i8 %aval, 1
  store volatile i8 %inc2, i8* @a
  %cmp = icmp eq i8 %aval, %bval
  %conv5 = zext i1 %cmp to i8
  store i8 %conv5, i8* @d
  %tobool = icmp eq i32 %inc1, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  %conv6 = sext i8 %inc to i32
  call void @external(i32 %conv6)
  br label %if.end

if.end:
  ret i32 0
}
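
; Note how, in the checked output above, the flag of interest survives a
; clobbering instruction: it is materialized into a byte register with SETcc
; (e.g. `sete %dl` immediately after `incl c`) and later turned back into ZF
; with `testb $-1, %dl` right before the dependent `jne`. The tests below rely
; on the same save/restore pattern around calls and tail calls; attributing it
; to X86's EFLAGS copy lowering is an assumption, as the RUN lines do not pin
; down a specific pass.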

; Preserve increment flags across a call.
define i32 @test2(i32* %ptr) nounwind {
; X32-LABEL: test2:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %ebx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    incl (%eax)
; X32-NEXT:    setne %bl
; X32-NEXT:    pushl $42
; X32-NEXT:    calll external
; X32-NEXT:    addl $4, %esp
; X32-NEXT:    testb $-1, %bl
; X32-NEXT:    je .LBB1_1
; X32-NEXT:  # %bb.2: # %else
; X32-NEXT:    xorl %eax, %eax
; X32-NEXT:    popl %ebx
; X32-NEXT:    retl
; X32-NEXT:  .LBB1_1: # %then
; X32-NEXT:    movl $64, %eax
; X32-NEXT:    popl %ebx
; X32-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    incl (%rdi)
; X64-NEXT:    setne %bl
; X64-NEXT:    movl $42, %edi
; X64-NEXT:    callq external
; X64-NEXT:    testb $-1, %bl
; X64-NEXT:    je .LBB1_1
; X64-NEXT:  # %bb.2: # %else
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
; X64-NEXT:  .LBB1_1: # %then
; X64-NEXT:    movl $64, %eax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
entry:
  %val = load i32, i32* %ptr
  %inc = add i32 %val, 1
  store i32 %inc, i32* %ptr
  %cmp = icmp eq i32 %inc, 0
  call void @external(i32 42)
  br i1 %cmp, label %then, label %else

then:
  ret i32 64

else:
  ret i32 0
}

declare void @external_a()
declare void @external_b()

; This lowers to a conditional tail call instead of a conditional branch. This
; is tricky because we can only do this from a leaf function, and so we have to
; use volatile stores similar to test1 to force the save and restore of
; a condition without calling another function. We then set up subsequent calls
; in tail position.
define void @test_tail_call(i32* %ptr) nounwind optsize {
; X32-LABEL: test_tail_call:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    incl (%eax)
; X32-NEXT:    setne %al
; X32-NEXT:    incb a
; X32-NEXT:    sete d
; X32-NEXT:    testb $-1, %al
; X32-NEXT:    jne external_b # TAILCALL
; X32-NEXT:  # %bb.1: # %then
; X32-NEXT:    jmp external_a # TAILCALL
;
; X64-LABEL: test_tail_call:
; X64:       # %bb.0: # %entry
; X64-NEXT:    incl (%rdi)
; X64-NEXT:    setne %al
; X64-NEXT:    incb {{.*}}(%rip)
; X64-NEXT:    sete {{.*}}(%rip)
; X64-NEXT:    testb $-1, %al
; X64-NEXT:    jne external_b # TAILCALL
; X64-NEXT:  # %bb.1: # %then
; X64-NEXT:    jmp external_a # TAILCALL
entry:
  %val = load i32, i32* %ptr
  %inc = add i32 %val, 1
  store i32 %inc, i32* %ptr
  %cmp = icmp eq i32 %inc, 0
  %aval = load volatile i8, i8* @a
  %inc2 = add i8 %aval, 1
  store volatile i8 %inc2, i8* @a
  %cmp2 = icmp eq i8 %inc2, 0
  %conv5 = zext i1 %cmp2 to i8
  store i8 %conv5, i8* @d
  br i1 %cmp, label %then, label %else

then:
  tail call void @external_a()
  ret void

else:
  tail call void @external_b()
  ret void
}
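
; In test_tail_call the checked output shows the restored flag feeding a
; conditional tail call directly: `setne %al` captures the condition before
; `incb` clobbers EFLAGS, `testb $-1, %al` recreates ZF, and the branch is
; emitted as `jne external_b # TAILCALL`, with the fallthrough block
; tail-jumping to external_a.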