; llvm/test/CodeGen/X86/pr32284.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx -O0 | FileCheck %s --check-prefix=X86-O0
; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx -O0 | FileCheck %s --check-prefix=X64-O0

@c = external constant i8, align 1

; Regression test (PR32284): booleanization of a value reached through a
; redundant arithmetic double negation, and a signed compare of a logically
; double-negated i1 against the original value, must lower correctly on SKX
; (where i1 values may round-trip through AVX-512 mask registers at -O0 —
; note the kmovd sequences in the -O0 check blocks below).
define void @foo() {
; X86-LABEL: foo:
; X86:       # BB#0: # %entry
; X86-NEXT:    subl $8, %esp
; X86-NEXT:  .Lcfi0:
; X86-NEXT:    .cfi_def_cfa_offset 12
; X86-NEXT:    movzbl c, %eax
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    testl %eax, %eax
; X86-NEXT:    setne %cl
; X86-NEXT:    testb %al, %al
; X86-NEXT:    setne {{[0-9]+}}(%esp)
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    cmpl %eax, %ecx
; X86-NEXT:    setle %dl
; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    retl
;
; X86-O0-LABEL: foo:
; X86-O0:       # BB#0: # %entry
; X86-O0-NEXT:    subl $12, %esp
; X86-O0-NEXT:  .Lcfi0:
; X86-O0-NEXT:    .cfi_def_cfa_offset 16
; X86-O0-NEXT:    movzbl c, %eax
; X86-O0-NEXT:    testl %eax, %eax
; X86-O0-NEXT:    setne %cl
; X86-O0-NEXT:    movl %eax, %edx
; X86-O0-NEXT:    movb %dl, %ch
; X86-O0-NEXT:    testb %ch, %ch
; X86-O0-NEXT:    setne {{[0-9]+}}(%esp)
; X86-O0-NEXT:    movzbl %cl, %edx
; X86-O0-NEXT:    subl %eax, %edx
; X86-O0-NEXT:    setle %cl
; X86-O0-NEXT:    # implicit-def: %EAX
; X86-O0-NEXT:    movb %cl, %al
; X86-O0-NEXT:    andl $1, %eax
; X86-O0-NEXT:    kmovd %eax, %k0
; X86-O0-NEXT:    kmovd %k0, %eax
; X86-O0-NEXT:    movb %al, %cl
; X86-O0-NEXT:    andb $1, %cl
; X86-O0-NEXT:    movzbl %cl, %eax
; X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-O0-NEXT:    movl %edx, (%esp) # 4-byte Spill
; X86-O0-NEXT:    addl $12, %esp
; X86-O0-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # BB#0: # %entry
; X64-NEXT:    movzbl {{.*}}(%rip), %eax
; X64-NEXT:    testb %al, %al
; X64-NEXT:    setne -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    testl %eax, %eax
; X64-NEXT:    setne %cl
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    cmpl %eax, %ecx
; X64-NEXT:    setle %dl
; X64-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
; X64-NEXT:    retq
;
; X64-O0-LABEL: foo:
; X64-O0:       # BB#0: # %entry
; X64-O0-NEXT:    movzbl {{.*}}(%rip), %eax
; X64-O0-NEXT:    movl %eax, %ecx
; X64-O0-NEXT:    movb %cl, %dl
; X64-O0-NEXT:    movl %ecx, %eax
; X64-O0-NEXT:    testq %rcx, %rcx
; X64-O0-NEXT:    setne %sil
; X64-O0-NEXT:    testb %dl, %dl
; X64-O0-NEXT:    setne -{{[0-9]+}}(%rsp)
; X64-O0-NEXT:    movzbl %sil, %edi
; X64-O0-NEXT:    subl %eax, %edi
; X64-O0-NEXT:    setle %dl
; X64-O0-NEXT:    # implicit-def: %EAX
; X64-O0-NEXT:    movb %dl, %al
; X64-O0-NEXT:    andl $1, %eax
; X64-O0-NEXT:    kmovd %eax, %k0
; X64-O0-NEXT:    kmovd %k0, %eax
; X64-O0-NEXT:    movb %al, %dl
; X64-O0-NEXT:    andb $1, %dl
; X64-O0-NEXT:    movzbl %dl, %eax
; X64-O0-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # 4-byte Spill
; X64-O0-NEXT:    retq
entry:
  %a = alloca i8, align 1                ; a = (c != 0), stored as i8
  %b = alloca i32, align 4               ; b = ((c != 0) <= c) ? 1 : 0
  ; First store: booleanize c through a redundant arithmetic double negation
  ; -(i64)(-(i32)c) truncated to i8 folds back to the original value of c.
  %0 = load i8, i8* @c, align 1
  %conv = zext i8 %0 to i32
  %sub = sub nsw i32 0, %conv            ; -c (as i32)
  %conv1 = sext i32 %sub to i64
  %sub2 = sub nsw i64 0, %conv1          ; -(-c): identical to c (as i64)
  %conv3 = trunc i64 %sub2 to i8         ; back to the original i8 value
  %tobool = icmp ne i8 %conv3, 0
  %frombool = zext i1 %tobool to i8
  store i8 %frombool, i8* %a, align 1
  ; Second store: signed compare of the logically double-negated boolean
  ; (!!(c != 0), which is just (c != 0)) against the zero-extended c.
  %1 = load i8, i8* @c, align 1
  %tobool4 = icmp ne i8 %1, 0
  %lnot = xor i1 %tobool4, true          ; !(c != 0)
  %lnot5 = xor i1 %lnot, true            ; double negation: back to (c != 0)
  %conv6 = zext i1 %lnot5 to i32
  %2 = load i8, i8* @c, align 1
  %conv7 = zext i8 %2 to i32
  %cmp = icmp sle i32 %conv6, %conv7     ; signed: (c != 0) <= c
  %conv8 = zext i1 %cmp to i32
  store i32 %conv8, i32* %b, align 4
  ret void
}