1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
|
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; The inner zext makes the i32 value known non-negative, so the outer sext
; carries no extra sign information; instcombine collapses the pair into a
; single zext from i16 straight to i64.
define i64 @test_sext_zext(i16 %A) {
; CHECK-LABEL: @test_sext_zext(
; CHECK-NEXT: [[C2:%.*]] = zext i16 %A to i64
; CHECK-NEXT: ret i64 [[C2]]
;
  %c1 = zext i16 %A to i32
  %c2 = sext i32 %c1 to i64
  ret i64 %c2
}
; Vector logical-not (xor with all-ones) feeding a zext: the CHECK lines
; expect the IR to be left exactly as written, i.e. no (mis)fold is applied
; to the vector xor/zext pair.
define <2 x i64> @test2(<2 x i1> %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i1> %A, <i1 true, i1 true>
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[ZEXT]]
;
  %xor = xor <2 x i1> %A, <i1 true, i1 true>
  %zext = zext <2 x i1> %xor to <2 x i64>
  ret <2 x i64> %zext
}
; trunc -> and -> zext on a vector: the whole narrowing round trip is folded
; into a single 'and' performed directly in the original i64 element type
; (the mask constants are simply widened).
define <2 x i64> @test3(<2 x i64> %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> %A, <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[AND]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %zext = zext <2 x i32> %and to <2 x i64>
  ret <2 x i64> %zext
}
; Like test3 but with an extra xor in the narrow type: the trunc/and/xor/zext
; chain is rewritten entirely in i64. The expected output xors with
; 4294967295 (0xFFFFFFFF, the widened all-ones of the i32 xor) and then
; masks with the widened 'and' constants.
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> %A, <i64 4294967295, i64 4294967295>
; CHECK-NEXT: [[XOR:%.*]] = and <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[XOR]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %xor = xor <2 x i32> %and, <i32 23, i32 42>
  %zext = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext
}
; An 'xor 1' sandwiched between two zexts of an i1: instcombine pushes the
; xor down onto the i1 value (xor i1 %a, true) so only one zext, directly
; to i64, remains.
define i64 @fold_xor_zext_sandwich(i1 %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 %a, true
; CHECK-NEXT: [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
; CHECK-NEXT: ret i64 [[ZEXT2]]
;
  %zext1 = zext i1 %a to i32
  %xor = xor i32 %zext1, 1
  %zext2 = zext i32 %xor to i64
  ret i64 %zext2
}
; Vector counterpart of fold_xor_zext_sandwich: the splat 'xor 1' between
; two zexts is narrowed to an i1-level xor with <true, true>, leaving a
; single zext to <2 x i64>.
define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> %a, <i1 true, i1 true>
; CHECK-NEXT: [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[ZEXT2]]
;
  %zext1 = zext <2 x i1> %a to <2 x i32>
  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
  %zext2 = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext2
}
; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.
; The 'and' is performed on the i1 compare results and a single zext to i8
; remains, instead of two separate zexts feeding an i8 'and'.
; CHECK-LABEL: @fold_and_zext_icmp(
; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ICMP1]], [[ICMP2]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[AND]] to i8
; CHECK-NEXT: ret i8 [[ZEXT]]
define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}
; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.
; Same shape as fold_and_zext_icmp but with 'or': the disjunction happens
; on the i1 values and only one zext to i8 survives.
; CHECK-LABEL: @fold_or_zext_icmp(
; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP1]], [[ICMP2]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[OR]] to i8
; CHECK-NEXT: ret i8 [[ZEXT]]
define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = or i8 %2, %4
  ret i8 %5
}
; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.
; Same shape again with 'xor': the exclusive-or is narrowed to i1 and a
; single zext to i8 remains.
; CHECK-LABEL: @fold_xor_zext_icmp(
; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[ICMP1]], [[ICMP2]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[XOR]] to i8
; CHECK-NEXT: ret i8 [[ZEXT]]
define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = xor i8 %2, %4
  ret i8 %5
}
; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
; nested logical operators: here or(and(zext, zext), zext) collapses so all
; boolean logic happens on i1 values and a single final zext to i8 remains.
; CHECK-LABEL: @fold_nested_logic_zext_icmp(
; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ICMP1]], [[ICMP2]]
; CHECK-NEXT: [[ICMP3:%.*]] = icmp eq i64 %a, %d
; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND]], [[ICMP3]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[OR]] to i8
; CHECK-NEXT: ret i8 [[ZEXT]]
define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  %6 = icmp eq i64 %a, %d
  %7 = zext i1 %6 to i8
  %8 = or i8 %5, %7
  ret i8 %8
}
|