; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; fold (urem undef, x) -> 0
define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_undef0:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_undef0:
; AVX: # BB#0:
; AVX-NEXT: retq
%1 = urem <4 x i32> undef, %x
ret <4 x i32> %1
}

; fold (urem x, undef) -> undef
define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_undef1:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_undef1:
; AVX: # BB#0:
; AVX-NEXT: retq
%1 = urem <4 x i32> %x, undef
ret <4 x i32> %1
}

; fold (urem x, pow2) -> (and x, (pow2-1))
define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2a:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2a:
; AVX1: # BB#0:
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2a:
; AVX2: # BB#0:
; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
ret <4 x i32> %1
}

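; The same fold applies with non-uniform power-of-2 divisors (lane 0 is a
; urem by 1, which always yields 0, so its mask lane is all zeros).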
define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2b:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_by_pow2b:
; AVX: # BB#0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %1
}

; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
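; On SSE4.1 and AVX1, which lack a per-lane variable vector shift, the checked
; sequence materializes (pow2 << y) through the float exponent: pslld $23 moves
; y into the exponent field, paddd adds the exponent-bias splat to form the
; float 2^y, cvttps2dq truncates back to integer, and a final shift/multiply
; applies the base. AVX2 uses vpsllvd directly. The mask is then formed as
; (shifted pow2) + -1 via pcmpeqd/paddd before the final and.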
define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2a:
; SSE: # BB#0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: pslld $2, %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: paddd %xmm1, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpslld $2, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
; AVX2: # BB#0:
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
%2 = urem <4 x i32> %x, %1
ret <4 x i32> %2
}

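; Non-uniform power-of-2 base vector: the SSE/AVX1 lowering scales 2^y by
; <1,4,8,16> with pmulld instead of a uniform pslld.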
define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2b:
; SSE: # BB#0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: paddd %xmm1, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
%2 = urem <4 x i32> %x, %1
ret <4 x i32> %2
}