; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
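
; These tests cover lowering of a 256-bit vector compare whose <N x i1> result
; is bitcast to an integer bitmask. With AVX2 the mask is extracted via the
; movmsk family of instructions; with AVX512 the compare writes a mask
; register, which is read back with kmovd.

; v16i16: signed compare of <16 x i16>, bitcast to i16. AVX2 packs the word
; results down to bytes (vpacksswb) before vpmovmskb; AVX512 reads %k0 directly.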
define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX2-LABEL: v16i16:
; AVX2: ## BB#0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v16i16:
; AVX512: ## BB#0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <16 x i16> %a, %b
%res = bitcast <16 x i1> %x to i16
ret i16 %res
}
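
; v8i32: signed compare of <8 x i32>, bitcast to i8. AVX2 uses vmovmskps on the
; dword compare result; AVX512 reads the mask register.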
define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX2-LABEL: v8i32:
; AVX2: ## BB#0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i32:
; AVX512: ## BB#0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <8 x i32> %a, %b
%res = bitcast <8 x i1> %x to i8
ret i8 %res
}
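
; v8f32: fcmp ogt of <8 x float>, bitcast to i8. The ogt compare is emitted as
; a lt compare with the operands swapped (vcmpltps %ymm0, %ymm1).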
define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; AVX2-LABEL: v8f32:
; AVX2: ## BB#0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8f32:
; AVX512: ## BB#0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = fcmp ogt <8 x float> %a, %b
%res = bitcast <8 x i1> %x to i8
ret i8 %res
}
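
; v32i8: signed compare of <32 x i8>, bitcast to i32. AVX2 gets the full 32-bit
; mask straight from vpmovmskb on the ymm register, so no sub-register copy is
; needed.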
define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2-LABEL: v32i8:
; AVX2: ## BB#0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v32i8:
; AVX512: ## BB#0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <32 x i8> %a, %b
%res = bitcast <32 x i1> %x to i32
ret i32 %res
}
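
; v4i64: signed compare of <4 x i64>, bitcast to i4. AVX2 uses vmovmskpd; the
; AVX512 path currently round-trips the mask through a stack slot (movb
; store/reload) to produce the illegal i4 return type.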
define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-LABEL: v4i64:
; AVX2: ## BB#0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4i64:
; AVX512: ## BB#0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = icmp sgt <4 x i64> %a, %b
%res = bitcast <4 x i1> %x to i4
ret i4 %res
}
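
; v4f64: fcmp ogt of <4 x double>, bitcast to i4. As with v8f32, the compare is
; commuted to vcmpltpd, and the AVX512 path shares the stack round-trip seen in
; v4i64.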
define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; AVX2-LABEL: v4f64:
; AVX2: ## BB#0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4f64:
; AVX512: ## BB#0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = fcmp ogt <4 x double> %a, %b
%res = bitcast <4 x i1> %x to i4
ret i4 %res
}