; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

;
; DemandedBits - MOVMSK zeros the upper bits of the result.
;
define i32 @test_upper_x86_sse_movmsk_ps(<4 x float> %a0) {
; CHECK-LABEL: @test_upper_x86_sse_movmsk_ps(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
; CHECK-NEXT: ret i32 [[TMP1]]
;
  %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
  %2 = and i32 %1, 15
  ret i32 %2
}

define i32 @test_upper_x86_sse2_movmsk_pd(<2 x double> %a0) {
; CHECK-LABEL: @test_upper_x86_sse2_movmsk_pd(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
; CHECK-NEXT: ret i32 [[TMP1]]
;
  %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
  %2 = and i32 %1, 3
  ret i32 %2
}

define i32 @test_upper_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
; CHECK-LABEL: @test_upper_x86_sse2_pmovmskb_128(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
; CHECK-NEXT: ret i32 [[TMP1]]
;
  %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
  %2 = and i32 %1, 65535
  ret i32 %2
}

define i32 @test_upper_x86_avx_movmsk_ps_256(<8 x float> %a0) {
; CHECK-LABEL: @test_upper_x86_avx_movmsk_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
; CHECK-NEXT: ret i32 [[TMP1]]
;
  %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
  %2 = and i32 %1, 255
  ret i32 %2
}

define i32 @test_upper_x86_avx_movmsk_pd_256(<4 x double> %a0) {
; CHECK-LABEL: @test_upper_x86_avx_movmsk_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
; CHECK-NEXT: ret i32 [[TMP1]]
;
  %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
  %2 = and i32 %1, 15
  ret i32 %2
}

; llvm.x86.avx2.pmovmskb uses the whole of the 32-bit register.
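
; A hand-written negative sketch (not autogenerated, and the function name is
; ours): every bit of the avx2 pmovmskb result is live, so masking off the
; upper 16 bits discards real mask data and the 'and' is expected to survive.
define i32 @test_upper_x86_avx2_pmovmskb(<32 x i8> %a0) {
; CHECK-LABEL: @test_upper_x86_avx2_pmovmskb(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0)
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 65535
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0)
  %2 = and i32 %1, 65535
  ret i32 %2
}
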
;
; DemandedBits - If we don't use the lower bits then we just return zero.
;
define i32 @test_lower_x86_sse_movmsk_ps(<4 x float> %a0) {
; CHECK-LABEL: @test_lower_x86_sse_movmsk_ps(
; CHECK-NEXT: ret i32 0
;
  %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
  %2 = and i32 %1, -16
  ret i32 %2
}

define i32 @test_lower_x86_sse2_movmsk_pd(<2 x double> %a0) {
; CHECK-LABEL: @test_lower_x86_sse2_movmsk_pd(
; CHECK-NEXT: ret i32 0
;
  %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
  %2 = and i32 %1, -4
  ret i32 %2
}

define i32 @test_lower_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
; CHECK-LABEL: @test_lower_x86_sse2_pmovmskb_128(
; CHECK-NEXT: ret i32 0
;
  %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
  %2 = and i32 %1, -65536
  ret i32 %2
}

define i32 @test_lower_x86_avx_movmsk_ps_256(<8 x float> %a0) {
; CHECK-LABEL: @test_lower_x86_avx_movmsk_ps_256(
; CHECK-NEXT: ret i32 0
;
  %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
  %2 = and i32 %1, -256
  ret i32 %2
}

define i32 @test_lower_x86_avx_movmsk_pd_256(<4 x double> %a0) {
; CHECK-LABEL: @test_lower_x86_avx_movmsk_pd_256(
; CHECK-NEXT: ret i32 0
;
  %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
  %2 = and i32 %1, -16
  ret i32 %2
}

; llvm.x86.avx2.pmovmskb uses the whole of the 32-bit register.
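
; A hand-written negative sketch (not autogenerated, and the function name is
; ours): bits 16-31 of the avx2 pmovmskb result are still live, so this should
; not fold to zero and the 'and' is expected to remain.
define i32 @test_lower_x86_avx2_pmovmskb(<32 x i8> %a0) {
; CHECK-LABEL: @test_lower_x86_avx2_pmovmskb(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0)
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -65536
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0)
  %2 = and i32 %1, -65536
  ret i32 %2
}
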
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>)
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>)
declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>)
declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>)
declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>)