; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
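
; InferAddressSpaces rewrites a memory operation on a flat (generic) pointer
; into an operation on a specific address space when the pointer's source can
; be inferred. Address spaces used in this test: 0 = private, 1 = global,
; 2 = constant, 3 = local (group), 4 = flat (generic).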

; Trivial optimization of generic addressing

; CHECK-LABEL: @load_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  %tmp1 = load float, float addrspace(1)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_constant_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(2)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_constant_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
  %tmp1 = load float, float addrspace(2)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  %tmp1 = load float, float addrspace(3)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: %tmp1 = load float, float* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  %tmp1 = load float, float* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @store_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
define amdgpu_kernel void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  store float 0.0, float addrspace(1)* %tmp0
  ret void
}

; CHECK-LABEL: @store_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
define amdgpu_kernel void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  store float 0.0, float addrspace(3)* %tmp0
  ret void
}

; CHECK-LABEL: @store_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: store float 0.000000e+00, float* %tmp0
define amdgpu_kernel void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  store float 0.0, float* %tmp0
  ret void
}

; Optimized to global load/store.
; CHECK-LABEL: @load_store_global(
; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to group load/store.
; CHECK-LABEL: @load_store_group(
; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to private load/store.
; CHECK-LABEL: @load_store_private(
; CHECK-NEXT: %val = load i32, i32* %input, align 4
; CHECK-NEXT: store i32 %val, i32* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {
  %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; No optimization: the pointers are already flat, so the flat load/store remain.
; CHECK-LABEL: @load_store_flat(
; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {
  %val = load i32, i32 addrspace(4)* %input, align 4
  store i32 %val, i32 addrspace(4)* %output, align 4
  ret void
}

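; The addrspacecast result is stored as a value, not used as the pointer
; operand of a memory operation, so the cast must be preserved.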
; CHECK-LABEL: @store_addrspacecast_ptr_value(
; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {
  %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
  ret void
}

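; Pointer operands of atomicrmw and cmpxchg are inferred the same way as
; load/store pointer operands.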
; CHECK-LABEL: @atomicrmw_add_global_to_flat(
; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(1)* %global.ptr, i32 %y seq_cst
define i32 @atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
  ret i32 %ret
}

; CHECK-LABEL: @atomicrmw_add_group_to_flat(
; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(3)* %group.ptr, i32 %y seq_cst
define i32 @atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
  ret i32 %ret
}

; CHECK-LABEL: @cmpxchg_global_to_flat(
; CHECK: %ret = cmpxchg i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
define { i32, i1 } @cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
  ret { i32, i1 } %ret
}

; CHECK-LABEL: @cmpxchg_group_to_flat(
; CHECK: %ret = cmpxchg i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
define { i32, i1 } @cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
  ret { i32, i1 } %ret
}

; The addrspacecast is used as the compare operand, not the pointer operand,
; so it is not rewritten.
; CHECK-LABEL: @cmpxchg_group_to_flat_wrong_operand(
; CHECK: %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
; CHECK: %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
define { i32 addrspace(4)*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(3)* %cmp.ptr, i32 addrspace(4)* %val) #0 {
  %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
  ret { i32 addrspace(4)*, i1 } %ret
}

attributes #0 = { nounwind }