; Source file: llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
; (cgit web-scrape banner and line-number gutter removed; original test
; content begins with the RUN line below.)
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Trivial optimization of generic addressing

; A load through a flat (addrspace 4) pointer that was cast to global
; (addrspace 1) should stay addressed in addrspace(1); the cast of the
; generic argument itself is kept since its source is a function argument.
; CHECK-LABEL: @load_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  %tmp1 = load float, float addrspace(1)* %tmp0
  ret float %tmp1
}

; Same as above but casting flat to constant (addrspace 2): the load
; should remain addressed in addrspace(2).
; CHECK-LABEL: @load_constant_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(2)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_constant_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
  %tmp1 = load float, float addrspace(2)* %tmp0
  ret float %tmp1
}

; Same as above but casting flat to group/LDS (addrspace 3): the load
; should remain addressed in addrspace(3).
; CHECK-LABEL: @load_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  %tmp1 = load float, float addrspace(3)* %tmp0
  ret float %tmp1
}

; Same as above but casting flat to private (addrspace 0, the default
; "float*"): the load should remain addressed in the private space.
; CHECK-LABEL: @load_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: %tmp1 = load float, float* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  %tmp1 = load float, float* %tmp0
  ret float %tmp1
}

; Store variant: a store through a flat pointer cast to global should
; stay addressed in addrspace(1). Note the 0.0 constant is printed in
; exponent form by the IR printer, hence the CHECK text.
; CHECK-LABEL: @store_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
define void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  store float 0.0, float addrspace(1)* %tmp0
  ret void
}

; Store variant for group/LDS (addrspace 3): the store should stay
; addressed in addrspace(3).
; CHECK-LABEL: @store_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
define void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  store float 0.0, float addrspace(3)* %tmp0
  ret void
}

; Store variant for private (addrspace 0): the store should stay
; addressed through the plain "float*".
; CHECK-LABEL: @store_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: store float 0.000000e+00, float* %tmp0
define void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  store float 0.0, float* %tmp0
  ret void
}

; Optimized to global load/store: both casts to flat (addrspace 4) are
; eliminated and the memory operations use the original addrspace(1)
; arguments directly.
; CHECK-LABEL: @load_store_global(
; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
; CHECK-NEXT: ret void
define void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to group load/store: the group->flat casts are eliminated
; and the memory operations use the addrspace(3) arguments directly.
; CHECK-LABEL: @load_store_group(
; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
; CHECK-NEXT: ret void
define void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to private load/store: the private->flat casts are
; eliminated and the memory operations use the plain "i32*" arguments
; directly.
; CHECK-LABEL: @load_store_private(
; CHECK-NEXT: %val = load i32, i32* %input, align 4
; CHECK-NEXT: store i32 %val, i32* %output, align 4
; CHECK-NEXT: ret void
define void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {
  %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; No optimization: the arguments are already flat (addrspace 4), so
; there is no narrower address space to infer and the load/store are
; left untouched.
; CHECK-LABEL: @load_store_flat(
; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4
; CHECK-NEXT: ret void
define void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {
  %val = load i32, i32 addrspace(4)* %input, align 4
  store i32 %val, i32 addrspace(4)* %output, align 4
  ret void
}

; Here the flat pointer produced by the cast is the *value* being
; stored, not the address stored through, so the cast must be kept:
; the stored bits are a genuine addrspace(4) pointer.
; CHECK-LABEL: @store_addrspacecast_ptr_value(
; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
define void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {
  %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
  ret void
}

attributes #0 = { nounwind }
; (trailing web-page footer removed)