#include <timer.h>
#include <timebase.h>
#include <lock.h>
#include <fsp.h>
#include <device.h>
#include <opal.h>

#ifdef __TEST__
#define this_cpu() ((void *)-1)
#define cpu_relax()
#else
#include <cpu.h>
#endif

static struct lock timer_lock = LOCK_UNLOCKED;
static LIST_HEAD(timer_list);
static LIST_HEAD(timer_poll_list);
static bool timer_in_poll;
static uint64_t timer_poll_gen;
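
/* Initialize a timer to a known idle state before first use: record the
 * expiry callback and its opaque data pointer, and mark the timer as not
 * linked into any list and not currently running.
 */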
void init_timer(struct timer *t, timer_func_t expiry, void *data)
{
	t->link.next = t->link.prev = NULL;
	t->target = 0;
	t->expiry = expiry;
	t->user_data = data;
	t->running = NULL;
}

static void __remove_timer(struct timer *t)
{
	list_del(&t->link);
	t->link.next = t->link.prev = NULL;
}
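
/* Wait for a run of this timer's expiry callback on another CPU to
 * complete. Must be called with timer_lock held; the lock is dropped and
 * re-taken while spinning. The assert guards against calling this from
 * the timer's own expiry callback, which would never make progress.
 */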
static void __sync_timer(struct timer *t)
{
	sync();

	/* Guard against re-entrancy */
	assert(t->running != this_cpu());

	while (t->running) {
		unlock(&timer_lock);
		cpu_relax();
		/* Should we call the pollers here ? */
		lock(&timer_lock);
	}
}
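
/* Wait for any in-flight run of the timer's expiry callback to finish,
 * without cancelling the timer.
 */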
void sync_timer(struct timer *t)
{
	lock(&timer_lock);
	__sync_timer(t);
	unlock(&timer_lock);
}
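
/* Synchronous cancel: wait for a running expiry callback to complete,
 * then remove the timer from its list if it is still queued. Must not be
 * called from the timer's own expiry callback.
 */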
void cancel_timer(struct timer *t)
{
	lock(&timer_lock);
	__sync_timer(t);
	if (t->link.next)
		__remove_timer(t);
	unlock(&timer_lock);
}
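
/* Asynchronous cancel: remove the timer from its list if queued, but do
 * not wait for a concurrently running expiry callback, which may still be
 * executing when this returns.
 */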
void cancel_timer_async(struct timer *t)
{
	lock(&timer_lock);
	if (t->link.next)
		__remove_timer(t);
	unlock(&timer_lock);
}
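
/* (Re-)arm a timer for an absolute timebase value, or add it to the poll
 * list when 'when' is TIMER_POLL. The expiry list is kept sorted by
 * target time so the checking code only ever has to look at its head.
 */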
void schedule_timer_at(struct timer *t, uint64_t when)
{
	struct timer *lt;

	lock(&timer_lock);
	if (t->link.next)
		__remove_timer(t);
	t->target = when;
	if (when == TIMER_POLL) {
		t->gen = timer_poll_gen;
		list_add_tail(&timer_poll_list, &t->link);
	} else {
		list_for_each(&timer_list, lt, link) {
			if (when < lt->target) {
				list_add_before(&timer_list, &t->link,
						&lt->link);
				unlock(&timer_lock);
				return;
			}
		}
		list_add_tail(&timer_list, &t->link);
	}
	unlock(&timer_lock);
}
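
/* Relative variant: arm the timer 'how_long' timebase ticks from now (or
 * add it to the poll list for TIMER_POLL) and return the timebase value
 * that was used as the reference point.
 */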
uint64_t schedule_timer(struct timer *t, uint64_t how_long)
{
	uint64_t now = mftb();

	if (how_long == TIMER_POLL)
		schedule_timer_at(t, TIMER_POLL);
	else
		schedule_timer_at(t, now + how_long);

	return now;
}
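
/* Illustrative usage sketch, not part of this file: a typical consumer
 * embeds a struct timer, initializes it once and re-arms it from its own
 * expiry callback. msecs_to_tb() from timebase.h is assumed here purely
 * for the example's tick conversion.
 *
 *	static struct timer my_timer;
 *
 *	static void my_expiry(struct timer *t, void *data)
 *	{
 *		// ... do periodic work ...
 *		schedule_timer(t, msecs_to_tb(100));
 *	}
 *
 *	// at init time:
 *	init_timer(&my_timer, my_expiry, NULL);
 *	schedule_timer(&my_timer, msecs_to_tb(100));
 */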

static void __check_poll_timers(void)
{
	struct timer *t;

	/* Don't call this from multiple CPUs at once */
	if (timer_in_poll)
		return;
	timer_in_poll = true;

	/*
	 * Poll timers might re-enqueue themselves and don't have an
	 * expiry so we can't do like normal timers and just run until
	 * we hit a wall. Instead, each timer has a generation count,
	 * which we set to the current global gen count when we schedule
	 * it and update when we run it. It will only be considered if
	 * the generation count is different than the current one. We
	 * don't try to compare generations being larger or smaller
	 * because at boot, this can be called quite quickly and I want
	 * to be safe vs. wraps.
	 */
	timer_poll_gen++;
	for (;;) {
		t = list_top(&timer_poll_list, struct timer, link);

		/* Top of the list already has the current generation ?
		 * Then it was (re-)scheduled during this pass and all the
		 * older entries have been handled, we are done.
		 */
		if (!t || t->gen == timer_poll_gen)
			break;

		/* Top of list still running, we have to delay handling it.
		 * For now just skip until the next poll; when we have SLW
		 * interrupts, we'll probably want to trip another one ASAP.
		 */
		if (t->running)
			break;

		/* Alright, first remove it and mark it running */
		__remove_timer(t);
		t->running = this_cpu();

		/* Now we can unlock and call its expiry */
		unlock(&timer_lock);
		t->expiry(t, t->user_data);

		/* Re-lock and mark not running */
		lock(&timer_lock);
		t->running = NULL;
	}
	timer_in_poll = false;
}

static void __check_timers(uint64_t now)
{
	struct timer *t;

	for (;;) {
		t = list_top(&timer_list, struct timer, link);

		/* Top of list not expired ? that's it ... */
		if (!t || t->target > now)
			break;

		/* Top of list still running, we have to delay handling it.
		 * For now just skip until the next poll; when we have SLW
		 * interrupts, we'll probably want to trip another one ASAP.
		 */
		if (t->running)
			break;

		/* Alright, first remove it and mark it running */
		__remove_timer(t);
		t->running = this_cpu();

		/* Now we can unlock and call its expiry */
		unlock(&timer_lock);
		t->expiry(t, t->user_data);

		/* Re-lock and mark not running */
		lock(&timer_lock);
		t->running = NULL;

		/* Update time stamp */
		now = mftb();
	}
}

void check_timers(bool from_interrupt)
{
	struct timer *t;
	uint64_t now = mftb();

	/* This is the polling variant; the SLW interrupt path, when it
	 * exists, will use a slight variant of this that doesn't call
	 * the pollers.
	 */

	/* Lockless "peek", a bit racy but shouldn't be a problem */
	t = list_top(&timer_list, struct timer, link);
	if (list_empty(&timer_poll_list) && (!t || t->target > now))
		return;

	/* Take lock and try again */
	lock(&timer_lock);
	if (!from_interrupt)
		__check_poll_timers();
	__check_timers(now);
	unlock(&timer_lock);
}

#ifndef __TEST__
void late_init_timers(void)
{
	/* Add a property requesting the OS to call opal_poll_events() at
	 * a specified interval in order for us to run our background
	 * low priority poller.
	 *
	 * When we have a working SLW based HW timer, we'll be able to
	 * reduce this or even remove it; for now however, we want to be
	 * called at least every couple of seconds on FSP based machines
	 * and a bit faster on BMC based machines where the LPC and i2c
	 * interrupts might not be functional.
	 *
	 * We use a value in milliseconds; we don't want this to ever be
	 * faster than that.
	 */
	if (fsp_present())
		dt_add_property_cells(opal_node, "ibm,heartbeat-ms", 2000);
	else
		dt_add_property_cells(opal_node, "ibm,heartbeat-ms", 250);
}
#endif