author | Steven Rostedt <srostedt@redhat.com> | 2008-01-25 21:08:06 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 21:08:06 +0100
commit | 4fd29176b7cd24909f8ceba2105cb3ae2857b90c (patch)
tree | 2d83445de1d500cd3794a73e6d9d35b44444e259 /kernel/sched_rt.c
parent | e8fa136262e1121288bb93befe2295928ffd240d (diff)
sched: add rt-overload tracking
This patch adds an RT overload accounting system. When a runqueue has
more than one RT task queued, it is marked as overloaded; that is, it
becomes a candidate to have RT tasks pulled from it.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
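The subtle part of the patch is the ordering in rt_set_overload(): the mask bit must become visible before the counter that advertises it, hence the wmb(). Below is a minimal, standalone sketch of that same pattern rewritten with C11 atomics for illustration; all identifiers here (overload_mask, overload_count, the *_sketch functions) are hypothetical stand-ins, and the release/acquire pairing only approximates the kernel's wmb() plus plain reads.

```c
#include <stdatomic.h>
#include <stdio.h>

/* One bit per CPU: which runqueues currently hold more than one RT task. */
static atomic_ulong overload_mask;

/* Fast-path gate: nonzero means "some runqueue is overloaded". */
static atomic_int overload_count;

static void rt_set_overload_sketch(int cpu)
{
	atomic_fetch_or_explicit(&overload_mask, 1UL << cpu,
				 memory_order_relaxed);
	/*
	 * Publish the mask bit before bumping the count, mirroring the
	 * wmb() in the patch: a reader that sees a nonzero count must
	 * also see the bit, or it would scan a stale mask.
	 */
	atomic_fetch_add_explicit(&overload_count, 1, memory_order_release);
}

static void rt_clear_overload_sketch(int cpu)
{
	/* As the patch comment says, ordering is not critical here. */
	atomic_fetch_sub_explicit(&overload_count, 1, memory_order_relaxed);
	atomic_fetch_and_explicit(&overload_mask, ~(1UL << cpu),
				  memory_order_relaxed);
}

static int rt_overloaded_sketch(void)
{
	/* Pairs with the release increment in rt_set_overload_sketch(). */
	return atomic_load_explicit(&overload_count, memory_order_acquire);
}

int main(void)
{
	rt_set_overload_sketch(3);
	if (rt_overloaded_sketch())
		printf("overloaded CPUs: %#lx\n",
		       atomic_load(&overload_mask));
	rt_clear_overload_sketch(3);
	return 0;
}
```

On the clear side the patch decrements first and clears the bit second; per its own comment the order is harmless there, presumably because a puller that still sees a stale bit simply finds nothing to pull from that runqueue and moves on.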
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r-- | kernel/sched_rt.c | 36 |
1 file changed, 36 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 7815e90b1147..547f858b0752 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,38 @@
  * policies)
  */
 
+#ifdef CONFIG_SMP
+static cpumask_t rt_overload_mask;
+static atomic_t rto_count;
+static inline int rt_overloaded(void)
+{
+	return atomic_read(&rto_count);
+}
+static inline cpumask_t *rt_overload(void)
+{
+	return &rt_overload_mask;
+}
+static inline void rt_set_overload(struct rq *rq)
+{
+	cpu_set(rq->cpu, rt_overload_mask);
+	/*
+	 * Make sure the mask is visible before we set
+	 * the overload count. That is checked to determine
+	 * if we should look at the mask. It would be a shame
+	 * if we looked at the mask, but the mask was not
+	 * updated yet.
+	 */
+	wmb();
+	atomic_inc(&rto_count);
+}
+static inline void rt_clear_overload(struct rq *rq)
+{
+	/* the order here really doesn't matter */
+	atomic_dec(&rto_count);
+	cpu_clear(rq->cpu, rt_overload_mask);
+}
+#endif /* CONFIG_SMP */
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -33,6 +65,8 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 #ifdef CONFIG_SMP
 	if (p->prio < rq->rt.highest_prio)
 		rq->rt.highest_prio = p->prio;
+	if (rq->rt.rt_nr_running > 1)
+		rt_set_overload(rq);
 #endif /* CONFIG_SMP */
 }
@@ -54,6 +88,8 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
 		} /* otherwise leave rq->highest prio alone */
 	} else
 		rq->rt.highest_prio = MAX_RT_PRIO;
+	if (rq->rt.rt_nr_running < 2)
+		rt_clear_overload(rq);
 #endif /* CONFIG_SMP */
 }
```