commit 14531189f0a1071b928586e9e1a89eceac91d95f
parent 71f8bd4600521fecb08644072052b85853a5a615
tree   bb5ddb4a284e42b76a2f9378788ddb0ec4c8689f
author    Ingo Molnar <mingo@elte.hu>  2007-07-09 18:51:59 +0200
committer Ingo Molnar <mingo@elte.hu>  2007-07-09 18:51:59 +0200
sched: clean up __normal_prio() position
clean up: move __normal_prio() to directly above normal_prio().
no code changed.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c | 58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 683d2a524e61..5cd069b77fd7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -672,35 +672,6 @@ static inline void resched_task(struct task_struct *p)
 #include "sched_stats.h"
 
 /*
- * __normal_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-
-static inline int __normal_prio(struct task_struct *p)
-{
-	int bonus, prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
-/*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
  * each task makes to its run queue's load is weighted according to its
@@ -803,6 +774,35 @@ enqueue_task_head(struct task_struct *p, struct prio_array *array)
 }
 
 /*
+ * __normal_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+
+static inline int __normal_prio(struct task_struct *p)
+{
+	int bonus, prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > MAX_PRIO-1)
+		prio = MAX_PRIO-1;
+	return prio;
+}
+
+/*
  * Calculate the expected normal priority: i.e. priority
  * without taking RT-inheritance into account. Might be
  * boosted by interactivity modifiers. Changes upon fork,
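
Editor's note: since the moved function's bonus/clamp arithmetic is easy to misread, below is a minimal user-space sketch of the same logic. It assumes the 2.6-era scheduler constants (MAX_RT_PRIO 100, MAX_PRIO 140, MAX_BONUS 10, static_prio 120 for nice 0) and replaces CURRENT_BONUS(p), which the kernel derives from p->sleep_avg, with a plain field; struct task and demo_normal_prio() are hypothetical names for illustration, not kernel code.

#include <stdio.h>

/* Assumed 2.6-era constants, redefined here so the demo is standalone. */
#define MAX_RT_PRIO	100			/* priorities 0..99 are real-time */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* 100..139 map to nice -20..+19 */
#define MAX_BONUS	10			/* bonus range is +/- MAX_BONUS/2 */

/* Hypothetical stand-in for the task_struct fields used by __normal_prio().
 * current_bonus replaces CURRENT_BONUS(p), which the kernel computes from
 * the task's sleep average; it ranges 0..MAX_BONUS. */
struct task {
	int static_prio;	/* 120 == nice 0 */
	int current_bonus;
};

/* Mirrors the clamping logic of __normal_prio() from the diff above. */
static int demo_normal_prio(const struct task *p)
{
	int bonus = p->current_bonus - MAX_BONUS / 2;	/* shift to -5..+5 */
	int prio = p->static_prio - bonus;		/* lower == better */

	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;	/* never enter the RT range */
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;	/* never fall off the bottom */
	return prio;
}

int main(void)
{
	/* nice 0 task with a full sleep-average bonus: 120 - 5 = 115 */
	struct task interactive = { .static_prio = 120, .current_bonus = 10 };
	/* nice 0 CPU hog with no sleep credit: 120 + 5 = 125 */
	struct task hog = { .static_prio = 120, .current_bonus = 0 };

	printf("interactive: %d\n", demo_normal_prio(&interactive));	/* 115 */
	printf("cpu hog:     %d\n", demo_normal_prio(&hog));		/* 125 */
	return 0;
}

With these numbers the two properties claimed in the comment check out: a nice +19 task (static_prio 139) at full bonus still only reaches 134, a worse priority than the nice 0 hog at 125, so it cannot preempt it; and a nice -20 hog (static_prio 100) drops to 105 at worst, still ahead of a fully boosted nice 0 task at 115.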