path: root/kernel/sched_fair.c
author      Ingo Molnar <mingo@elte.hu>    2007-12-04 17:04:39 +0100
committer   Ingo Molnar <mingo@elte.hu>    2007-12-04 17:04:39 +0100
commit      db292ca302e83534f5f0f7139e13d7e6976e51f9 (patch)
tree        5d217514330e447b82f07601cdc87e6fd135d76b /kernel/sched_fair.c
parent      77034937dc4575ca0a76bf209838ecd39e804089 (diff)
sched: default to more aggressive yield for SCHED_BATCH tasks
Do a more aggressive yield for SCHED_BATCH-tuned tasks: they are all about throughput anyway. This also allows a gentler migration path for any apps that relied on the stronger yield.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
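For context, a task opts into this policy from userspace via sched_setscheduler(2); any task set up that way now gets the aggressive yield by default. A minimal sketch of such a batch worker (the loop body and iteration count are illustrative only, not part of this commit):

/*
 * Sketch: run the calling task as SCHED_BATCH, the policy whose
 * sched_yield() behaviour this commit changes.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 }; /* must be 0 for SCHED_BATCH */

	if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
		perror("sched_setscheduler");
		return EXIT_FAILURE;
	}

	for (int i = 0; i < 1000; i++) {
		/* ... a unit of throughput-oriented work ... */
		sched_yield(); /* with this patch, yields aggressively by default */
	}
	return EXIT_SUCCESS;
}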
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--    kernel/sched_fair.c    7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37bb265598db..c33f0ceb3de9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
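Note the new test only routes SCHED_BATCH tasks around the gentle path; the sysctl_sched_compat_yield escape hatch the branch already checked still works for every policy. A hedged sketch of flipping that knob, assuming it is exposed at /proc/sys/kernel/sched_compat_yield as on kernels of this era (the path may differ or be absent elsewhere):

/*
 * Sketch: enable compat (aggressive) yield globally. Assumes the knob
 * lives at /proc/sys/kernel/sched_compat_yield, which may not hold on
 * other kernel versions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "w");

	if (!f) {
		perror("sched_compat_yield");
		return EXIT_FAILURE;
	}
	fputs("1\n", f); /* 1 = aggressive yield for all tasks, not just SCHED_BATCH */
	fclose(f);
	return EXIT_SUCCESS;
}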