Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c      | 47
-rw-r--r--  kernel/sched/deadline.c  |  1
-rw-r--r--  kernel/sched/fair.c      | 29
-rw-r--r--  kernel/sched/rt.c        |  1
4 files changed, 50 insertions, 28 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3..d1f7149f8704 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif /* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)                                          \
+({                                                                   \
+        typeof(ptr) _ptr = (ptr);                                    \
+        typeof(mask) _mask = (mask);                                 \
+        typeof(*_ptr) _old, _val = *_ptr;                            \
+                                                                     \
+        for (;;) {                                                   \
+                _old = cmpxchg(_ptr, _val, _val | _mask);            \
+                if (_old == _val)                                    \
+                        break;                                       \
+                _val = _old;                                         \
+        }                                                            \
+        _old;                                                        \
+})
+
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
@@ -578,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                 return false;
 
         /*
-         * FIFO realtime policy runs the highest priority task (after DEADLINE).
-         * Other runnable tasks are of a lower priority. The scheduler tick
-         * isn't needed.
-         */
-        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-        if (fifo_nr_running)
-                return true;
-
-        /*
-         * Round-robin realtime tasks time slice with other tasks at the same
-         * realtime priority.
+         * If there are more than one RR tasks, we need the tick to effect the
+         * actual RR behaviour.
          */
         if (rq->rt.rr_nr_running) {
                 if (rq->rt.rr_nr_running == 1)
@@ -597,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                         return false;
         }
 
-        /* Normal multitasking need periodic preemption checks */
-        if (rq->cfs.nr_running > 1)
+        /*
+         * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
+         * forced preemption between FIFO tasks.
+         */
+        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+        if (fifo_nr_running)
+                return true;
+
+        /*
+         * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
+         * if there's more than one we need the tick for involuntary
+         * preemption.
+         */
+        if (rq->nr_running > 1)
                 return false;
 
         return true;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index affd97ec9f65..686ec8adf952 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                              !cpumask_test_cpu(later_rq->cpu,
                                                &task->cpus_allowed) ||
                              task_running(rq, task) ||
+                             !dl_task(task) ||
                              !task_on_rq_queued(task))) {
                         double_unlock_balance(rq, later_rq);
                         later_rq = NULL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fe30e66aff1..e7dd0ec169be 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+        struct rq *rq = rq_of(cfs_rq);
+
+        cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -3181,25 +3188,17 @@ static inline void check_schedstat_required(void)
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-        bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
-        bool curr = cfs_rq->curr == se;
-
         /*
-         * If we're the current task, we must renormalise before calling
-         * update_curr().
+         * Update the normalized vruntime before updating min_vruntime
+         * through calling update_curr().
          */
-        if (renorm && curr)
+        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                 se->vruntime += cfs_rq->min_vruntime;
 
-        update_curr(cfs_rq);
-
         /*
-         * Otherwise, renormalise after, such that we're placed at the current
-         * moment in time, instead of some random moment in the past.
+         * Update run-time statistics of the 'current'.
          */
-        if (renorm && !curr)
-                se->vruntime += cfs_rq->min_vruntime;
-
+        update_curr(cfs_rq);
         enqueue_entity_load_avg(cfs_rq, se);
         account_entity_enqueue(cfs_rq, se);
         update_cfs_shares(cfs_rq);
@@ -3215,7 +3214,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                 update_stats_enqueue(cfs_rq, se);
                 check_spread(cfs_rq, se);
         }
-        if (!curr)
+        if (se != cfs_rq->curr)
                 __enqueue_entity(cfs_rq, se);
         se->on_rq = 1;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c41ea7ac1764..ec4f538d4396 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                              !cpumask_test_cpu(lowest_rq->cpu,
                                                tsk_cpus_allowed(task)) ||
                              task_running(rq, task) ||
+                             !rt_task(task) ||
                              !task_on_rq_queued(task))) {
 
                         double_unlock_balance(rq, lowest_rq);
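A few stand-alone sketches of the techniques used in the hunks above follow; none of them are kernel code. First, the fetch_or() macro added to core.c is a cmpxchg() retry loop that atomically ORs a mask into a word and evaluates to the value seen just before the OR. The sketch below reproduces that pattern in user-space GNU C, substituting GCC's __sync_val_compare_and_swap() builtin for the kernel's cmpxchg():

/*
 * User-space sketch of the cmpxchg-based fetch_or pattern (GNU C).
 * __sync_val_compare_and_swap() stands in for the kernel's cmpxchg().
 */
#include <stdio.h>

#define fetch_or(ptr, mask)                                                   \
({                                                                            \
        typeof(ptr) _ptr = (ptr);                                             \
        typeof(mask) _mask = (mask);                                          \
        typeof(*_ptr) _old, _val = *_ptr;                                     \
                                                                              \
        for (;;) {                                                            \
                /* Install _val | _mask only if *_ptr is still _val. */       \
                _old = __sync_val_compare_and_swap(_ptr, _val, _val | _mask); \
                if (_old == _val)                                             \
                        break;          /* we won the race */                 \
                _val = _old;            /* someone changed it; retry */       \
        }                                                                     \
        _old;           /* value observed just before our OR took effect */   \
})

int main(void)
{
        unsigned long flags = 0x1;
        unsigned long prev = fetch_or(&flags, 0x4UL);

        /* prev == 0x1, flags == 0x5 */
        printf("prev=%#lx flags=%#lx\n", prev, flags);
        return 0;
}

The loop only exits when the compare-and-swap installs _val | _mask over an unchanged _val, so the returned _old is exactly the word's contents immediately before the OR was applied.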
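Second, the reordered sched_can_stop_tick() hunks check the scheduling classes from strictest to weakest tick requirement: more than one SCHED_RR task needs the tick for round-robin time slicing, a FIFO-only realtime load does not, and two or more remaining (CFS) tasks need it for involuntary preemption. The compact sketch below mirrors that ordering; struct rq_counts and can_stop_tick() are illustrative stand-ins rather than the kernel's struct rq and function, and a SCHED_DEADLINE check is folded in on the assumption that it sits just above the hunk's leading "return false;" context:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the runnable-task counters kept in struct rq. */
struct rq_counts {
        unsigned int dl_nr_running;     /* SCHED_DEADLINE tasks             */
        unsigned int rt_nr_running;     /* all realtime tasks (FIFO + RR)   */
        unsigned int rr_nr_running;     /* SCHED_RR subset of rt_nr_running */
        unsigned int nr_running;        /* every runnable task on the rq    */
};

static bool can_stop_tick(const struct rq_counts *rq)
{
        unsigned int fifo_nr_running;

        /* Deadline tasks, even a single one, keep the tick running. */
        if (rq->dl_nr_running)
                return false;

        /* More than one SCHED_RR task: the tick drives round-robin slicing. */
        if (rq->rr_nr_running)
                return rq->rr_nr_running == 1;

        /* Only FIFO tasks among the RT load: no forced preemption needed. */
        fifo_nr_running = rq->rt_nr_running - rq->rr_nr_running;
        if (fifo_nr_running)
                return true;

        /* Only CFS tasks are left; two or more need the tick to preempt. */
        return rq->nr_running <= 1;
}

int main(void)
{
        struct rq_counts one_fifo = { .rt_nr_running = 1, .nr_running = 1 };
        struct rq_counts two_cfs  = { .nr_running = 2 };

        printf("one FIFO task: %d, two CFS tasks: %d\n",
               can_stop_tick(&one_fifo), can_stop_tick(&two_cfs));
        return 0;
}

Running it prints 1 for the single-FIFO case and 0 for the two-CFS case, matching the intent of the new ordering.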
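Finally, the enqueue_entity() hunk returns to the scheme where a migrating task's vruntime is kept relative to its old runqueue's min_vruntime while it is off the queue, and is renormalized on enqueue before update_curr() can advance min_vruntime. A toy, user-space illustration of that arithmetic with made-up numbers (no kernel types or flags beyond the names mentioned in the hunk):

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
        u64 src_min_vruntime = 1000;    /* min_vruntime of the source rq  */
        u64 se_vruntime      = 1300;    /* task's absolute vruntime there */

        /* Dequeue for migration: keep only the relative part. */
        se_vruntime -= src_min_vruntime;        /* 300 of relative lag */

        u64 dst_min_vruntime = 50000;   /* destination rq has run far longer */

        /*
         * Enqueue without ENQUEUE_WAKEUP (or with ENQUEUE_WAKING): shift the
         * relative value onto the destination rq's timeline, as the hunk's
         * "if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))" branch does.
         */
        se_vruntime += dst_min_vruntime;        /* 50300, comparable again */

        printf("renormalized vruntime: %llu\n", se_vruntime);
        return 0;
}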