author     Ingo Molnar <mingo@elte.hu>  2009-11-26 10:50:39 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-11-26 10:50:42 +0100
commit     16bc67edeb49b531940b2ba6c183780a1b5c472d (patch)
tree       71b4bc48e47e54f2c0b3126d8f81d2f31b707ea8 /kernel/sched.c
parent     f6630114d9198aa959ac95c131334c020038f253 (diff)
parent     047106adcc85e3023da210143a6ab8a55df9e0fc (diff)
Merge branch 'sched/urgent' into sched/core
Merge reason: Pick up fixes that did not make it into .32.0

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 86
1 file changed, 64 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 48ff66a6892d..315ba4059f93 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
*/
static DEFINE_SPINLOCK(task_group_lock);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
}
#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else /* !CONFIG_USER_SCHED */
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
-struct update_shares_data {
- unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
static void update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares,
unsigned long sd_rq_weight,
- struct update_shares_data *usd)
+ unsigned long *usd_rq_weight)
{
unsigned long shares, rq_weight;
int boost = 0;
- rq_weight = usd->rq_weight[cpu];
+ rq_weight = usd_rq_weight[cpu];
if (!rq_weight) {
boost = 1;
rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
static int tg_shares_up(struct task_group *tg, void *data)
{
unsigned long weight, rq_weight = 0, shares = 0;
- struct update_shares_data *usd;
+ unsigned long *usd_rq_weight;
struct sched_domain *sd = data;
unsigned long flags;
int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
return 0;
local_irq_save(flags);
- usd = &__get_cpu_var(update_shares_data);
+ usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
for_each_cpu(i, sched_domain_span(sd)) {
weight = tg->cfs_rq[i]->load.weight;
- usd->rq_weight[i] = weight;
+ usd_rq_weight[i] = weight;
/*
* If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
shares = tg->shares;
for_each_cpu(i, sched_domain_span(sd))
- update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+ update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
local_irq_restore(flags);
@@ -1996,6 +1993,39 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running);
}
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @p to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ /* Must have done schedule() in kthread() before we set_task_cpu */
+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ update_rq_clock(rq);
+ set_task_cpu(p, cpu);
+ p->cpus_allowed = cpumask_of_cpu(cpu);
+ p->rt.nr_cpus_allowed = 1;
+ p->flags |= PF_THREAD_BOUND;
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
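The kthread_bind() helper added in the hunk above is, per its kerneldoc, meant to be called on a thread that is still stopped, i.e. straight after kthread_create() and before the first wake_up_process(), and unlike set_cpus_allowed() it only requires the target CPU to be possible, not online. A minimal module sketch of that usage follows; the example_* identifiers and the choice of CPU 0 are illustrative only and not part of this commit:

	#include <linux/init.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/err.h>

	static struct task_struct *example_thread;

	/* Hypothetical worker: sleeps until asked to stop. */
	static int example_worker(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int __init example_init(void)
	{
		/* The thread is created stopped; bind it before its first wakeup. */
		example_thread = kthread_create(example_worker, NULL, "example/%u", 0);
		if (IS_ERR(example_thread))
			return PTR_ERR(example_thread);

		kthread_bind(example_thread, 0);	/* CPU 0 must be a possible CPU */
		wake_up_process(example_thread);
		return 0;
	}

	static void __exit example_exit(void)
	{
		kthread_stop(example_thread);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
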
@@ -2008,7 +2038,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
/*
* Buddy candidates are cache hot:
*/
- if (sched_feat(CACHE_HOT_BUDDY) &&
+ if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
@@ -2085,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
* it is sufficient to simply update the task's cpu field.
*/
if (!p->se.on_rq && !task_running(rq, p)) {
+ update_rq_clock(rq);
set_task_cpu(p, dest_cpu);
return 0;
}
@@ -2346,14 +2377,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
task_rq_unlock(rq, &flags);
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
- if (cpu != orig_cpu)
+ if (cpu != orig_cpu) {
+ local_irq_save(flags);
+ rq = cpu_rq(cpu);
+ update_rq_clock(rq);
set_task_cpu(p, cpu);
-
+ local_irq_restore(flags);
+ }
rq = task_rq_lock(p, &flags);
- if (rq != orig_rq)
- update_rq_clock(rq);
-
WARN_ON(p->state != TASK_WAKING);
cpu = task_cpu(p);
@@ -2526,6 +2558,7 @@ static void __sched_fork(struct task_struct *p)
void sched_fork(struct task_struct *p, int clone_flags)
{
int cpu = get_cpu();
+ unsigned long flags;
__sched_fork(p);
@@ -2562,7 +2595,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
#ifdef CONFIG_SMP
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
#endif
+ local_irq_save(flags);
+ update_rq_clock(cpu_rq(cpu));
set_task_cpu(p, cpu);
+ local_irq_restore(flags);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
@@ -2732,9 +2768,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
prev_state = prev->state;
finish_arch_switch(prev);
perf_event_task_sched_in(current, cpu_of(rq));
+ fire_sched_in_preempt_notifiers(current);
finish_lock_switch(rq, prev);
- fire_sched_in_preempt_notifiers(current);
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
@@ -7898,6 +7934,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
static void free_rootdomain(struct root_domain *rd)
{
+ synchronize_sched();
+
cpupri_cleanup(&rd->cpupri);
free_cpumask_var(rd->rto_mask);
@@ -9449,6 +9487,10 @@ void __init sched_init(void)
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_GROUP_SCHED */
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+ update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+ __alignof__(unsigned long));
+#endif
for_each_possible_cpu(i) {
struct rq *rq;
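The hunks above replace a static DEFINE_PER_CPU structure containing an NR_CPUS-sized array with a per-CPU buffer of nr_cpu_ids entries allocated once at boot, which shrinks the footprint on kernels built with a large NR_CPUS. A minimal sketch of that allocation pattern, using hypothetical example_* names rather than the scheduler's own and assuming it runs after the per-CPU allocator is initialized:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	/* One unsigned long slot per possible CPU, replicated per CPU. */
	static unsigned long *example_weights;

	static int example_setup(void)
	{
		example_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
						 __alignof__(unsigned long));
		return example_weights ? 0 : -ENOMEM;
	}

	static void example_record(unsigned long weight)
	{
		unsigned long *w;
		int cpu;

		/* Work on this CPU's copy with preemption disabled. */
		cpu = get_cpu();
		w = per_cpu_ptr(example_weights, cpu);
		w[cpu] = weight;
		put_cpu();
	}

The tg_shares_up() hunk earlier in the diff fetches its slice the same way, via per_cpu_ptr(update_shares_data, smp_processor_id()) with local interrupts disabled.
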
@@ -9576,13 +9618,13 @@ void __init sched_init(void)
current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
- alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ
- alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
- alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
perf_event_init();
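Finally, the alloc_cpumask_var() to zalloc_cpumask_var() conversions in the last hunk matter because with CONFIG_CPUMASK_OFFSTACK=y the mask storage is allocated from the heap and its contents are undefined, whereas a static cpumask_var_t with CONFIG_CPUMASK_OFFSTACK=n happens to be zeroed in BSS; zalloc_cpumask_var() guarantees a cleared mask in both configurations. A small sketch of the pattern, with a hypothetical example_mask rather than the scheduler's masks:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static cpumask_var_t example_mask;

	static int example_mask_init(void)
	{
		/* Returns an all-clear mask whether or not cpumasks are
		 * allocated off-stack (CONFIG_CPUMASK_OFFSTACK). */
		if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}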