author     Gautham R Shenoy <ego@in.ibm.com>                         2007-05-09 02:34:04 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>     2007-05-09 12:30:51 -0700
commit     5be9361cdff17fc76fa0c3e262ead94158555f16
tree       c0fd48bc0a39a2ba8a71677467f3d4e164b40f65
parent     baaca49f415b25fdbe2a8f3c22b39929e450fbfd
Eliminate lock_cpu_hotplug in kernel/sched.c
Eliminate lock_cpu_hotplug from kernel/sched.c and use sched_hotcpu_mutex
instead to postpone a hotplug event.
In the migration_call hotcpu callback function, take sched_hotcpu_mutex while
handling the CPU_LOCK_ACQUIRE event and release it while handling the
CPU_LOCK_RELEASE event.
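For readers converting other lock_cpu_hotplug() call sites, the pattern looks
roughly like this in isolation (a minimal sketch against this kernel's CPU
notifier API; my_subsys_mutex, my_hotcpu_callback, and my_reader are
illustrative names, not part of this patch):

    #include <linux/mutex.h>
    #include <linux/notifier.h>
    #include <linux/cpu.h>

    static DEFINE_MUTEX(my_subsys_mutex);  /* this patch uses sched_hotcpu_mutex */

    /*
     * CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE bracket every hotplug
     * operation, so holding the mutex between the two events makes the
     * subsystem's readers and the hotplug path mutually exclusive.
     */
    static int my_hotcpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
    {
            switch (action) {
            case CPU_LOCK_ACQUIRE:
                    mutex_lock(&my_subsys_mutex);
                    break;
            case CPU_LOCK_RELEASE:
                    mutex_unlock(&my_subsys_mutex);
                    break;
            }
            return NOTIFY_OK;
    }

    /*
     * A reader that needs cpu_online_map to stay stable takes the same
     * mutex instead of calling lock_cpu_hotplug().
     */
    static void my_reader(void)
    {
            mutex_lock(&my_subsys_mutex);
            /* cpu_online_map cannot change here */
            mutex_unlock(&my_subsys_mutex);
    }

The callback is registered on the CPU notifier chain (via register_cpu_notifier()
or the hotcpu_notifier() helper), the same chain that delivers these events to
migration_call below.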
[akpm@linux-foundation.org: fix deadlock]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  kernel/sched.c | 28 ++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 66bd7ff23f18..fe1a9c2b855a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -305,6 +305,7 @@ struct rq {
 };
 
 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -4520,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
         struct task_struct *p;
         int retval;
 
-        lock_cpu_hotplug();
+        mutex_lock(&sched_hotcpu_mutex);
         read_lock(&tasklist_lock);
 
         p = find_process_by_pid(pid);
         if (!p) {
                 read_unlock(&tasklist_lock);
-                unlock_cpu_hotplug();
+                mutex_unlock(&sched_hotcpu_mutex);
                 return -ESRCH;
         }
 
@@ -4553,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 out_unlock:
         put_task_struct(p);
-        unlock_cpu_hotplug();
+        mutex_unlock(&sched_hotcpu_mutex);
         return retval;
 }
 
@@ -4610,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
         struct task_struct *p;
         int retval;
 
-        lock_cpu_hotplug();
+        mutex_lock(&sched_hotcpu_mutex);
         read_lock(&tasklist_lock);
 
         retval = -ESRCH;
@@ -4626,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
         read_unlock(&tasklist_lock);
-        unlock_cpu_hotplug();
+        mutex_unlock(&sched_hotcpu_mutex);
         if (retval)
                 return retval;
 
@@ -5388,6 +5389,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
         struct rq *rq;
 
         switch (action) {
+        case CPU_LOCK_ACQUIRE:
+                mutex_lock(&sched_hotcpu_mutex);
+                break;
+
         case CPU_UP_PREPARE:
                 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                 if (IS_ERR(p))
@@ -5433,7 +5438,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 BUG_ON(rq->nr_running != 0);
 
                 /* No need to migrate the tasks: it was best-effort if
-                 * they didn't do lock_cpu_hotplug(). Just wake up
+                 * they didn't take sched_hotcpu_mutex. Just wake up
                  * the requestors. */
                 spin_lock_irq(&rq->lock);
                 while (!list_empty(&rq->migration_queue)) {
@@ -5447,6 +5452,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 spin_unlock_irq(&rq->lock);
                 break;
 #endif
+        case CPU_LOCK_RELEASE:
+                mutex_unlock(&sched_hotcpu_mutex);
+                break;
         }
         return NOTIFY_OK;
 }
@@ -6822,10 +6830,10 @@ int arch_reinit_sched_domains(void)
 {
         int err;
 
-        lock_cpu_hotplug();
+        mutex_lock(&sched_hotcpu_mutex);
         detach_destroy_domains(&cpu_online_map);
         err = arch_init_sched_domains(&cpu_online_map);
-        unlock_cpu_hotplug();
+        mutex_unlock(&sched_hotcpu_mutex);
 
         return err;
 }
@@ -6930,12 +6938,12 @@ void __init sched_init_smp(void)
 {
         cpumask_t non_isolated_cpus;
 
-        lock_cpu_hotplug();
+        mutex_lock(&sched_hotcpu_mutex);
         arch_init_sched_domains(&cpu_online_map);
         cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
         if (cpus_empty(non_isolated_cpus))
                 cpu_set(smp_processor_id(), non_isolated_cpus);
-        unlock_cpu_hotplug();
+        mutex_unlock(&sched_hotcpu_mutex);
 
         /* XXX: Theoretical race here - CPU may be hotplugged now */
         hotcpu_notifier(update_sched_domains, 0);