author | Ingo Molnar <mingo@elte.hu> | 2008-07-28 23:32:00 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-28 23:32:00 +0200 |
commit | 9e3ee1c39c0cc71222f9980ccbf87fe072897eef (patch) | |
tree | 99462000e6f0d4f907cb2fc690f19d4d441ba0f3 /include/linux/stop_machine.h | |
parent | e56b3bc7942982ac2589c942fb345e38bc7a341a (diff) | |
parent | f934fb19ef34730263e6afc01e8ec27a8a71470f (diff) | |
Merge branch 'linus' into cpus4096
Conflicts:
kernel/stop_machine.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/stop_machine.h')
-rw-r--r-- | include/linux/stop_machine.h | 50 |
1 file changed, 33 insertions, 17 deletions
```diff
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 5bfc553bdb21..f1cb0ba6d715 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -5,41 +5,43 @@
    (and more).  So the "read" side to such a lock is anything which
    diables preeempt. */
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <asm/system.h>
 
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/* Deprecated, but useful for transition. */
+#define ALL_CPUS ~0U
+
 /**
- * stop_machine_run: freeze the machine on all CPUs and run this function
+ * stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn()
- * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
- * Description: This causes a thread to be scheduled on every other cpu,
- * each of which disables interrupts, and finally interrupts are disabled
- * on the current CPU.  The result is that noone is holding a spinlock
- * or inside any other preempt-disabled region when @fn() runs.
+ * Description: This causes a thread to be scheduled on every cpu,
+ * each of which disables interrupts.  The result is that noone is
+ * holding a spinlock or inside any other preempt-disabled region when
+ * @fn() runs.
  *
  * This can be thought of as a very heavy write lock, equivalent to
  * grabbing every spinlock in the kernel. */
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 
 /**
- * __stop_machine_run: freeze the machine on all CPUs and run this function
+ * __stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn
- * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
- * Description: This is a special version of the above, which returns the
- * thread which has run @fn(): kthread_stop will return the return value
- * of @fn().  Used by hotplug cpu.
+ * Description: This is a special version of the above, which assumes cpus
+ * won't come or go while it's being called.  Used by hotplug cpu.
  */
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu);
-
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 #else
 
-static inline int stop_machine_run(int (*fn)(void *), void *data,
-				   unsigned int cpu)
+static inline int stop_machine(int (*fn)(void *), void *data,
+			       const cpumask_t *cpus)
 {
 	int ret;
 	local_irq_disable();
@@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
 	return ret;
 }
 #endif /* CONFIG_SMP */
+
+static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
+						unsigned int cpu)
+{
+	/* If they don't care which cpu fn runs on, just pick one. */
+	if (cpu == NR_CPUS)
+		return stop_machine(fn, data, NULL);
+	else if (cpu == ~0U)
+		return stop_machine(fn, data, &cpu_possible_map);
+	else {
+		cpumask_t cpus = cpumask_of_cpu(cpu);
+		return stop_machine(fn, data, &cpus);
+	}
+}
 #endif /* _LINUX_STOP_MACHINE */
```
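For context, here is a minimal caller sketch showing how code would use the new `stop_machine()` interface declared above. The callback name, the `example_data` struct, and the caller function are hypothetical and not part of this patch; they only illustrate the prototype `int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)` on a kernel tree with this change applied.

```c
/*
 * Hypothetical caller sketch, not part of this commit.
 * Runs a callback with every CPU stopped, using the new stop_machine()
 * interface.  Passing NULL for @cpus means "run @fn on any online cpu".
 */
#include <linux/stop_machine.h>
#include <linux/cpumask.h>

struct example_data {		/* illustrative name only */
	int value;
};

/* Called with interrupts off on one CPU while all others spin. */
static int example_update(void *data)
{
	struct example_data *d = data;

	d->value++;		/* no other CPU can run concurrently here */
	return 0;
}

static int example_caller(void)
{
	struct example_data d = { .value = 0 };

	/* New interface: NULL cpumask = any online cpu runs the callback. */
	return stop_machine(example_update, &d, NULL);

	/*
	 * Old, now-deprecated equivalent:
	 *	stop_machine_run(example_update, &d, NR_CPUS);
	 */
}
```

The deprecated `stop_machine_run()` wrapper in the diff shows the transition mapping: `NR_CPUS` becomes a NULL cpumask, `ALL_CPUS` (`~0U`) maps to `cpu_possible_map`, and a specific CPU number is converted with `cpumask_of_cpu()`.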