author      Jan Beulich <jbeulich@novell.com>      2007-05-02 19:27:05 +0200
committer   Andi Kleen <andi@basil.nowhere.org>    2007-05-02 19:27:05 +0200
commit      9964cf7d776600724ef5f1b33303ceadc588b8ba (patch)
tree        80cf8f027b251ed5243d4f8e2219782abfe18df6
parent      b0354795c9c8fef2fadf8f867586c78efd9a1dc9 (diff)
[PATCH] x86: consolidate smp_send_stop()
Synchronize i386's smp_send_stop() with x86-64's in only try-locking the call lock, to prevent deadlocks when called from panic().

In both versions, disable interrupts before clearing the CPU off the online map, to eliminate races with IRQ handlers inspecting this map.

Also in both versions, save/restore interrupts rather than disabling/enabling them.

On x86-64, eliminate one function used here by folding it into its single caller, convert it to static, and rename it for consistency with i386 (lkcd may like this).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--   arch/i386/kernel/smp.c     68
-rw-r--r--   arch/x86_64/kernel/smp.c   28
-rw-r--r--   include/asm-x86_64/smp.h    1
3 files changed, 48 insertions, 49 deletions
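The core pattern applied on both architectures is the same: try-lock the IPI call lock so a CPU that panics while already holding it does not deadlock, and only unlock on the way out if the trylock actually succeeded. Below is a minimal userspace C analogue of that pattern using POSIX spinlocks; it is a sketch only, and the names fake_call_lock and fake_send_stop are illustrative, not taken from the patch.

/*
 * Userspace analogue of the try-lock pattern smp_send_stop() uses in
 * this patch: if the call lock is already held (e.g. we panicked from
 * inside smp_call_function()), skip taking it instead of deadlocking,
 * and only unlock if we really acquired it.  All names here are
 * hypothetical; build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t fake_call_lock;

static void fake_send_stop(void)
{
	/* Don't deadlock on the call lock in panic */
	int nolock = pthread_spin_trylock(&fake_call_lock) != 0;

	/* ... the cross-CPU "stop" request would be issued here ... */
	printf("stop request sent (lock %s)\n",
	       nolock ? "was busy, skipped" : "taken");

	if (!nolock)
		pthread_spin_unlock(&fake_call_lock);
}

int main(void)
{
	pthread_spin_init(&fake_call_lock, PTHREAD_PROCESS_PRIVATE);

	/* Normal path: lock is free, trylock succeeds. */
	fake_send_stop();

	/* Panic-while-holding-the-lock path: trylock fails, no deadlock. */
	pthread_spin_lock(&fake_call_lock);
	fake_send_stop();
	pthread_spin_unlock(&fake_call_lock);

	pthread_spin_destroy(&fake_call_lock);
	return 0;
}

The same reasoning explains the switch from local_irq_disable()/local_irq_enable() to local_irq_save()/local_irq_restore() in the diff below: a caller that already runs with interrupts off must not have them turned back on behind its back.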
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 0e8977871b1f..0cd459baad68 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -515,35 +515,14 @@ void unlock_ipi_call_lock(void)
static struct call_data_struct *call_data;
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
+static void __smp_call_function(void (*func) (void *info), void *info,
+ int nonatomic, int wait)
{
struct call_data_struct data;
- int cpus;
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
- cpus = num_online_cpus() - 1;
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
+ int cpus = num_online_cpus() - 1;
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ if (!cpus)
+ return;
data.func = func;
data.info = info;
@@ -565,6 +544,30 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait)
while (atomic_read(&data.finished) != cpus)
cpu_relax();
+}
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ __smp_call_function(func, info, nonatomic, wait);
spin_unlock(&call_lock);
return 0;
@@ -573,11 +576,11 @@ EXPORT_SYMBOL(smp_call_function);
static void stop_this_cpu (void * dummy)
{
+ local_irq_disable();
/*
* Remove this CPU:
*/
cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_disable();
disable_local_APIC();
if (cpu_data[smp_processor_id()].hlt_works_ok)
for(;;) halt();
@@ -590,11 +593,16 @@ static void stop_this_cpu (void * dummy)
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ /* Don't deadlock on the call lock in panic */
+ int nolock = !spin_trylock(&call_lock);
+ unsigned long flags;
- local_irq_disable();
+ local_irq_save(flags);
+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
disable_local_APIC();
- local_irq_enable();
+ local_irq_restore(flags);
}
/*
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index af1ec4d23cf8..bd1d123947ce 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -452,42 +452,34 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
}
EXPORT_SYMBOL(smp_call_function);
-void smp_stop_cpu(void)
+static void stop_this_cpu(void *dummy)
{
- unsigned long flags;
+ local_irq_disable();
/*
* Remove this CPU:
*/
cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_save(flags);
disable_local_APIC();
- local_irq_restore(flags);
-}
-
-static void smp_really_stop_cpu(void *dummy)
-{
- smp_stop_cpu();
for (;;)
halt();
}
void smp_send_stop(void)
{
- int nolock = 0;
+ int nolock;
+ unsigned long flags;
+
if (reboot_force)
return;
+
/* Don't deadlock on the call lock in panic */
- if (!spin_trylock(&call_lock)) {
- /* ignore locking because we have panicked anyways */
- nolock = 1;
- }
- __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
+ nolock = !spin_trylock(&call_lock);
+ local_irq_save(flags);
+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
if (!nolock)
spin_unlock(&call_lock);
-
- local_irq_disable();
disable_local_APIC();
- local_irq_enable();
+ local_irq_restore(flags);
}
/*
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index f4236d7789aa..d5704421456b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -37,7 +37,6 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
-void smp_stop_cpu(void);
extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];