author     Steven Rostedt <rostedt@goodmis.org>        2012-11-15 11:34:21 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>    2012-11-17 19:31:03 +0100
commit     c0e980a4bd7fc5c9b748f2f0209d2a48c0fdf0ab (patch)
tree       39eda738087b5bda53b0cb06c9549656266a03c2 /kernel/irq_work.c
parent     00b42959106a9ca1c2899e591ae4e9a83ad6af05 (diff)
irq_work: Flush work on CPU_DYING
In order not to offline a CPU with pending irq works, flush the queue
from CPU_DYING. The notifier is called by stop_machine on the CPU that
is going down. The code will not be called from irq context (so things
like get_irq_regs() won't work), but I'm not sure what the requirements
are for irq_work in that regard (Peter?). But irqs are disabled and the
CPU is about to go offline. Might as well flush the work.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
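For context, a minimal sketch of how irq_work entries are typically queued; the sketch is not from this commit, and the names my_irq_work, my_irq_work_func, my_driver_init and my_nmi_or_irq_path are made up for illustration. Entries queued this way are the pending work that the CPU_DYING flush drains.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Callback invoked later from irq_work_run() in hard irq context, or,
 * with this patch, from the CPU_DYING notifier if the CPU goes offline
 * before the work has had a chance to run. */
static void my_irq_work_func(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_irq_work;

static void my_driver_init(void)
{
	/* Initialize once, before the entry can be queued. */
	init_irq_work(&my_irq_work, my_irq_work_func);
}

static void my_nmi_or_irq_path(void)
{
	/* Safe from NMI or hard irq context: the entry is pushed onto a
	 * per-CPU llist and executed later. If it is already pending,
	 * irq_work_queue() returns false and does nothing. */
	irq_work_queue(&my_irq_work);
}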
Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--  kernel/irq_work.c | 51
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index b3c113a14727..4ed17490f629 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,6 +12,8 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
 /*
@@ -110,11 +112,7 @@ bool irq_work_needs_cpu(void)
 	return true;
 }
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+static void __irq_work_run(void)
 {
 	struct irq_work *work;
 	struct llist_head *this_list;
@@ -124,7 +122,6 @@ void irq_work_run(void)
 	if (llist_empty(this_list))
 		return;
 
-	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
 	llnode = llist_del_all(this_list);
@@ -149,6 +146,16 @@ void irq_work_run(void)
 		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	BUG_ON(!in_irq());
+	__irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -163,3 +170,35 @@ void irq_work_sync(struct irq_work *work)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DYING:
+		/* Called from stop_machine */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			break;
+		__irq_work_run();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+	cpu_notify.notifier_call = irq_work_cpu_notify;
+	cpu_notify.priority = 0;
+	register_cpu_notifier(&cpu_notify);
+	return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
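A note on the registration style: the patch fills in the notifier_block fields at runtime inside the initcall. A minimal sketch of the more common static-initializer form of the same registration follows; this is a hypothetical alternative for comparison, not the patch's code, and irq_work_cpu_nb is a made-up name.

/* Hypothetical alternative, not part of this commit: the same CPU
 * notifier registration with a statically initialized notifier_block. */
static struct notifier_block irq_work_cpu_nb = {
	.notifier_call	= irq_work_cpu_notify,
	.priority	= 0,
};

static __init int irq_work_init_cpu_notifier(void)
{
	register_cpu_notifier(&irq_work_cpu_nb);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

Either form behaves the same here; register_cpu_notifier() only needs the callback and priority set before registration.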