path: root/kernel/printk/printk_safe.c
author    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>  2016-12-27 23:16:06 +0900
committer Petr Mladek <pmladek@suse.com>                     2017-02-08 11:07:11 +0100
commit    099f1c84c0052ec1b27f1c3942eed5830d86bdbb (patch)
tree      d24459538b67e6b94b389d7022653c294e510094 /kernel/printk/printk_safe.c
parent    f92bac3b141b8233e34ddf32d227e12bfba07b48 (diff)
download  blackbird-op-linux-099f1c84c0052ec1b27f1c3942eed5830d86bdbb.tar.gz
          blackbird-op-linux-099f1c84c0052ec1b27f1c3942eed5830d86bdbb.zip
printk: introduce per-cpu safe_print seq buffer
This patch extends the idea of NMI per-cpu buffers to regions that may
cause recursive printk() calls and possible deadlocks. Namely, printk()
can't handle printk calls from scheduler code or printk() calls from lock
debugging code (spin_dump() for instance), because those may be called
with `sem->lock' already taken or any other `critical' locks (p->pi_lock,
etc.). An example of such a deadlock:

 vprintk_emit()
  console_unlock()
   up()                        << raw_spin_lock_irqsave(&sem->lock, flags);
    wake_up_process()
     try_to_wake_up()
      ttwu_queue()
       ttwu_activate()
        activate_task()
         enqueue_task()
          enqueue_task_fair()
           cfs_rq_of()
            task_of()
             WARN_ON_ONCE(!entity_is_task(se))
              vprintk_emit()
               console_trylock()
                down_trylock()
                 raw_spin_lock_irqsave(&sem->lock, flags)
                                       ^^^^ deadlock

and some other cases.

Just like in the NMI implementation, the solution uses a per-cpu
`printk_func' pointer to 'redirect' printk() calls to a 'safe' callback
that stores messages in a per-cpu buffer and flushes them back to the
logbuf buffer later.

Usage example:

 printk()
  printk_safe_enter_irqsave(flags)
  //
  //  any printk() call from here will end up in vprintk_safe(),
  //  which stores messages in a special per-CPU buffer.
  //
  printk_safe_exit_irqrestore(flags)

The 'redirection' mechanism, though, has been reworked, as suggested by
Petr Mladek. Instead of using a per-cpu @print_func callback we now keep
a per-cpu printk-context variable and call either the default or the nmi
vprintk function depending on its value. printk_nmi_enter/exit and
printk_safe_enter/exit, thus, just set/clear the corresponding bits in
the printk-context variable. The patch only adds printk_safe support; we
don't use it yet.

Link: http://lkml.kernel.org/r/20161227141611.940-4-sergey.senozhatsky@gmail.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Calvin Owens <calvinowens@fb.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
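For illustration, here is a minimal userspace sketch of the context-mask
dispatch this patch introduces. The my_* names and the plain
printk_context variable are stand-ins invented for this sketch (the
kernel uses per-CPU variables and its real vprintk backends); the mask
values mirror the ones the series defines, but treat the whole thing as a
simplified model, not kernel code:

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's context bits. */
#define PRINTK_NMI_CONTEXT_MASK		0x80000000
#define PRINTK_SAFE_CONTEXT_MASK	0x7fffffff

/* The kernel keeps this per-CPU; a plain variable suffices for a sketch. */
static unsigned int printk_context;

static int my_vprintk_nmi(const char *fmt, va_list args)
{
	printf("[nmi]  ");		/* would append to nmi_print_seq */
	return vprintf(fmt, args);
}

static int my_vprintk_safe(const char *fmt, va_list args)
{
	printf("[safe] ");		/* would append to safe_print_seq */
	return vprintf(fmt, args);
}

static int my_vprintk_default(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

/* Mirrors vprintk_func(): pick a backend based on the context mask. */
static int my_printk(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	if (printk_context & PRINTK_NMI_CONTEXT_MASK)
		ret = my_vprintk_nmi(fmt, args);
	else if (printk_context & PRINTK_SAFE_CONTEXT_MASK)
		ret = my_vprintk_safe(fmt, args);
	else
		ret = my_vprintk_default(fmt, args);
	va_end(args);

	return ret;
}

int main(void)
{
	my_printk("goes straight to the log\n");

	printk_context++;			/* __printk_safe_enter() */
	my_printk("redirected to the safe buffer\n");
	printk_context--;			/* __printk_safe_exit() */

	printk_context |= PRINTK_NMI_CONTEXT_MASK;	/* printk_nmi_enter() */
	my_printk("redirected to the NMI buffer\n");
	printk_context &= ~PRINTK_NMI_CONTEXT_MASK;	/* printk_nmi_exit() */

	return 0;
}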
Diffstat (limited to 'kernel/printk/printk_safe.c')
-rw-r--r--  kernel/printk/printk_safe.c | 128
1 file changed, 103 insertions, 25 deletions
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index fc80359dcd78..efc89a4e9df5 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -1,5 +1,5 @@
/*
- * printk_safe.c - Safe printk in NMI context
+ * printk_safe.c - Safe printk for printk-deadlock-prone contexts
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -32,13 +32,13 @@
* is later flushed into the main ring buffer via IRQ work.
*
* The alternative implementation is chosen transparently
- * via @printk_func per-CPU variable.
+ * by examining the current printk() context mask stored in the
+ * @printk_context per-CPU variable.
*
 * The implementation allows flushing the strings also from another CPU.
* There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked.
*/
-DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
static int printk_safe_irq_ready;
atomic_t nmi_message_lost;
@@ -50,18 +50,28 @@ struct printk_safe_seq_buf {
struct irq_work work; /* IRQ work that flushes the buffer */
unsigned char buffer[SAFE_LOG_BUF_LEN];
};
+
+static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+static DEFINE_PER_CPU(int, printk_context);
+
+#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+#endif
/*
- * Safe printk() for NMI context. It uses a per-CPU buffer to
- * store the message. NMIs are not nested, so there is always only
- * one writer running. But the buffer might get flushed from another
- * CPU, so we need to be careful.
+ * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
+ * have dedicated buffers, because otherwise printk-safe preempted by
+ * NMI-printk would have overwritten the NMI messages.
+ *
+ * The messages are flushed from IRQ work (or from panic()), possibly
+ * from another CPU, concurrently with printk_safe_log_store(). Should this
+ * happen, printk_safe_log_store() will notice the buffer->len mismatch
+ * and repeat the write.
*/
-static int vprintk_nmi(const char *fmt, va_list args)
+static int printk_safe_log_store(struct printk_safe_seq_buf *s,
+ const char *fmt, va_list args)
{
- struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
- int add = 0;
+ int add;
size_t len;
again:
@@ -74,8 +84,8 @@ again:
}
/*
- * Make sure that all old data have been read before the buffer was
- * reseted. This is not needed when we just append data.
+ * Make sure that all old data have been read before the buffer
+ * was reset. This is not needed when we just append data.
*/
if (!len)
smp_rmb();
@@ -161,7 +171,7 @@ static int printk_safe_flush_buffer(const char *start, size_t len)
}
/*
- * Flush data from the associated per_CPU buffer. The function
+ * Flush data from the associated per-CPU buffer. The function
* can be called either via IRQ work or independently.
*/
static void __printk_safe_flush(struct irq_work *work)
@@ -231,8 +241,12 @@ void printk_safe_flush(void)
{
int cpu;
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+#ifdef CONFIG_PRINTK_NMI
__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
+#endif
+ __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
+ }
}
/**
@@ -262,14 +276,88 @@ void printk_safe_flush_on_panic(void)
printk_safe_flush();
}
+#ifdef CONFIG_PRINTK_NMI
+/*
+ * Safe printk() for NMI context. It uses a per-CPU buffer to
+ * store the message. NMIs are not nested, so there is always only
+ * one writer running. But the buffer might get flushed from another
+ * CPU, so we need to be careful.
+ */
+static int vprintk_nmi(const char *fmt, va_list args)
+{
+ struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+
+ return printk_safe_log_store(s, fmt, args);
+}
+
+void printk_nmi_enter(void)
+{
+ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+}
+
+void printk_nmi_exit(void)
+{
+ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+}
+
+#else
+
+static int vprintk_nmi(const char *fmt, va_list args)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PRINTK_NMI */
+
+/*
+ * Lock-less printk(), to avoid deadlocks should the printk() recurse
+ * into itself. It uses a per-CPU buffer to store the message, just like
+ * NMI.
+ */
+static int vprintk_safe(const char *fmt, va_list args)
+{
+ struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
+
+ return printk_safe_log_store(s, fmt, args);
+}
+
+/* Can be preempted by NMI. */
+void __printk_safe_enter(void)
+{
+ this_cpu_inc(printk_context);
+}
+
+/* Can be preempted by NMI. */
+void __printk_safe_exit(void)
+{
+ this_cpu_dec(printk_context);
+}
+
+__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+{
+ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+ return vprintk_nmi(fmt, args);
+
+ if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
+ return vprintk_safe(fmt, args);
+
+ return vprintk_default(fmt, args);
+}
+
void __init printk_safe_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
- struct printk_safe_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
+ struct printk_safe_seq_buf *s;
+
+ s = &per_cpu(safe_print_seq, cpu);
+ init_irq_work(&s->work, __printk_safe_flush);
+#ifdef CONFIG_PRINTK_NMI
+ s = &per_cpu(nmi_print_seq, cpu);
init_irq_work(&s->work, __printk_safe_flush);
+#endif
}
/* Make sure that IRQ works are initialized before enabling. */
@@ -279,13 +367,3 @@ void __init printk_safe_init(void)
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
-
-void printk_nmi_enter(void)
-{
- this_cpu_write(printk_func, vprintk_nmi);
-}
-
-void printk_nmi_exit(void)
-{
- this_cpu_write(printk_func, vprintk_default);
-}
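As a footnote to the printk_safe_log_store() retry logic in the hunks
above: the writer appends at the current length and only commits the new
length with a compare-and-swap, so a concurrent flush (which resets the
length) makes the commit fail and the write repeat. Below is a simplified
single-buffer sketch of that pattern; the names (safe_seq_buf,
safe_log_store) are invented for the sketch, C11 atomics replace the
kernel's atomic_t API, and the overflow accounting and memory barriers of
the real code are omitted:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define SAFE_LOG_BUF_LEN 256

struct safe_seq_buf {
	atomic_int len;			/* committed length of the buffer */
	char buffer[SAFE_LOG_BUF_LEN];
};

/*
 * Append msg to s->buffer. A concurrent flusher may reset s->len to 0
 * at any point; if the compare-and-swap that commits the new length
 * fails, the length we wrote against is stale, so re-read it and retry.
 */
static int safe_log_store(struct safe_seq_buf *s, const char *msg)
{
	int add = (int)strlen(msg);
	int len, expected;

again:
	len = atomic_load(&s->len);

	if (len + add >= SAFE_LOG_BUF_LEN)
		return 0;		/* no space: message is dropped */

	memcpy(s->buffer + len, msg, (size_t)add);

	/* Commit: succeeds only if no flush reset len in the meantime. */
	expected = len;
	if (!atomic_compare_exchange_strong(&s->len, &expected, len + add))
		goto again;

	return add;
}

int main(void)
{
	struct safe_seq_buf buf = { .len = 0 };

	safe_log_store(&buf, "hello ");
	safe_log_store(&buf, "world\n");
	fwrite(buf.buffer, 1, (size_t)atomic_load(&buf.len), stdout);
	return 0;
}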