From 01f38497c6f6525f57eb445887b9ed1867dbd05c Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Mon, 30 Jul 2018 15:24:21 -0700
Subject: lockdep: Use this_cpu_ptr instead of get_cpu_var stats

get_cpu_var() disables preemption, which has the potential to call into the
preemption-disable trace points, causing some complications. There is also
no need to disable preemption in the uses of get_lock_stats(), since
preemption is already disabled there. So let's simplify the code.

Link: http://lkml.kernel.org/r/20180730222423.196630-2-joel@joelfernandes.org

Suggested-by: Peter Zijlstra
Acked-by: Peter Zijlstra
Signed-off-by: Joel Fernandes (Google)
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/locking/lockdep.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa4d3138bf1..fbbb79d5cfa0 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -248,12 +248,7 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-        return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
-}
-
-static void put_lock_stats(struct lock_class_stats *stats)
-{
-        put_cpu_var(cpu_lock_stats);
+        return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
@@ -271,7 +266,6 @@ static void lock_release_holdtime(struct held_lock *hlock)
                 lock_time_inc(&stats->read_holdtime, holdtime);
         else
                 lock_time_inc(&stats->write_holdtime, holdtime);
-        put_lock_stats(stats);
 }
 #else
 static inline void lock_release_holdtime(struct held_lock *hlock)
@@ -4090,7 +4084,6 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
         stats->contending_point[contending_point]++;
         if (lock->cpu != smp_processor_id())
                 stats->bounces[bounce_contended + !!hlock->read]++;
-        put_lock_stats(stats);
 }
 
 static void
@@ -4138,7 +4131,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
         }
         if (lock->cpu != cpu)
                 stats->bounces[bounce_acquired + !!hlock->read]++;
-        put_lock_stats(stats);
 
         lock->cpu = cpu;
         lock->ip = ip;
--
cgit v1.2.1
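For context, here is a simplified sketch of the two per-cpu accessors this
patch trades between, modeled on include/linux/percpu-defs.h rather than
quoted verbatim from it:

        /*
         * get_cpu_var() bundles a preempt_disable() with the per-cpu
         * access, so every call can recurse into a tracer that hooks the
         * preempt-off/preempt-on transitions:
         */
        #define get_cpu_var(var)                                \
        (*({                                                    \
                preempt_disable();                              \
                this_cpu_ptr(&var);                             \
        }))

        #define put_cpu_var(var)                                \
        do {                                                    \
                (void)&(var);                                   \
                preempt_enable();                               \
        } while (0)

        /*
         * this_cpu_ptr() merely computes the current CPU's copy of the
         * variable; it neither disables nor re-enables preemption. That
         * is safe in get_lock_stats() because its callers already run
         * with preemption disabled, which is why the put_lock_stats()
         * pairing can be removed entirely.
         */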
From c3bc8fd637a9623f5c507bd18f9677effbddf584 Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Mon, 30 Jul 2018 15:24:23 -0700
Subject: tracing: Centralize preemptirq tracepoints and unify their usage

This patch detaches the preemptirq tracepoints from the tracers and
keeps them separate.

Advantages:

* Lockdep and the irqsoff events can now run in parallel, since they no
  longer have their own calls.

* This unifies the use case of adding hooks to an irqsoff and irqson
  event, and a preemptoff and preempton event. Three users of the
  events exist:
  - Lockdep
  - irqsoff and preemptoff tracers
  - irqs and preempt trace events

The unification cleans up several ifdefs and makes the code in the
preempt and irqsoff tracers simpler. It gets rid of all the horrific
ifdeffery around PROVE_LOCKING and makes the configuration of the
different users of the tracepoints easier to understand. It also gets
rid of the time_* function calls from the lockdep hooks that were used
to call into the preemptirq tracer and are no longer needed. The
negative delta in lines of code in this patch is quite large too.

The patch introduces a new CONFIG option, PREEMPTIRQ_TRACEPOINTS, as a
single point for registering probes onto the tracepoints. With this,
the web of config options for the preempt/irq toggle tracepoints and
their users becomes:

  PREEMPT_TRACER   PREEMPTIRQ_EVENTS   IRQSOFF_TRACER   PROVE_LOCKING
        |                |      \          |               |
        \    (selects)   /       \         \   (selects)   /
       TRACE_PREEMPT_TOGGLE       ----> TRACE_IRQFLAGS
                         \                  /
                          \  (depends on)  /
                           PREEMPTIRQ_TRACEPOINTS

Other than the performance tests mentioned in the previous patch, I
also ran the locking API test suite and verified that all test cases
pass. I also injected issues by not registering the lockdep probes onto
the tracepoints, and the resulting failures confirm that the probes are
indeed doing the work.

This series + lockdep probes not registered (just to inject errors):

[    0.000000]      hard-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]      soft-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]          hard-safe-A + irqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]          soft-safe-A + irqs-on/12:FAILED|FAILED|  ok  |
[    0.000000]          hard-safe-A + irqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]          soft-safe-A + irqs-on/21:FAILED|FAILED|  ok  |
[    0.000000]     hard-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |
[    0.000000]     soft-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |

With this series + lockdep probes registered, all locking tests pass:

[    0.000000]      hard-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]      soft-irqs-on + irq-safe-A/21:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]        sirq-safe-A => hirqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]          hard-safe-A + irqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]          soft-safe-A + irqs-on/12:  ok  |  ok  |  ok  |
[    0.000000]          hard-safe-A + irqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]          soft-safe-A + irqs-on/21:  ok  |  ok  |  ok  |
[    0.000000]     hard-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |
[    0.000000]     soft-safe-A + unsafe-B #1/123:  ok  |  ok  |  ok  |

Link: http://lkml.kernel.org/r/20180730222423.196630-4-joel@joelfernandes.org

Acked-by: Peter Zijlstra (Intel)
Reviewed-by: Namhyung Kim
Signed-off-by: Joel Fernandes (Google)
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/locking/lockdep.c | 35 ++++++++++++++---------------------
 1 file changed, 14 insertions(+), 21 deletions(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fbbb79d5cfa0..03bfaeb9f4e6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,6 +55,7 @@
 
 #include "lockdep_internals.h"
 
+#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
@@ -2839,10 +2840,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
         debug_atomic_inc(hardirqs_on_events);
 }
 
-__visible void trace_hardirqs_on_caller(unsigned long ip)
+static void lockdep_hardirqs_on(void *none, unsigned long ignore,
+                                unsigned long ip)
 {
-        time_hardirqs_on(CALLER_ADDR0, ip);
-
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
@@ -2881,23 +2881,15 @@ __visible void trace_hardirqs_on_caller(unsigned long ip)
         __trace_hardirqs_on_caller(ip);
         current->lockdep_recursion = 0;
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-void trace_hardirqs_on(void)
-{
-        trace_hardirqs_on_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-__visible void trace_hardirqs_off_caller(unsigned long ip)
+static void lockdep_hardirqs_off(void *none, unsigned long ignore,
+                                 unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        time_hardirqs_off(CALLER_ADDR0, ip);
-
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
@@ -2919,13 +2911,6 @@ __visible void trace_hardirqs_off_caller(unsigned long ip)
         } else
                 debug_atomic_inc(redundant_hardirqs_off);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-void trace_hardirqs_off(void)
-{
-        trace_hardirqs_off_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4330,7 +4315,15 @@ out_restore:
         raw_local_irq_restore(flags);
 }
 
-void __init lockdep_info(void)
+void __init lockdep_init_early(void)
+{
+#ifdef CONFIG_PROVE_LOCKING
+        register_trace_prio_irq_disable(lockdep_hardirqs_off, NULL, INT_MAX);
+        register_trace_prio_irq_enable(lockdep_hardirqs_on, NULL, INT_MIN);
+#endif
+}
+
+void __init lockdep_init(void)
 {
         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
--
cgit v1.2.1
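To illustrate the single registration point this patch introduces, here is
a minimal sketch of what an additional user of the unified tracepoints
could look like, in the style of lockdep_init_early() above. The names
my_irqs_off and my_probe_init are hypothetical, and the sketch assumes a
kernel built with CONFIG_PREEMPTIRQ_TRACEPOINTS:

        #include <linux/init.h>
        #include <linux/tracepoint.h>
        #include <trace/events/preemptirq.h>

        /*
         * Probes receive the registration cookie first, followed by the
         * tracepoint arguments (here: the irqs-off site and its caller).
         */
        static void my_irqs_off(void *unused, unsigned long ip,
                                unsigned long parent_ip)
        {
                /* Must be trace-safe: no locks taken, no sleeping. */
        }

        static int __init my_probe_init(void)
        {
                return register_trace_irq_disable(my_irqs_off, NULL);
        }
        late_initcall(my_probe_init);

Lockdep itself uses the _prio_ registration variants, with INT_MAX for
irq_disable and INT_MIN for irq_enable, so that its probe observes the
irqs-off transition before, and the irqs-on transition after, any other
registered user.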
From bff1b208a5d1dbb2355822ef859edcb9be0379e4 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Mon, 6 Aug 2018 15:50:58 -0400
Subject: tracing: Partial revert of "tracing: Centralize preemptirq
 tracepoints and unify their usage"

Joel Fernandes created a nice patch that cleaned up the duplicate hooks
used by lockdep and the irqsoff latency tracer. It made both use
tracepoints. But it caused lockdep to trigger several false positives.
We have not figured out why yet, but removing lockdep from the trace
event hooks and just calling its helper functions directly (as it used
to) makes the problem go away.

This is a partial revert of the clean-up patch c3bc8fd637a9 ("tracing:
Centralize preemptirq tracepoints and unify their usage") that adds
direct calls for lockdep, but also keeps most of the clean up done to
get rid of the horrible preprocessor if statements.

Link: http://lkml.kernel.org/r/20180806155058.5ee875f4@gandalf.local.home

Cc: Peter Zijlstra
Reviewed-by: Joel Fernandes (Google)
Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage")
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/locking/lockdep.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 03bfaeb9f4e6..e406c5fdb41e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2840,8 +2840,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
         debug_atomic_inc(hardirqs_on_events);
 }
 
-static void lockdep_hardirqs_on(void *none, unsigned long ignore,
-                                unsigned long ip)
+void lockdep_hardirqs_on(unsigned long ip)
 {
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
@@ -2885,8 +2884,7 @@ static void lockdep_hardirqs_on(void *none, unsigned long ignore,
 /*
  * Hardirqs were disabled:
  */
-static void lockdep_hardirqs_off(void *none, unsigned long ignore,
-                                 unsigned long ip)
+void lockdep_hardirqs_off(unsigned long ip)
 {
         struct task_struct *curr = current;
 
@@ -4315,14 +4313,6 @@ out_restore:
         raw_local_irq_restore(flags);
 }
 
-void __init lockdep_init_early(void)
-{
-#ifdef CONFIG_PROVE_LOCKING
-        register_trace_prio_irq_disable(lockdep_hardirqs_off, NULL, INT_MAX);
-        register_trace_prio_irq_enable(lockdep_hardirqs_on, NULL, INT_MIN);
-#endif
-}
-
 void __init lockdep_init(void)
 {
         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
--
cgit v1.2.1
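After this partial revert, the shape of the shared entry points in
kernel/trace/trace_preemptirq.c is roughly as follows. This is a condensed
sketch, not the verbatim kernel code; the real functions add per-cpu
recursion guards and NMI checks that are omitted here:

        /*
         * The tracepoint still fires for the irqsoff tracer and the
         * preempt/irq trace events ...
         */
        void trace_hardirqs_on(void)
        {
                trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
                /* ... but lockdep is now a direct call, not a probe. */
                lockdep_hardirqs_on(CALLER_ADDR0);
        }

        void trace_hardirqs_off(void)
        {
                trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
                lockdep_hardirqs_off(CALLER_ADDR0);
        }

Keeping lockdep on direct calls while the other users stay on the
tracepoints preserves most of the unification while sidestepping the
still-unexplained lockdep false positives.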