author	Peter Zijlstra <peterz@infradead.org>	2016-01-26 12:30:14 +0100
committer	Ingo Molnar <mingo@kernel.org>	2016-01-29 08:35:31 +0100
commit	f47c02c0c8403963fbb8c3484e285727305d0f73 (patch)
tree	3672f498ef05670f261a8bfbfd66925eca852879 /kernel/events
parent	6e801e016917989ab8a7ddfc4229a15a5621622a (diff)
perf: Robustify event->owner usage and SMP ordering
Use smp_store_release() to clear event->owner and lockless_dereference()
to observe it. Further use READ_ONCE() for all lockless reads.

This changes perf_remove_from_owner() to leave event->owner cleared.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
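The pairing the patch relies on -- a release store when the owner unpublishes
itself, and a dependency-ordered read on the lookup side -- can be sketched in
user space with C11 atomics standing in for the kernel's smp_store_release()
and lockless_dereference(). The names below (struct event, owner_clear(),
event_release()) are illustrative only and are not part of the kernel patch;
RCU protection and task_struct reference counting are omitted for brevity.

/*
 * Minimal user-space analogue of the event->owner publication scheme.
 * memory_order_consume is typically promoted to acquire by compilers,
 * which is a superset of the dependency ordering the kernel needs.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <pthread.h>

struct owner {
	pthread_mutex_t lock;		/* plays the role of perf_event_mutex */
	/* ... state published before the release store ... */
};

struct event {
	_Atomic(struct owner *) owner;	/* plays the role of event->owner */
};

/* Exit path: unpublish the owner. Mirrors perf_event_exit_task(). */
static void owner_clear(struct event *ev)
{
	/* Orders all prior writes (e.g. list deletion) before the clear. */
	atomic_store_explicit(&ev->owner, NULL, memory_order_release);
}

/* Teardown path: mirrors perf_remove_from_owner(). */
static void event_release(struct event *ev)
{
	/*
	 * Stands in for lockless_dereference(): if we observe a non-NULL
	 * owner, we also observe the writes made before it was published.
	 */
	struct owner *owner =
		atomic_load_explicit(&ev->owner, memory_order_consume);

	if (owner) {
		pthread_mutex_lock(&owner->lock);
		/* Re-check under the lock, as the kernel code does. */
		if (atomic_load_explicit(&ev->owner, memory_order_relaxed)) {
			/* ... detach from the owner's list, then unpublish ... */
			atomic_store_explicit(&ev->owner, NULL,
					      memory_order_release);
		}
		pthread_mutex_unlock(&owner->lock);
	}
}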
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	20
1 file changed, 10 insertions, 10 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d84374fa44e5..5f055de90c6d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -152,7 +152,7 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
static bool is_kernel_event(struct perf_event *event)
{
- return event->owner == TASK_TOMBSTONE;
+ return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}
/*
@@ -1651,7 +1651,7 @@ out:
*/
static bool is_orphaned_event(struct perf_event *event)
{
- return event && !is_kernel_event(event) && !event->owner;
+ return event && !is_kernel_event(event) && !READ_ONCE(event->owner);
}
/*
@@ -3733,14 +3733,13 @@ static void perf_remove_from_owner(struct perf_event *event)
struct task_struct *owner;
rcu_read_lock();
- owner = ACCESS_ONCE(event->owner);
/*
- * Matches the smp_wmb() in perf_event_exit_task(). If we observe
- * !owner it means the list deletion is complete and we can indeed
- * free this event, otherwise we need to serialize on
+ * Matches the smp_store_release() in perf_event_exit_task(). If we
+ * observe !owner it means the list deletion is complete and we can
+ * indeed free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
- smp_read_barrier_depends();
+ owner = lockless_dereference(event->owner);
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
@@ -3768,8 +3767,10 @@ static void perf_remove_from_owner(struct perf_event *event)
* ensured they're done, and we can proceed with freeing the
* event.
*/
- if (event->owner)
+ if (event->owner) {
list_del_init(&event->owner_entry);
+ smp_store_release(&event->owner, NULL);
+ }
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
@@ -8829,8 +8830,7 @@ void perf_event_exit_task(struct task_struct *child)
* the owner, closes a race against perf_release() where
* we need to serialize on the owner->perf_event_mutex.
*/
- smp_wmb();
- event->owner = NULL;
+ smp_store_release(&event->owner, NULL);
}
mutex_unlock(&child->perf_event_mutex);