From 1e40c2edef2537f87f94d0baf80aeaeb7d51cc23 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 19 Jul 2013 20:31:01 +0200
Subject: mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()

mutex_can_spin_on_owner() is technically broken in that it would in
theory allow the compiler to load lock->owner twice, seeing a pointer
the first time and a NULL pointer the second time.

Linus pointed out that a compiler has to be seriously broken to not
compile this correctly - but nevertheless this change is correct as it
will better document the implementation.

Signed-off-by: Peter Zijlstra
Acked-by: Davidlohr Bueso
Acked-by: Waiman Long
Acked-by: Linus Torvalds
Acked-by: Thomas Gleixner
Acked-by: Rik van Riel
Cc: Paul E. McKenney
Cc: David Howells
Link: http://lkml.kernel.org/r/20130719183101.GA20909@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 kernel/mutex.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'kernel/mutex.c')

diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..7ff48c55a98b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  */
 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 {
+	struct task_struct *owner;
 	int retval = 1;
 
 	rcu_read_lock();
-	if (lock->owner)
-		retval = lock->owner->on_cpu;
+	owner = ACCESS_ONCE(lock->owner);
+	if (owner)
+		retval = owner->on_cpu;
 	rcu_read_unlock();
 	/*
	 * if lock->owner is not set, the mutex owner may have just acquired
--
cgit v1.2.1
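For readers unfamiliar with the idiom, the stand-alone sketch below shows the
reload hazard the patch above documents: without a volatile access the compiler
is allowed to read the shared pointer once for the NULL check and again for the
dereference, and the second read may observe NULL. The ACCESS_ONCE() definition
mirrors the classic kernel one-liner; struct task, shared_owner and the two
can_spin_*() helpers are made-up names for illustration only, not kernel symbols.

  #include <stdio.h>

  /* Kernel-style ACCESS_ONCE(): force exactly one volatile load/store. */
  #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

  struct task { int on_cpu; };

  struct task *shared_owner;        /* imagine another thread updating this */

  /* Racy shape: the compiler may reload shared_owner between the NULL
   * check and the dereference, so the dereference can observe NULL. */
  int can_spin_racy(void)
  {
          int retval = 1;

          if (shared_owner)
                  retval = shared_owner->on_cpu;
          return retval;
  }

  /* Shape after the patch: read the pointer once, then only use the copy. */
  int can_spin_once(void)
  {
          struct task *owner;
          int retval = 1;

          owner = ACCESS_ONCE(shared_owner);
          if (owner)
                  retval = owner->on_cpu;
          return retval;
  }

  int main(void)
  {
          struct task t = { .on_cpu = 1 };

          shared_owner = &t;
          printf("racy=%d once=%d\n", can_spin_racy(), can_spin_once());
          return 0;
  }
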
From ec83f425dbca47e19c6737e8e7db0d0924a5de1b Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Fri, 28 Jun 2013 13:13:18 -0700
Subject: mutex: Do not unnecessarily deal with waiters

Upon entering the slowpath, we immediately attempt to acquire the lock
by checking if it is already unlocked. If we are lucky enough that this
is the case, then we don't need to deal with any waiter related logic.

Furthermore any checks for an empty wait_list are unnecessary as we
already know that count is non-negative and hence no one is waiting for
the lock.

Move the count check and xchg calls to be done before any waiters are
set up - including waiter debugging. Upon failure to acquire the lock,
the xchg sets the counter to 0, instead of -1 as it was originally.
This can be done here since we set it back to -1 right at the beginning
of the loop, so other waiters are woken up when the lock is released.

When tested on an 8-socket (80 core) system against a vanilla 3.10-rc1
kernel, this patch provides some small performance benefits (+2-6%).
While these could be considered in the noise level, the average
percentages were stable across multiple runs and no performance
regressions were seen. The two big winners, for small numbers of users
(10-100), were the short and compute workloads, which saw +19.36% and
+15.76% in jobs per minute.

Also change some break statements to 'goto slowpath', which IMO makes
it a little more intuitive to read.

Signed-off-by: Davidlohr Bueso
Acked-by: Rik van Riel
Acked-by: Maarten Lankhorst
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1372450398.2106.1.camel@buesod1.americas.hpqcorp.net
Signed-off-by: Ingo Molnar
---
 kernel/mutex.c | 41 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 23 deletions(-)

(limited to 'kernel/mutex.c')

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7ff48c55a98b..386ad5da47a5 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -463,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -474,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -489,8 +489,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
-			preempt_enable();
-			return 0;
+			goto done;
 		}
 
 		mspin_unlock(MLOCK(lock), &node);
@@ -501,7 +500,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -515,6 +514,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -522,9 +525,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -538,7 +538,7 @@ slowpath:
 		 * other waiters:
 		 */
 		if (MUTEX_SHOW_NO_WAITER(lock) &&
-		(atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -563,24 +563,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						   struct ww_mutex,
-						   base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
 
@@ -594,15 +595,9 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
+done:
 	preempt_enable();
-
 	return 0;
 
 err:
--
cgit v1.2.1
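As a rough, self-contained illustration of the reordered slowpath above, the
C11 model below captures the counter protocol (1 = unlocked, 0 = locked with
no waiters, -1 = locked with possible waiters) and the new "try once more
before queueing a waiter" step. struct toy_mutex and slowpath_try_acquire()
are invented names for this sketch; the real code uses lock->count,
MUTEX_SHOW_NO_WAITER() and atomic_xchg() as shown in the diff above.

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /*
   * Toy model of the mutex counter protocol (not kernel code):
   *    1 -> unlocked
   *    0 -> locked, no waiters
   *   -1 -> locked, possible waiters
   */
  struct toy_mutex {
          atomic_int count;       /* wait list, wait_lock, ... elided */
  };

  /*
   * Shape of the reworked slowpath entry: before building a waiter and
   * touching the wait list, try once more to take the lock.  Exchanging
   * in 0 (rather than -1) is safe because a failed attempt falls through
   * to the wait loop, which sets the counter back to -1 before sleeping.
   */
  bool slowpath_try_acquire(struct toy_mutex *lock)
  {
          /* roughly: MUTEX_SHOW_NO_WAITER(lock) && atomic_xchg(&count, 0) == 1 */
          if (atomic_load(&lock->count) >= 0 &&
              atomic_exchange(&lock->count, 0) == 1)
                  return true;    /* got the lock: skip all waiter setup */

          return false;           /* really contended: queue a waiter and sleep */
  }

  int main(void)
  {
          struct toy_mutex m = { .count = 1 };    /* starts unlocked */

          printf("first try:  %d\n", slowpath_try_acquire(&m));  /* acquired */
          printf("second try: %d\n", slowpath_try_acquire(&m));  /* contended */
          return 0;
  }
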
From 2b4883972271f8d61de67aa365ade89dfff69db1 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Wed, 24 Jul 2013 11:25:17 -0700
Subject: mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER

Fengguang reported the following warning when optimistic spinning is
disabled (ie: make allnoconfig):

  kernel/mutex.c:599:1: warning: label 'done' defined but not used

Remove the 'done' label altogether.

Reported-by: Fengguang Wu
Signed-off-by: Davidlohr Bueso
Signed-off-by: Ingo Molnar
---
 kernel/mutex.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/mutex.c')

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 386ad5da47a5..98164a55a4dc 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -489,7 +489,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
-			goto done;
+			preempt_enable();
+			return 0;
 		}
 
 		mspin_unlock(MLOCK(lock), &node);
@@ -596,7 +597,6 @@ skip_wait:
 	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
-done:
 	preempt_enable();
 	return 0;
 
--
cgit v1.2.1
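The warning above is easy to reproduce outside the kernel whenever a label's
only goto sits behind a configuration #ifdef. In the minimal stand-alone
example below, SPIN plays the role of CONFIG_MUTEX_SPIN_ON_OWNER and the
helpers are dummies; building with SPIN left undefined triggers the same
"label defined but not used" diagnostic, and the second function shows the
fix the patch applies - return directly from the fast path and drop the label.

  /* Build with: cc -Wall -c demo.c   (leave SPIN undefined to see the warning) */

  /* #define SPIN 1 */              /* stand-in for CONFIG_MUTEX_SPIN_ON_OWNER */

  int try_fast_path(void) { return 0; }     /* dummy trylock */
  void do_slowpath(void) { }                /* dummy wait loop */

  /* Warning-producing shape: the only goto lives inside the #ifdef, so with
   * SPIN disabled the compiler warns that 'done' is defined but not used. */
  int lock_with_label(void)
  {
  #ifdef SPIN
          if (try_fast_path())
                  goto done;
  #endif
          do_slowpath();
  done:
          return 0;
  }

  /* Fixed shape, mirroring the patch: return directly and drop the label. */
  int lock_without_label(void)
  {
  #ifdef SPIN
          if (try_fast_path())
                  return 0;
  #endif
          do_slowpath();
          return 0;
  }

  int main(void)
  {
          return lock_with_label() + lock_without_label();
  }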