Diffstat (limited to 'lib/rwsem.c')
-rw-r--r--  lib/rwsem.c  150
1 file changed, 88 insertions(+), 62 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ceba8e28807a..f236d7cd5cf3 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -36,45 +36,56 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
+/* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and
+ * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
+ * since the rwsem value was observed.
+ */
+#define RWSEM_WAKE_ANY 0 /* Wake whatever's at head of wait list */
+#define RWSEM_WAKE_NO_ACTIVE 1 /* rwsem was observed with no active thread */
+#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
+
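[To follow the bias arithmetic in this patch it helps to have the count layout at hand. The constants live in the per-arch <asm/rwsem.h>; the 32-bit x86 definitions look roughly like this, quoted here for illustration and not part of this patch:

	#define RWSEM_UNLOCKED_VALUE		0x00000000
	#define RWSEM_ACTIVE_BIAS		0x00000001
	#define RWSEM_ACTIVE_MASK		0x0000ffff
	#define RWSEM_WAITING_BIAS		(-0x00010000)
	#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

The low 16 bits count active lockers; the count is negative whenever a writer is active or anyone is queued.]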
/*
* handle the lock release when processes blocked on it that can now run
* - if we come here from up_xxxx(), then:
* - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
* - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
- * - there must be someone on the queue
+ * - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if downgrading is false
*/
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
struct list_head *next;
- signed long oldcount, woken, loop;
-
- if (downgrading)
- goto dont_wake_writers;
-
- /* if we came through an up_xxxx() call, we only only wake someone up
- * if we can transition the active part of the count from 0 -> 1
- */
- try_again:
- oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
- - RWSEM_ACTIVE_BIAS;
- if (oldcount & RWSEM_ACTIVE_MASK)
- goto undo;
+ signed long oldcount, woken, loop, adjustment;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
- /* try to grant a single write lock if there's a writer at the front
- * of the queue - note we leave the 'active part' of the count
- * incremented by 1 and the waiting part incremented by 0x00010000
- */
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
goto readers_only;
+ if (wake_type == RWSEM_WAKE_READ_OWNED)
+ /* Another active reader was observed, so wakeup is not
+ * likely to succeed. Save the atomic op.
+ */
+ goto out;
+
+ /* There's a writer at the front of the queue - try to grant it the
+ * write lock. However, we only wake this writer if we can transition
+ * the active part of the count from 0 -> 1
+ */
+ adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+ if (waiter->list.next == &sem->wait_list)
+ adjustment -= RWSEM_WAITING_BIAS;
+
+ try_again_write:
+ oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+ if (oldcount & RWSEM_ACTIVE_MASK)
+ /* Someone grabbed the sem already */
+ goto undo_write;
+
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 * It is allocated on the waiter's stack and may become invalid at
* any time after that point (due to a wakeup from another source).
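[The try_again_write/undo_write pattern above can be sketched standalone with C11 atomics. This is only a model: the names are invented, the adjustment is reduced to the bare active bias, and rwsem_atomic_update() is replaced by atomic_fetch_add(), which returns the pre-add value (rwsem_atomic_update() returns the post-add value, hence the "- adjustment" in the patch):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define ACTIVE_BIAS	0x00000001L
	#define ACTIVE_MASK	0x0000ffffL

	/* Grant the write lock only on an active-count 0 -> 1 transition. */
	static bool try_grant_write(atomic_long *count)
	{
		for (;;) {
			/* atomic_fetch_add() returns the pre-add value */
			long old = atomic_fetch_add(count, ACTIVE_BIAS);

			if (!(old & ACTIVE_MASK))
				return true;	/* made the 0 -> 1 transition */

			/* Undo. If the post-undo value still shows active
			 * lockers, someone else owns the sem; if the active
			 * part just hit zero, another attempt may win. */
			if ((atomic_fetch_add(count, -ACTIVE_BIAS) - ACTIVE_BIAS)
			    & ACTIVE_MASK)
				return false;
		}
	}

As in __rwsem_do_wake(), this would run under the wait-list spinlock; the retry only triggers when the undo itself observes the active count reaching zero.]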
@@ -87,18 +98,30 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
put_task_struct(tsk);
goto out;
- /* don't want to wake any writers */
- dont_wake_writers:
- waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+ readers_only:
+ /* If we come here from up_xxxx(), another thread might have reached
+ * rwsem_down_failed_common() before we acquired the spinlock and
+ * woken up a waiter, making it now active. We prefer to check for
+ * this first in order to not spend too much time with the spinlock
+ * held if we're not going to be able to wake up readers in the end.
+ *
+ * Note that we do not need to update the rwsem count: any writer
+ * trying to acquire rwsem will run rwsem_down_write_failed() due
+ * to the waiting threads and block trying to acquire the spinlock.
+ *
+ * We use a dummy atomic update in order to acquire the cache line
+ * exclusively since we expect to succeed and run the final rwsem
+ * count adjustment pretty soon.
+ */
+ if (wake_type == RWSEM_WAKE_ANY &&
+ rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+ /* Someone grabbed the sem for write already */
goto out;
- /* grant an infinite number of read locks to the readers at the front
- * of the queue
- * - note we increment the 'active part' of the count by the number of
- * readers before waking any processes up
+ /* Grant an infinite number of read locks to the readers at the front
+ * of the queue. Note we increment the 'active part' of the count by
+ * the number of readers before waking any processes up.
*/
- readers_only:
woken = 0;
do {
woken++;
@@ -111,16 +134,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
} while (waiter->flags & RWSEM_WAITING_FOR_READ);
- loop = woken;
- woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
- if (!downgrading)
- /* we'd already done one increment earlier */
- woken -= RWSEM_ACTIVE_BIAS;
+ adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
+ if (waiter->flags & RWSEM_WAITING_FOR_READ)
+ /* hit end of list above */
+ adjustment -= RWSEM_WAITING_BIAS;
- rwsem_atomic_add(woken, sem);
+ rwsem_atomic_add(adjustment, sem);
next = sem->wait_list.next;
- for (; loop > 0; loop--) {
+ for (loop = woken; loop > 0; loop--) {
waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
@@ -138,10 +160,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
/* undo the change to the active count, but check for a transition
* 1->0 */
- undo:
- if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+ undo_write:
+ if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
goto out;
- goto try_again;
+ goto try_again_write;
}
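[To make the readers_only arithmetic above concrete, here is a worked example using the illustrative 32-bit bias values quoted earlier. Three readers are queued, nobody is active, and no writer is queued behind them:

	count before wake  = RWSEM_WAITING_BIAS                 = 0xffff0000
	adjustment         = 3 * RWSEM_ACTIVE_READ_BIAS
	                       - RWSEM_WAITING_BIAS             = 0x00010003
	count after wake   = count + adjustment                 = 0x00000003

That is, three active readers and no waiting bias, since the queue is now empty. Had a writer been queued behind the readers, the walk would have stopped at it and the waiting bias would have been left in place.]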
/*
@@ -149,8 +171,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
*/
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
- struct rwsem_waiter *waiter, signed long adjustment)
+ unsigned int flags, signed long adjustment)
{
+ struct rwsem_waiter waiter;
struct task_struct *tsk = current;
signed long count;
@@ -158,23 +181,34 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/* set up my own style of waitqueue */
spin_lock_irq(&sem->wait_lock);
- waiter->task = tsk;
+ waiter.task = tsk;
+ waiter.flags = flags;
get_task_struct(tsk);
- list_add_tail(&waiter->list, &sem->wait_list);
+ if (list_empty(&sem->wait_list))
+ adjustment += RWSEM_WAITING_BIAS;
+ list_add_tail(&waiter.list, &sem->wait_list);
- /* we're now waiting on the lock, but no longer actively read-locking */
+ /* we're now waiting on the lock, but no longer actively locking */
count = rwsem_atomic_update(adjustment, sem);
- /* if there are no active locks, wake the front queued process(es) up */
- if (!(count & RWSEM_ACTIVE_MASK))
- sem = __rwsem_do_wake(sem, 0);
+ /* If there are no active locks, wake the front queued process(es) up.
+ *
+ * Alternatively, if we're called from a failed down_write(), there
+ * were already threads queued before us and there are no active
+ * writers, the lock must be read owned; so we try to wake any read
+ * locks that were queued ahead of us. */
+ if (count == RWSEM_WAITING_BIAS)
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+ else if (count > RWSEM_WAITING_BIAS &&
+ adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
- if (!waiter->task)
+ if (!waiter.task)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
@@ -191,12 +225,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
asmregparm struct rw_semaphore __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
- struct rwsem_waiter waiter;
-
- waiter.flags = RWSEM_WAITING_FOR_READ;
- rwsem_down_failed_common(sem, &waiter,
- RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
- return sem;
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+ -RWSEM_ACTIVE_READ_BIAS);
}
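[For context (not part of this patch), the arch fast path that funnels into rwsem_down_read_failed() behaves conceptually like the sketch below; the helper name is invented and real implementations are hand-written assembly:

	static inline void down_read_sketch(struct rw_semaphore *sem)
	{
		/* Optimistically take a read bias; a negative result means
		 * a writer is active or waiters are queued, so go the slow
		 * way. The slow path's -RWSEM_ACTIVE_READ_BIAS adjustment
		 * undoes this optimistic increment while the thread queues
		 * itself as a waiter. */
		if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
			rwsem_down_read_failed(sem);
	}
]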
/*
@@ -205,12 +235,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
asmregparm struct rw_semaphore __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
- struct rwsem_waiter waiter;
-
- waiter.flags = RWSEM_WAITING_FOR_WRITE;
- rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
-
- return sem;
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
+ -RWSEM_ACTIVE_WRITE_BIAS);
}
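[The corresponding write-side fast path explains the -RWSEM_ACTIVE_WRITE_BIAS adjustment above: the failed fast-path attempt has already added the write bias, and the slow path takes it back out while queueing. Conceptually, again as an invented sketch of what is really assembly:

	static inline void down_write_sketch(struct rw_semaphore *sem)
	{
		/* Only an uncontended sem (old count 0) takes the write
		 * bias cleanly; any other result means active lockers or
		 * queued waiters. */
		if (rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem) !=
		    RWSEM_ACTIVE_WRITE_BIAS)
			rwsem_down_write_failed(sem);
	}
]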
/*
@@ -225,7 +251,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 0);
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -245,7 +271,7 @@ asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 1);
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
spin_unlock_irqrestore(&sem->wait_lock, flags);
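[Finally, a conceptual sketch of the downgrade_write() fast path that leads here. Because the caller keeps holding the lock for read, rwsem_downgrade_wake() can pass RWSEM_WAKE_READ_OWNED and let __rwsem_do_wake() skip the atomic active-count check; the sketch and its name are illustrative only:

	static inline void downgrade_write_sketch(struct rw_semaphore *sem)
	{
		/* Turn the write bias into a read bias: add
		 * RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_WRITE_BIAS, which
		 * equals -RWSEM_WAITING_BIAS. A negative result means the
		 * wait list is non-empty, so queued readers need waking. */
		if (rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem) < 0)
			rwsem_downgrade_wake(sem);
	}
]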