Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/mutex.c     | 37
-rw-r--r--  kernel/locking/qspinlock.c | 21
-rw-r--r--  kernel/locking/rtmutex.c   |  5
3 files changed, 48 insertions, 15 deletions
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 858a07590e39..2048359f33d2 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1082,15 +1082,16 @@ static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock);
 
 /**
- * mutex_lock_interruptible - acquire the mutex, interruptible
- * @lock: the mutex to be acquired
+ * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
+ * @lock: The mutex to be acquired.
  *
- * Lock the mutex like mutex_lock(), and return 0 if the mutex has
- * been acquired or sleep until the mutex becomes available. If a
- * signal arrives while waiting for the lock then this function
- * returns -EINTR.
+ * Lock the mutex like mutex_lock().  If a signal is delivered while the
+ * process is sleeping, this function will return without acquiring the
+ * mutex.
  *
- * This function is similar to (but not equivalent to) down_interruptible().
+ * Context: Process context.
+ * Return: 0 if the lock was successfully acquired or %-EINTR if a
+ * signal arrived.
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
@@ -1104,6 +1105,18 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+/**
+ * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
+ * @lock: The mutex to be acquired.
+ *
+ * Lock the mutex like mutex_lock().  If a signal which will be fatal to
+ * the current process is delivered while the process is sleeping, this
+ * function will return without acquiring the mutex.
+ *
+ * Context: Process context.
+ * Return: 0 if the lock was successfully acquired or %-EINTR if a
+ * fatal signal arrived.
+ */
 int __sched mutex_lock_killable(struct mutex *lock)
 {
 	might_sleep();
@@ -1115,6 +1128,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
+/**
+ * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
+ * @lock: The mutex to be acquired.
+ *
+ * Lock the mutex like mutex_lock().  While the task is waiting for this
+ * mutex, it will be accounted as being in the IO wait state by the
+ * scheduler.
+ *
+ * Context: Process context.
+ */
 void __sched mutex_lock_io(struct mutex *lock)
 {
 	int token;
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece035039e..d880296245c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ queue:
 	tail = encode_tail(smp_processor_id(), idx);
 
 	node += idx;
+
+	/*
+	 * Ensure that we increment the head node->count before initialising
+	 * the actual node. If the compiler is kind enough to reorder these
+	 * stores, then an IRQ could overwrite our assignments.
+	 */
+	barrier();
+
 	node->locked = 0;
 	node->next = NULL;
 	pv_init_node(node);
@@ -408,14 +416,15 @@ queue:
 	 */
 	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
+
 		/*
-		 * The above xchg_tail() is also a load of @lock which
-		 * generates, through decode_tail(), a pointer.  The address
-		 * dependency matches the RELEASE of xchg_tail() such that
-		 * the subsequent access to @prev happens after.
+		 * We must ensure that the stores to @node are observed before
+		 * the write to prev->next. The address dependency from
+		 * xchg_tail is not sufficient to ensure this because the read
+		 * component of xchg_tail is unordered with respect to the
+		 * initialisation of @node.
 		 */
-
-		WRITE_ONCE(prev->next, node);
+		smp_store_release(&prev->next, node);
 
 		pv_wait_node(node, prev);
 		arch_mcs_spin_lock_contended(&node->locked);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 65cc0cb984e6..940633c63254 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
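For context on the mutex.c kerneldoc above, here is a minimal caller sketch that consumes the documented return convention (0 on success, -EINTR on a signal). The names struct my_dev and my_dev_write() are invented for illustration; only the mutex_lock_interruptible()/mutex_unlock() calls are the real API. mutex_lock_killable() follows the same pattern but only a fatal signal interrupts the sleep, and mutex_lock_io() additionally accounts the sleep as I/O wait.

/*
 * Illustrative sketch only -- not part of the patch. "struct my_dev" and
 * my_dev_write() are made-up names; the mutex_* calls and their return
 * conventions are the real API documented above.
 */
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	/* ... state protected by @lock ... */
};

static int my_dev_write(struct my_dev *dev)
{
	int ret;

	/* Returns 0 on success, -EINTR if a signal arrived while sleeping. */
	ret = mutex_lock_interruptible(&dev->lock);
	if (ret)
		return ret;	/* the mutex was never acquired */

	/* ... touch the protected state ... */

	mutex_unlock(&dev->lock);
	return 0;
}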

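The two qspinlock.c hunks both order the initialisation of an MCS node against the moment it becomes reachable: barrier() keeps the compiler from sinking the per-CPU node->count update below the node stores (an IRQ on the same CPU could otherwise pick and clobber the node), and smp_store_release() orders the node stores before the write that publishes the node to its predecessor. Below is a rough userspace C11 analogue of the publication half; the names (struct node, enqueue, handoff) are invented and the barrier()/IRQ aspect has no userspace counterpart here -- only the ordering idea corresponds to the patch.

/*
 * Sketch: initialise a node first, then publish it with a release store so
 * the thread that follows the pointer is guaranteed to see the initialised
 * fields. Not the kernel code.
 */
#include <stdatomic.h>
#include <stddef.h>

struct node {
	_Atomic(struct node *) next;
	atomic_int locked;
};

static _Atomic(struct node *) tail;

void enqueue(struct node *me)
{
	/* Initialise the node before anyone can see it. */
	atomic_store_explicit(&me->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&me->next, NULL, memory_order_relaxed);

	/* Swap ourselves in as the new tail (mirrors xchg_tail's RELEASE). */
	struct node *prev = atomic_exchange_explicit(&tail, me,
						     memory_order_release);
	if (prev) {
		/*
		 * Publish: the release store orders our initialisation of
		 * *me before *me becomes reachable via prev->next -- the
		 * role smp_store_release(&prev->next, node) plays above.
		 */
		atomic_store_explicit(&prev->next, me, memory_order_release);

		/* Wait until the predecessor hands the lock over. */
		while (!atomic_load_explicit(&me->locked,
					     memory_order_acquire))
			;
	}
}

void handoff(struct node *me)
{
	struct node *next;

	/* The acquire load pairs with the release store in enqueue(). */
	do {
		next = atomic_load_explicit(&me->next, memory_order_acquire);
	} while (!next);

	atomic_store_explicit(&next->locked, 1, memory_order_release);
}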

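The rtmutex.c hunk switches rt_mutex_futex_unlock() from the _irq spinlock variants to _irqsave/_irqrestore, so the function preserves the caller's interrupt state instead of unconditionally re-enabling interrupts on unlock. A minimal, hypothetical sketch of that pattern (my_lock and my_obj_put() are invented; the spinlock primitives are the real API):

/*
 * Sketch only. raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() save and
 * restore the caller's IRQ state, whereas raw_spin_unlock_irq() would
 * re-enable interrupts unconditionally -- wrong if the caller entered with
 * interrupts already disabled.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);

static void my_obj_put(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_lock, flags);
	/* ... critical section; safe regardless of the caller's IRQ state ... */
	raw_spin_unlock_irqrestore(&my_lock, flags);
}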