author	Nick Piggin <npiggin@suse.de>	2008-01-30 13:31:20 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 13:31:20 +0100
commit	95c354fe9f7d6decc08a92aa26eb233ecc2155bf (patch)
tree	ec9267032ea875e84216cfb20acb2cfc7c62149f /kernel
parent	a95d67f87e1a5f1b4429be3ba3bf7b4051657908 (diff)
download	blackbird-op-linux-95c354fe9f7d6decc08a92aa26eb233ecc2155bf.tar.gz
download	blackbird-op-linux-95c354fe9f7d6decc08a92aa26eb233ecc2155bf.zip
spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks is quite nasty. Not only does it double the size of a spinlock but it changes locking to a potentially less optimal trylock.

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a __raw_spin_is_contended that uses the lock data itself to determine whether there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is not set.

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to decouple it from the spinlock implementation, and make it typesafe (rwlocks do not have any need_lockbreak sites -- why do they even get bloated up with that break_lock then?).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
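For orientation, a minimal sketch of how the helpers named above can be layered, assuming the post-patch arrangement described in the message; the exact definitions live in include/linux/spinlock.h and include/linux/sched.h and may differ in detail:

/*
 * Sketch (not part of this page's diff): with CONFIG_GENERIC_LOCKBREAK unset,
 * contention is derived from the lock word itself via the new
 * __raw_spin_is_contended(); with it set, the legacy break_lock field remains.
 */
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock)	((lock)->break_lock)
#else
#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
#endif

/*
 * spin_needbreak() is the typesafe replacement for need_lockbreak():
 * only meaningful on preemptible kernels, and expressed in terms of
 * spin_is_contended() so callers stay independent of the lock layout.
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}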
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	16
-rw-r--r--	kernel/spinlock.c	3
2 files changed, 7 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 524285e46fa7..ba4c88088f62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4945,19 +4945,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
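As a usage illustration of the reshaped cond_resched_lock() above, here is a hypothetical caller: a long drain loop under a spinlock that lets cond_resched_lock() drop and re-take the lock when it is contended or a reschedule is due. The work_item structure, work_list, work_lock and handle() are invented for this sketch.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct work_item {
	struct list_head node;
	/* payload ... */
};

static LIST_HEAD(work_list);
static DEFINE_SPINLOCK(work_lock);

static void handle(struct work_item *w);	/* invented helper */

static void drain_work(void)
{
	spin_lock(&work_lock);
	while (!list_empty(&work_list)) {
		struct work_item *w = list_entry(work_list.next,
						 struct work_item, node);
		list_del(&w->node);
		handle(w);

		/*
		 * Drops the lock (and possibly reschedules) only when
		 * spin_needbreak() or need_resched() says so, then
		 * re-acquires it before returning.
		 */
		cond_resched_lock(&work_lock);
	}
	spin_unlock(&work_lock);
}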
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cd72424c2662..ae28c8245123 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {