author     Ingo Molnar <mingo@elte.hu>             2006-07-03 00:24:33 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-07-03 15:27:01 -0700
commit     9a11b49a805665e13a56aa067afaf81d43ec1514 (patch)
tree       bf499956e3f67d1211d68ab1e2eb76645f453dfb /kernel/rtmutex.c
parent     fb7e42413a098cc45b3adf858da290033af62bae (diff)
download   talos-obmc-linux-9a11b49a805665e13a56aa067afaf81d43ec1514.tar.gz
           talos-obmc-linux-9a11b49a805665e13a56aa067afaf81d43ec1514.zip
[PATCH] lockdep: better lock debugging
Generic lock debugging:
- generalized lock debugging framework: for example, when a bug is detected
  in one lock subsystem, debugging is turned off in all lock subsystems, so
  that follow-up warnings are not reported from already-inconsistent state.
- got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from the
  mutex/rtmutex debugging code: it caused far too much prototype hackery,
  and lockdep provides the same information anyway (see the sketch after
  this list).
- ability to do silent tests
- check lock freeing in vfree too.
- more fine-grained debugging options, to allow distributions to
  turn off the more expensive debugging features.
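For context, the caller-address plumbing that this patch removes looked
roughly like the sketch below. This is a hedged, userspace-compilable toy:
the macro definitions are an assumption modeled on the pre-lockdep
mutex-debug code, and toy_lock/toy_lock_acquire are invented stand-ins, not
kernel APIs. The point is only to show why threading an extra __ip parameter
through every prototype was painful, and why lockdep (which records the
acquisition site itself) makes it unnecessary.

/*
 * Toy illustration of the old __IP__/__IP_DECL__ convention.  The macro
 * definitions below are an assumption, modeled on the pre-lockdep
 * mutex-debug code; the lock type and functions are invented.
 */
#include <stdio.h>

#define __IP_DECL__	, unsigned long __ip
#define __IP__		, __ip
#define __RET_IP__	, (unsigned long)__builtin_return_address(0)

struct toy_lock { int owner; };

/* Before: every debug-aware helper carries the acquisition IP around. */
static int try_to_take_toy_lock(struct toy_lock *lock __IP_DECL__)
{
	printf("lock %p taken from caller ip %#lx\n", (void *)lock, __ip);
	lock->owner = 1;
	return 1;
}

static void toy_lock_slowpath(struct toy_lock *lock __IP_DECL__)
{
	/* The IP has to be forwarded through every intermediate call... */
	try_to_take_toy_lock(lock __IP__);
}

void toy_lock_acquire(struct toy_lock *lock)
{
	/* ...and captured at the outermost entry point. */
	toy_lock_slowpath(lock __RET_IP__);
}

int main(void)
{
	struct toy_lock l = { 0 };

	toy_lock_acquire(&l);
	return 0;
}

After this patch, the extra parameter disappears from every prototype and
call site in kernel/rtmutex.c (see the diff below), and no information is
lost because lockdep tracks the acquisition site on its own.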
There is no separate 'held mutexes' list anymore - instead there is a 'held
locks' stack within lockdep, which unifies deadlock detection across all lock
classes. (This is independent of the lockdep validation itself - lockdep first
checks whether we are already holding the lock.)
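A minimal conceptual sketch of such a per-task 'held locks' stack, assuming
toy types rather than lockdep's real held_lock bookkeeping: every acquisition
is pushed, every release pops, and a simple scan of the stack answers "are we
already holding this lock?", which is the unified check referred to above.

/*
 * Toy model of a per-task "held locks" stack -- an illustration of the
 * idea only, not lockdep's actual data structures or checks.
 */
#include <stdio.h>

#define MAX_HELD	48

struct toy_task {
	const void *held[MAX_HELD];	/* lock addresses, acquisition order */
	int	    depth;
};

static void toy_acquire(struct toy_task *t, const void *lock)
{
	int i;

	/* Re-acquiring a lock we already hold would self-deadlock. */
	for (i = 0; i < t->depth; i++)
		if (t->held[i] == lock)
			printf("recursive acquisition of %p detected!\n", lock);

	t->held[t->depth++] = lock;
}

static void toy_release(struct toy_task *t, const void *lock)
{
	/* Simplification: assume strictly nested (stack-ordered) release. */
	if (t->depth && t->held[t->depth - 1] == lock)
		t->depth--;
}

int main(void)
{
	struct toy_task task = { .depth = 0 };
	int a, b;

	toy_acquire(&task, &a);
	toy_acquire(&task, &b);
	toy_acquire(&task, &a);	/* triggers the self-deadlock report */
	toy_release(&task, &b);
	return 0;
}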
Here are the current debugging options:
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y
which do:
config DEBUG_MUTEXES
	bool "Mutex debugging, basic checks"

config DEBUG_LOCK_ALLOC
	bool "Detect incorrect freeing of live mutexes"
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r--  kernel/rtmutex.c  46
1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 45d61016da57..91b699aa658b 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -161,8 +161,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex_waiter *orig_waiter,
-				      struct task_struct *top_task
-				      __IP_DECL__)
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -357,7 +356,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+static int try_to_take_rt_mutex(struct rt_mutex *lock)
 {
 	/*
 	 * We have to be careful here if the atomic speedups are
@@ -384,7 +383,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
 		return 0;
 
 	/* We got the lock. */
-	debug_rt_mutex_lock(lock __IP__);
+	debug_rt_mutex_lock(lock);
 
 	rt_mutex_set_owner(lock, current, 0);
 
@@ -402,8 +401,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
-				   int detect_deadlock
-				   __IP_DECL__)
+				   int detect_deadlock)
 {
 	struct rt_mutex_waiter *top_waiter = waiter;
 	task_t *owner = rt_mutex_owner(lock);
@@ -454,7 +452,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 current __IP__);
+					 current);
 
 	spin_lock(&lock->wait_lock);
 
@@ -526,7 +524,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
  * Must be called with lock->wait_lock held
  */
 static void remove_waiter(struct rt_mutex *lock,
-			  struct rt_mutex_waiter *waiter __IP_DECL__)
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	int boost = 0;
@@ -568,7 +566,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
 
 	spin_lock(&lock->wait_lock);
 }
@@ -595,7 +593,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 
 /*
@@ -604,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock __IP_DECL__)
+		  int detect_deadlock)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -615,7 +613,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock __IP__)) {
+	if (try_to_take_rt_mutex(lock)) {
 		spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@@ -629,7 +627,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock __IP__))
+		if (try_to_take_rt_mutex(lock))
 			break;
 
 		/*
@@ -653,7 +651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		 */
 		if (!waiter.task) {
 			ret = task_blocks_on_rt_mutex(lock, &waiter,
-						      detect_deadlock __IP__);
+						      detect_deadlock);
 			/*
 			 * If we got woken up by the owner then start loop
 			 * all over without going into schedule to try
@@ -680,7 +678,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(waiter.task))
-		remove_waiter(lock, &waiter __IP__);
+		remove_waiter(lock, &waiter);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -711,7 +709,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  * Slow path try-lock function:
  */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
@@ -719,7 +717,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
-		ret = try_to_take_rt_mutex(lock __IP__);
+		ret = try_to_take_rt_mutex(lock);
 		/*
 		 * try_to_take_rt_mutex() sets the lock waiters
 		 * bit unconditionally. Clean this up.
@@ -769,13 +767,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock __IP_DECL__))
+				int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, NULL, detect_deadlock);
 }
 
 static inline int
@@ -783,24 +781,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock __IP_DECL__))
+				      int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, timeout, detect_deadlock);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock __RET_IP__);
+	return slowfn(lock);
 }
 
 static inline void
@@ -948,7 +946,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
 	__rt_mutex_init(lock, NULL);
-	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
+	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner, 0);
 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }
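Apart from the __IP__ removal itself, the hunks in rt_mutex_fastlock() and
rt_mutex_fasttrylock() show the fastpath/slowpath split those wrappers
implement: try a lockless cmpxchg on the owner word first, and only call the
out-of-line slow path on contention. Below is a rough userspace sketch of
that pattern, with toy types and C11 atomics standing in for rt_mutex and
rt_mutex_cmpxchg(); the names toy_rt_mutex, toy_fastlock and toy_slowlock are
invented for illustration.

/*
 * Userspace sketch of the fastpath/slowpath split: a single
 * compare-and-exchange handles the uncontended case, and everything
 * expensive lives behind the slow-path function pointer.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_rt_mutex {
	_Atomic(void *) owner;		/* NULL means "unlocked" */
};

static int toy_slowlock(struct toy_rt_mutex *lock, void *task)
{
	/* Real code would enqueue a waiter, boost priority, sleep, ... */
	printf("slow path taken for task %p\n", task);
	return 0;
}

static int toy_fastlock(struct toy_rt_mutex *lock, void *task,
			int (*slowfn)(struct toy_rt_mutex *, void *))
{
	void *expected = NULL;

	/* Fast path: uncontended acquisition is one atomic cmpxchg. */
	if (atomic_compare_exchange_strong(&lock->owner, &expected, task))
		return 0;

	return slowfn(lock, task);
}

int main(void)
{
	struct toy_rt_mutex lock;
	int task_a, task_b;

	atomic_init(&lock.owner, NULL);
	toy_fastlock(&lock, &task_a, toy_slowlock);	/* fast path */
	toy_fastlock(&lock, &task_b, toy_slowlock);	/* contended: slow path */
	return 0;
}

The design keeps the uncontended case down to a single atomic operation,
while all of the priority-inheritance machinery stays in the slow path.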