path: root/kernel
author     Oleg Nesterov <oleg@tv-sign.ru>    2007-12-05 15:46:09 +0100
committer  Ingo Molnar <mingo@elte.hu>        2007-12-05 15:46:09 +0100
commit     54561783ee99d73a086f3abbda3e44f87f6bf65b (patch)
tree       73f836a158f9c05ece41cae6cdb80888d9d6ba07 /kernel
parent     856848737bd944c1db3ce0a66bbf67e56bd6f77d (diff)
lockdep: in_range() fix
Torsten Kaiser wrote:

| static inline int in_range(const void *start, const void *addr, const void *end)
| {
|         return addr >= start && addr <= end;
| }
|
| This will return true if addr is in the range of start (inclusive)
| to end (inclusive).
|
| But debug_check_no_locks_freed() seems to do:
|
|         const void *mem_to = mem_from + mem_len
|         -> mem_to is the last byte of the freed range, that fits in_range
|         lock_from = (void *)hlock->instance;
|         -> first byte of the lock
|         lock_to = (void *)(hlock->instance + 1);
|         -> first byte of the next lock, not last byte of the lock that is
|            being checked!
|
| The test is:
|
|         if (!in_range(mem_from, lock_from, mem_to) &&
|             !in_range(mem_from, lock_to, mem_to))
|                 continue;
|
| So it tests if the first byte of the lock is in the range that is
| freed -> OK.
| And if the first byte of the *next* lock is in the range that is
| freed -> Not OK.

We can also simplify the in_range checks: we need only 2 comparisons, not 4.
If the lock is not in the memory range, it must lie either entirely to the
left of the range or entirely to the right of it.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
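A minimal user-space sketch of the point above. It is not part of this patch:
the 64-byte buffer and the lock/free offsets are made up for illustration, and
the pointers are char * so the arithmetic is plain ISO C. With the old
inclusive in_range() test, a lock that ends exactly where the freed region
begins gets flagged, because lock_to (the first byte of the *next* object)
falls inside [mem_from, mem_to]; the half-open, two-comparison not_in_range()
test does not.

#include <stdio.h>

/* old, inclusive point-in-range test */
static int in_range(const char *start, const char *addr, const char *end)
{
	return addr >= start && addr <= end;
}

/* new test: [lock_from, lock_from+lock_len) and [mem_from, mem_from+mem_len)
 * are disjoint iff one range ends at or before the start of the other */
static int not_in_range(const char *mem_from, unsigned long mem_len,
			const char *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
	       mem_from + mem_len <= lock_from;
}

int main(void)
{
	static char buf[64];
	const unsigned long mem_len = 16, lock_len = 16;
	const char *lock_from = buf;             /* lock occupies buf[0..15]      */
	const char *lock_to   = buf + lock_len;  /* first byte of the NEXT object */
	const char *mem_from  = buf + 16;        /* freed region is buf[16..31]   */
	const char *mem_to    = mem_from + mem_len;

	/* old logic: lock_to lies inside [mem_from, mem_to], so a lock that
	 * merely borders the freed region is reported as freed */
	int old_hit = in_range(mem_from, lock_from, mem_to) ||
		      in_range(mem_from, lock_to, mem_to);

	/* new logic: the two half-open ranges do not overlap, no report */
	int new_hit = !not_in_range(mem_from, mem_len, lock_from, lock_len);

	printf("old check flags the lock: %d  (false positive)\n", old_hit);
	printf("new check flags the lock: %d\n", new_hit);
	return 0;
}

The two-comparison form works because two intervals are disjoint exactly when
one ends at or before the point where the other begins, which is the
observation the patch relies on.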
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c | 22
1 file changed, 10 insertions, 12 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7e2ca7c9d99c..0f389621bb6b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
 #endif
 }
 
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
-	return addr >= start && addr <= end;
-}
-
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 		     const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	dump_stack();
 }
 
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+				const void* lock_from, unsigned long lock_len)
+{
+	return lock_from + lock_len <= mem_from ||
+		mem_from + mem_len <= lock_from;
+}
+
 /*
  * Called when kernel memory is freed (or unmapped), or if a lock
  * is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
  */
 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 {
-	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		lock_from = (void *)hlock->instance;
-		lock_to = (void *)(hlock->instance + 1);
-
-		if (!in_range(mem_from, lock_from, mem_to) &&
-		    !in_range(mem_from, lock_to, mem_to))
+		if (not_in_range(mem_from, mem_len, hlock->instance,
+					sizeof(*hlock->instance)))
 			continue;
 
-		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
 	local_irq_restore(flags);