Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           10
-rw-r--r--  lib/idr.c                   16
-rw-r--r--  lib/kobject_uevent.c         4
-rw-r--r--  lib/spinlock_debug.c        10
-rw-r--r--  lib/ts_bm.c                 11
-rw-r--r--  lib/zlib_inflate/inflate.c   5
6 files changed, 31 insertions, 25 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e5889b1a33ff..554ee688a9f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -158,7 +158,7 @@ config DEBUG_RWSEMS
 
 config DEBUG_LOCK_ALLOC
         bool "Lock debugging: detect incorrect freeing of live locks"
-        depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
         select DEBUG_SPINLOCK
         select DEBUG_MUTEXES
         select DEBUG_RWSEMS
@@ -173,7 +173,7 @@ config DEBUG_LOCK_ALLOC
 
 config PROVE_LOCKING
         bool "Lock debugging: prove locking correctness"
-        depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
         select LOCKDEP
         select DEBUG_SPINLOCK
         select DEBUG_MUTEXES
@@ -216,7 +216,7 @@ config PROVE_LOCKING
 
 config LOCKDEP
         bool
-        depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
         select STACKTRACE
         select FRAME_POINTER
         select KALLSYMS
@@ -224,13 +224,14 @@ config LOCKDEP
 
 config DEBUG_LOCKDEP
         bool "Lock dependency engine debugging"
-        depends on LOCKDEP
+        depends on DEBUG_KERNEL && LOCKDEP
         help
           If you say Y here, the lock dependency engine will do
           additional runtime checks to debug itself, at the price
           of more runtime overhead.
 
 config TRACE_IRQFLAGS
+        depends on DEBUG_KERNEL
         bool
         default y
         depends on TRACE_IRQFLAGS_SUPPORT
@@ -256,6 +257,7 @@ config DEBUG_LOCKING_API_SELFTESTS
 
 config STACKTRACE
         bool
+        depends on DEBUG_KERNEL
         depends on STACKTRACE_SUPPORT
 
 config DEBUG_KOBJECT
diff --git a/lib/idr.c b/lib/idr.c
index 4d096819511a..16d2143fea48 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache;
 
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
         struct idr_layer *p;
+        unsigned long flags;
 
-        spin_lock(&idp->lock);
+        spin_lock_irqsave(&idp->lock, flags);
         if ((p = idp->id_free)) {
                 idp->id_free = p->ary[0];
                 idp->id_free_cnt--;
                 p->ary[0] = NULL;
         }
-        spin_unlock(&idp->lock);
+        spin_unlock_irqrestore(&idp->lock, flags);
         return(p);
 }
@@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p)
 
 static void free_layer(struct idr *idp, struct idr_layer *p)
 {
+        unsigned long flags;
+
         /*
          * Depends on the return element being zeroed.
          */
-        spin_lock(&idp->lock);
+        spin_lock_irqsave(&idp->lock, flags);
         __free_layer(idp, p);
-        spin_unlock(&idp->lock);
+        spin_unlock_irqrestore(&idp->lock, flags);
 }
 
 /**
@@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
         struct idr_layer *p, *new;
         int layers, v, id;
+        unsigned long flags;
 
         id = starting_id;
 build_up:
@@ -191,14 +195,14 @@ build_up:
                          * The allocation failed.  If we built part of
                          * the structure tear it down.
                          */
-                        spin_lock(&idp->lock);
+                        spin_lock_irqsave(&idp->lock, flags);
                         for (new = p; p && p != idp->top; new = p) {
                                 p = p->ary[0];
                                 new->ary[0] = NULL;
                                 new->bitmap = new->count = 0;
                                 __free_layer(idp, new);
                         }
-                        spin_unlock(&idp->lock);
+                        spin_unlock_irqrestore(&idp->lock, flags);
                         return -1;
                 }
                 new->ary[0] = p;
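Note on the lib/idr.c hunks: they switch idp->lock from plain spin_lock() to the _irqsave/_irqrestore variants, which save the caller's interrupt state and disable local IRQs while the lock is held. That is the standard pattern when a lock may also be taken from interrupt context. A minimal kernel-style sketch of the pattern; demo_lock, demo_count and demo_irq are hypothetical names for illustration, not part of this commit:

        #include <linux/interrupt.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */
        static int demo_count;

        /* Process context: take the lock with local IRQs disabled.  If the
         * handler below fired on this CPU while we held a plain spin_lock(),
         * it would spin on demo_lock against ourselves and deadlock. */
        static void demo_inc(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&demo_lock, flags);
                demo_count++;
                spin_unlock_irqrestore(&demo_lock, flags);
        }

        /* Interrupt context: IRQs are already off, plain spin_lock() works. */
        static irqreturn_t demo_irq(int irq, void *dev_id)
        {
                spin_lock(&demo_lock);
                demo_count--;
                spin_unlock(&demo_lock);
                return IRQ_HANDLED;
        }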
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2b1530fc573b..7f20e7b857cb 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -50,10 +50,6 @@ static char *action_to_string(enum kobject_action action)
                 return "offline";
         case KOBJ_ONLINE:
                 return "online";
-        case KOBJ_DOCK:
-                return "dock";
-        case KOBJ_UNDOCK:
-                return "undock";
         default:
                 return NULL;
         }
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 3d9c4dc965ed..58c577dd82e5 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -162,6 +162,7 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
+#if 0           /* __write_lock_debug() can lock up - maybe this can too? */
 static void __read_lock_debug(rwlock_t *lock)
 {
         int print_once = 1;
@@ -184,12 +185,12 @@ static void __read_lock_debug(rwlock_t *lock)
                 }
         }
 }
+#endif
 
 void _raw_read_lock(rwlock_t *lock)
 {
         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-        if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
-                __read_lock_debug(lock);
+        __raw_read_lock(&lock->raw_lock);
 }
 
 int _raw_read_trylock(rwlock_t *lock)
@@ -235,6 +236,7 @@ static inline void debug_write_unlock(rwlock_t *lock)
         lock->owner_cpu = -1;
 }
 
+#if 0           /* This can cause lockups */
 static void __write_lock_debug(rwlock_t *lock)
 {
         int print_once = 1;
@@ -257,12 +259,12 @@
                 }
         }
 }
+#endif
 
 void _raw_write_lock(rwlock_t *lock)
 {
         debug_write_lock_before(lock);
-        if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
-                __write_lock_debug(lock);
+        __raw_write_lock(&lock->raw_lock);
         debug_write_lock_after(lock);
 }
 
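Note on the lib/spinlock_debug.c hunks: the debug helpers, now under #if 0, acquired the rwlock by polling *_trylock() in a loop so they could print a warning after too many iterations. A poll loop has none of the fairness or queueing of the architecture's real lock-acquire, and as the new comments say, it can itself lock up, so _raw_read_lock()/_raw_write_lock() now call the raw primitives directly. A rough userspace illustration of the same trade-off with POSIX rwlocks; this is an illustrative sketch only, not the kernel code:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

        /* Debug-style acquire: poll with trylock so we can warn if the lock
         * takes too long.  Unlike a blocking acquire, the poll loop has no
         * fairness guarantee, so under contention it may spin indefinitely,
         * the kind of lockup the disabled kernel helpers could hit. */
        static void write_lock_debugging(void)
        {
                unsigned long spins = 0;

                while (pthread_rwlock_trywrlock(&rw) != 0) {
                        if (++spins == 100000000UL) {
                                fprintf(stderr, "write lock: possible lockup\n");
                                spins = 0;
                        }
                }
        }

        /* Plain acquire: defer to the primitive's own blocking/queueing. */
        static void write_lock_plain(void)
        {
                pthread_rwlock_wrlock(&rw);
        }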
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 0110e4414805..d90822c378a4 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -111,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g)
         return ret;
 }
 
-static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
-                               unsigned int len)
+static void compute_prefix_tbl(struct ts_bm *bm)
 {
         int i, j, g;
 
         for (i = 0; i < ASIZE; i++)
-                bm->bad_shift[i] = len;
-        for (i = 0; i < len - 1; i++)
-                bm->bad_shift[pattern[i]] = len - 1 - i;
+                bm->bad_shift[i] = bm->patlen;
+        for (i = 0; i < bm->patlen - 1; i++)
+                bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
 
         /* Compute the good shift array, used to match reocurrences
          * of a subpattern */
@@ -150,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
         bm = ts_config_priv(conf);
         bm->patlen = len;
         bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
-        compute_prefix_tbl(bm, pattern, len);
         memcpy(bm->pattern, pattern, len);
+        compute_prefix_tbl(bm);
         return conf;
 }
 
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 7f922dccf1a5..fceb97c3aff7 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -347,7 +347,10 @@ int zlib_inflate(z_streamp strm, int flush)
     static const unsigned short order[19] = /* permutation of code lengths */
         {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
 
-    if (strm == NULL || strm->state == NULL || strm->next_out == NULL ||
+    /* Do not check for strm->next_out == NULL here as ppc zImage
+       inflates to strm->next_out = 0 */
+
+    if (strm == NULL || strm->state == NULL ||
         (strm->next_in == NULL && strm->avail_in != 0))
         return Z_STREAM_ERROR;
 
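A footnote on the lib/ts_bm.c hunks above: compute_prefix_tbl() now reads the pattern through bm->pattern instead of taking it as an argument, so bm_init() must memcpy() the pattern into place before calling it; keeping the old call order with the new signature would build the shift table from uninitialized memory. A standalone sketch of the Boyer-Moore bad-character ("bad shift") table that the first hunk computes; this is plain userspace C with hypothetical names, not the kernel code:

        #include <stdio.h>
        #include <string.h>

        #define ASIZE 256       /* one slot per possible byte value */

        /* Bad-character table: how far the pattern may slide when text byte c
         * mismatches.  Bytes absent from the pattern allow a full patlen
         * shift; others shift by their distance from the pattern's end.
         * Assumes patlen >= 1, as the kernel code does. */
        static void compute_bad_shift(const unsigned char *pattern,
                                      unsigned int patlen,
                                      unsigned int bad_shift[ASIZE])
        {
                unsigned int i;

                for (i = 0; i < ASIZE; i++)
                        bad_shift[i] = patlen;
                for (i = 0; i < patlen - 1; i++)
                        bad_shift[pattern[i]] = patlen - 1 - i;
        }

        int main(void)
        {
                unsigned int bad_shift[ASIZE];
                const unsigned char *pat = (const unsigned char *)"needle";

                compute_bad_shift(pat, strlen((const char *)pat), bad_shift);
                /* 'n' occurs at offset 0 of "needle" (len 6) -> shift 5;
                 * 'x' never occurs -> full shift 6. */
                printf("shift['n']=%u shift['x']=%u\n",
                       bad_shift['n'], bad_shift['x']);
                return 0;
        }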