Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r--  mm/kmemleak.c | 458
1 file changed, 156 insertions(+), 302 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f6e602918dac..3a4259eeb5a0 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -13,7 +13,7 @@ * * The following locks and mutexes are used by kmemleak: * - * - kmemleak_lock (rwlock): protects the object_list modifications and + * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and * accesses to the object_tree_root. The object_list is the main list * holding the metadata (struct kmemleak_object) for the allocated memory * blocks. The object_tree_root is a red black tree used to look-up @@ -22,13 +22,13 @@ * object_tree_root in the create_object() function called from the * kmemleak_alloc() callback and removed in delete_object() called from the * kmemleak_free() callback - * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to - * the metadata (e.g. count) are protected by this lock. Note that some - * members of this structure may be protected by other means (atomic or - * kmemleak_lock). This lock is also held when scanning the corresponding - * memory block to avoid the kernel freeing it via the kmemleak_free() - * callback. This is less heavyweight than holding a global lock like - * kmemleak_lock during scanning + * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object. + * Accesses to the metadata (e.g. count) are protected by this lock. Note + * that some members of this structure may be protected by other means + * (atomic or kmemleak_lock). This lock is also held when scanning the + * corresponding memory block to avoid the kernel freeing it via the + * kmemleak_free() callback. This is less heavyweight than holding a global + * lock like kmemleak_lock during scanning. * - scan_mutex (mutex): ensures that only one thread may scan the memory for * unreferenced objects at a time. The gray_list contains the objects which * are already referenced or marked as false positives and need to be @@ -135,7 +135,7 @@ struct kmemleak_scan_area { * (use_count) and freed using the RCU mechanism. 
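The header comment rewritten above documents a two-level scheme: the global kmemleak_lock (now a raw_spinlock_t, which stays a true spinning lock even on PREEMPT_RT) protects the object list and the lookup tree, while each kmemleak_object carries its own lock for its metadata, so a block can be scanned without holding the global lock for the whole scan. Below is a minimal userspace sketch of that split and of the lock ordering it implies; pthread mutexes and the names registry_lock and tracked_obj are stand-ins invented for illustration, not the kernel code.

/*
 * Userspace model of the two-level locking: one global lock for the
 * object registry, one lock per object for its metadata.
 */
#include <pthread.h>
#include <stdio.h>

struct tracked_obj {
	pthread_mutex_t lock;	/* plays the role of kmemleak_object.lock */
	int count;		/* references found while scanning */
};

/* plays the role of kmemleak_lock: guards the registry itself */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void bump_count(struct tracked_obj *obj)
{
	/* fixed order: registry lock first, per-object lock nested inside */
	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&obj->lock);
	obj->count++;
	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	static struct tracked_obj obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

	bump_count(&obj);
	printf("count = %d\n", obj.count);
	return 0;
}

The only rule the sketch tries to convey is the ordering: when both locks are needed, the registry lock is taken first and the per-object lock nested inside it, which is what scan_block() does with raw_spin_lock_nested() further down in the patch.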
*/ struct kmemleak_object { - spinlock_t lock; + raw_spinlock_t lock; unsigned int flags; /* object status flags */ struct list_head object_list; struct list_head gray_list; @@ -168,6 +168,8 @@ struct kmemleak_object { #define OBJECT_REPORTED (1 << 1) /* flag set to not scan the object */ #define OBJECT_NO_SCAN (1 << 2) +/* flag set to fully scan the object when scan_area allocation failed */ +#define OBJECT_FULL_SCAN (1 << 3) #define HEX_PREFIX " " /* number of bytes to print per line; must be 16 or 32 */ @@ -183,23 +185,25 @@ struct kmemleak_object { static LIST_HEAD(object_list); /* the list of gray-colored objects (see color_gray comment below) */ static LIST_HEAD(gray_list); +/* memory pool allocation */ +static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE]; +static int mem_pool_free_count = ARRAY_SIZE(mem_pool); +static LIST_HEAD(mem_pool_free_list); /* search tree for object boundaries */ static struct rb_root object_tree_root = RB_ROOT; -/* rw_lock protecting the access to object_list and object_tree_root */ -static DEFINE_RWLOCK(kmemleak_lock); +/* protecting the access to object_list and object_tree_root */ +static DEFINE_RAW_SPINLOCK(kmemleak_lock); /* allocation caches for kmemleak internal data */ static struct kmem_cache *object_cache; static struct kmem_cache *scan_area_cache; /* set if tracing memory operations is enabled */ -static int kmemleak_enabled; +static int kmemleak_enabled = 1; /* same as above but only for the kmemleak_free() callback */ -static int kmemleak_free_enabled; +static int kmemleak_free_enabled = 1; /* set in the late_initcall if there were no errors */ static int kmemleak_initialized; -/* enables or disables early logging of the memory operations */ -static int kmemleak_early_log = 1; /* set if a kmemleak warning was issued */ static int kmemleak_warning; /* set if a fatal kmemleak error has occurred */ @@ -227,49 +231,6 @@ static bool kmemleak_found_leaks; static bool kmemleak_verbose; module_param_named(verbose, kmemleak_verbose, bool, 0600); -/* - * Early object allocation/freeing logging. Kmemleak is initialized after the - * kernel allocator. However, both the kernel allocator and kmemleak may - * allocate memory blocks which need to be tracked. Kmemleak defines an - * arbitrary buffer to hold the allocation/freeing information before it is - * fully initialized. - */ - -/* kmemleak operation type for early logging */ -enum { - KMEMLEAK_ALLOC, - KMEMLEAK_ALLOC_PERCPU, - KMEMLEAK_FREE, - KMEMLEAK_FREE_PART, - KMEMLEAK_FREE_PERCPU, - KMEMLEAK_NOT_LEAK, - KMEMLEAK_IGNORE, - KMEMLEAK_SCAN_AREA, - KMEMLEAK_NO_SCAN, - KMEMLEAK_SET_EXCESS_REF -}; - -/* - * Structure holding the information passed to kmemleak callbacks during the - * early logging. - */ -struct early_log { - int op_type; /* kmemleak operation type */ - int min_count; /* minimum reference count */ - const void *ptr; /* allocated/freed memory block */ - union { - size_t size; /* memory block size */ - unsigned long excess_ref; /* surplus reference passing */ - }; - unsigned long trace[MAX_TRACE]; /* stack trace */ - unsigned int trace_len; /* stack trace length */ -}; - -/* early logging buffer and current position */ -static struct early_log - early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata; -static int crt_early_log __initdata; - static void kmemleak_disable(void); /* @@ -450,6 +411,54 @@ static int get_object(struct kmemleak_object *object) } /* + * Memory pool allocation and freeing. kmemleak_lock must not be held. 
+ */ +static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) +{ + unsigned long flags; + struct kmemleak_object *object; + + /* try the slab allocator first */ + if (object_cache) { + object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); + if (object) + return object; + } + + /* slab allocation failed, try the memory pool */ + raw_spin_lock_irqsave(&kmemleak_lock, flags); + object = list_first_entry_or_null(&mem_pool_free_list, + typeof(*object), object_list); + if (object) + list_del(&object->object_list); + else if (mem_pool_free_count) + object = &mem_pool[--mem_pool_free_count]; + else + pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + + return object; +} + +/* + * Return the object to either the slab allocator or the memory pool. + */ +static void mem_pool_free(struct kmemleak_object *object) +{ + unsigned long flags; + + if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { + kmem_cache_free(object_cache, object); + return; + } + + /* add the object to the memory pool free list */ + raw_spin_lock_irqsave(&kmemleak_lock, flags); + list_add(&object->object_list, &mem_pool_free_list); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); +} + +/* * RCU callback to free a kmemleak_object. */ static void free_object_rcu(struct rcu_head *rcu) @@ -467,7 +476,7 @@ static void free_object_rcu(struct rcu_head *rcu) hlist_del(&area->node); kmem_cache_free(scan_area_cache, area); } - kmem_cache_free(object_cache, object); + mem_pool_free(object); } /* @@ -485,7 +494,15 @@ static void put_object(struct kmemleak_object *object) /* should only get here after delete_object was called */ WARN_ON(object->flags & OBJECT_ALLOCATED); - call_rcu(&object->rcu, free_object_rcu); + /* + * It may be too early for the RCU callbacks, however, there is no + * concurrent object_list traversal when !object_cache and all objects + * came from the memory pool. Free the object directly. + */ + if (object_cache) + call_rcu(&object->rcu, free_object_rcu); + else + free_object_rcu(&object->rcu); } /* @@ -497,9 +514,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) struct kmemleak_object *object; rcu_read_lock(); - read_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); - read_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); /* check whether the object is still available */ if (object && !get_object(object)) @@ -510,6 +527,16 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) } /* + * Remove an object from the object_tree_root and object_list. Must be called + * with the kmemleak_lock held _if_ kmemleak is still enabled. + */ +static void __remove_object(struct kmemleak_object *object) +{ + rb_erase(&object->rb_node, &object_tree_root); + list_del_rcu(&object->object_list); +} + +/* * Look up an object in the object search tree and remove it from both * object_tree_root and object_list. The returned object's use_count should be * at least 1, as initially set by create_object(). 
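mem_pool_alloc() above tries the slab cache first and only then falls back to the static pool, reusing recycled entries from mem_pool_free_list before handing out never-used slots by decrementing mem_pool_free_count; mem_pool_free() decides where an object goes back simply by checking whether its address falls inside the mem_pool[] array. A small userspace sketch of the same allocate-from-cache-else-pool pattern, with malloc()/free() standing in for the slab cache and invented names (obj_alloc, obj_free, heap_ready); the locking and the one-way disable path are left out.

/* Sketch of "normal allocator first, static pool as fallback". */
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 64

struct obj {
	struct obj *next;	/* free-list linkage, reuses the object itself */
	long payload;
};

static struct obj pool[POOL_SIZE];
static int pool_free_count = POOL_SIZE;	/* never-used slots left */
static struct obj *pool_free_list;	/* recycled slots */
static bool heap_ready = true;		/* models "object_cache != NULL" */

static struct obj *obj_alloc(void)
{
	struct obj *o;

	if (heap_ready) {		/* try the normal allocator first */
		o = malloc(sizeof(*o));
		if (o)
			return o;
	}
	if (pool_free_list) {		/* then recycled pool slots */
		o = pool_free_list;
		pool_free_list = o->next;
		return o;
	}
	if (pool_free_count)		/* then untouched pool slots */
		return &pool[--pool_free_count];
	fprintf(stderr, "pool empty\n");
	return NULL;
}

static void obj_free(struct obj *o)
{
	/* an address-range test tells pool objects from heap objects */
	if (o < pool || o >= pool + POOL_SIZE) {
		free(o);
		return;
	}
	o->next = pool_free_list;
	pool_free_list = o;
}

int main(void)
{
	heap_ready = false;		/* simulate "slab cache not up yet" */
	struct obj *a = obj_alloc();	/* served from the static pool */
	heap_ready = true;
	struct obj *b = obj_alloc();	/* served by malloc() */

	obj_free(a);			/* back onto the pool free list */
	obj_free(b);			/* back to the heap */
	return 0;
}

The address-range test is also what makes the early-boot case safe in the patch: objects handed out from the pool before object_cache exists can later be freed through the same mem_pool_free() path, and put_object() frees them directly instead of via call_rcu() while the cache is still NULL.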
@@ -519,13 +546,11 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali unsigned long flags; struct kmemleak_object *object; - write_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); - if (object) { - rb_erase(&object->rb_node, &object_tree_root); - list_del_rcu(&object->object_list); - } - write_unlock_irqrestore(&kmemleak_lock, flags); + if (object) + __remove_object(object); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); return object; } @@ -550,7 +575,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, struct rb_node **link, *rb_parent; unsigned long untagged_ptr; - object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); + object = mem_pool_alloc(gfp); if (!object) { pr_warn("Cannot allocate a kmemleak_object structure\n"); kmemleak_disable(); @@ -560,7 +585,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, INIT_LIST_HEAD(&object->object_list); INIT_LIST_HEAD(&object->gray_list); INIT_HLIST_HEAD(&object->area_list); - spin_lock_init(&object->lock); + raw_spin_lock_init(&object->lock); atomic_set(&object->use_count, 1); object->flags = OBJECT_ALLOCATED; object->pointer = ptr; @@ -592,7 +617,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, /* kernel backtrace */ object->trace_len = __save_stack_trace(object->trace); - write_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); min_addr = min(min_addr, untagged_ptr); @@ -624,7 +649,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, list_add_tail_rcu(&object->object_list, &object_list); out: - write_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); return object; } @@ -642,9 +667,9 @@ static void __delete_object(struct kmemleak_object *object) * Locking here also ensures that the corresponding memory block * cannot be freed when it is being scanned. */ - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->flags &= ~OBJECT_ALLOCATED; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -689,9 +714,7 @@ static void delete_object_part(unsigned long ptr, size_t size) /* * Create one or two objects that may result from the memory block * split. Note that partial freeing is only done by free_bootmem() and - * this happens before kmemleak_init() is called. The path below is - * only executed during early log recording in kmemleak_init(), so - * GFP_KERNEL is enough. + * this happens before kmemleak_init() is called. 
*/ start = object->pointer; end = object->pointer + object->size; @@ -716,9 +739,9 @@ static void paint_it(struct kmemleak_object *object, int color) { unsigned long flags; - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); __paint_it(object, color); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } static void paint_ptr(unsigned long ptr, int color) @@ -763,7 +786,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) { unsigned long flags; struct kmemleak_object *object; - struct kmemleak_scan_area *area; + struct kmemleak_scan_area *area = NULL; object = find_and_get_object(ptr, 1); if (!object) { @@ -772,13 +795,16 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) return; } - area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); + if (scan_area_cache) + area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); + + raw_spin_lock_irqsave(&object->lock, flags); if (!area) { - pr_warn("Cannot allocate a scan area\n"); - goto out; + pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); + /* mark the object for full scan to avoid false positives */ + object->flags |= OBJECT_FULL_SCAN; + goto out_unlock; } - - spin_lock_irqsave(&object->lock, flags); if (size == SIZE_MAX) { size = object->pointer + object->size - ptr; } else if (ptr + size > object->pointer + object->size) { @@ -794,8 +820,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) hlist_add_head(&area->node, &object->area_list); out_unlock: - spin_unlock_irqrestore(&object->lock, flags); -out: + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -817,9 +842,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->excess_ref = excess_ref; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -839,92 +864,12 @@ static void object_no_scan(unsigned long ptr) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->flags |= OBJECT_NO_SCAN; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } -/* - * Log an early kmemleak_* call to the early_log buffer. These calls will be - * processed later once kmemleak is fully initialized. - */ -static void __init log_early(int op_type, const void *ptr, size_t size, - int min_count) -{ - unsigned long flags; - struct early_log *log; - - if (kmemleak_error) { - /* kmemleak stopped recording, just count the requests */ - crt_early_log++; - return; - } - - if (crt_early_log >= ARRAY_SIZE(early_log)) { - crt_early_log++; - kmemleak_disable(); - return; - } - - /* - * There is no need for locking since the kernel is still in UP mode - * at this stage. Disabling the IRQs is enough. - */ - local_irq_save(flags); - log = &early_log[crt_early_log]; - log->op_type = op_type; - log->ptr = ptr; - log->size = size; - log->min_count = min_count; - log->trace_len = __save_stack_trace(log->trace); - crt_early_log++; - local_irq_restore(flags); -} - -/* - * Log an early allocated block and populate the stack trace. 
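add_scan_area() above no longer gives up when a scan-area record cannot be allocated: it sets OBJECT_FULL_SCAN on the object so the scanner falls back to walking the entire block, trading scan precision for the absence of false positives. A compact userspace sketch of that degrade-gracefully idea; the flag bit mirrors the patch, but struct obj, request_scan_area() and scan() are invented for illustration and keep a single optional area instead of the kernel's hlist.

/* Degrade to scanning the whole object when the sub-range record
 * cannot be allocated. */
#include <stdio.h>
#include <stdlib.h>

#define OBJ_ALLOCATED	(1u << 0)
#define OBJ_FULL_SCAN	(1u << 3)	/* scan-area allocation failed */

struct area { size_t start, size; };

struct obj {
	unsigned int flags;
	struct area *area;	/* optional sub-range to scan */
	size_t start, size;
};

static void request_scan_area(struct obj *o, size_t start, size_t size)
{
	struct area *a = malloc(sizeof(*a));

	if (!a) {
		/* no record to remember the sub-range: scan everything */
		o->flags |= OBJ_FULL_SCAN;
		return;
	}
	a->start = start;
	a->size = size;
	o->area = a;
}

static void scan(const struct obj *o)
{
	if (!o->area || (o->flags & OBJ_FULL_SCAN))
		printf("scan [%zu, %zu)\n", o->start, o->start + o->size);
	else
		printf("scan [%zu, %zu)\n", o->area->start,
		       o->area->start + o->area->size);
}

int main(void)
{
	struct obj o = { .flags = OBJ_ALLOCATED, .start = 0, .size = 128 };

	request_scan_area(&o, 32, 16);	/* on failure, falls back to full scan */
	scan(&o);
	return 0;
}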
- */ -static void early_alloc(struct early_log *log) -{ - struct kmemleak_object *object; - unsigned long flags; - int i; - - if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr)) - return; - - /* - * RCU locking needed to ensure object is not freed via put_object(). - */ - rcu_read_lock(); - object = create_object((unsigned long)log->ptr, log->size, - log->min_count, GFP_ATOMIC); - if (!object) - goto out; - spin_lock_irqsave(&object->lock, flags); - for (i = 0; i < log->trace_len; i++) - object->trace[i] = log->trace[i]; - object->trace_len = log->trace_len; - spin_unlock_irqrestore(&object->lock, flags); -out: - rcu_read_unlock(); -} - -/* - * Log an early allocated block and populate the stack trace. - */ -static void early_alloc_percpu(struct early_log *log) -{ - unsigned int cpu; - const void __percpu *ptr = log->ptr; - - for_each_possible_cpu(cpu) { - log->ptr = per_cpu_ptr(ptr, cpu); - early_alloc(log); - } -} - /** * kmemleak_alloc - register a newly allocated object * @ptr: pointer to beginning of the object @@ -946,8 +891,6 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count, if (kmemleak_enabled && ptr && !IS_ERR(ptr)) create_object((unsigned long)ptr, size, min_count, gfp); - else if (kmemleak_early_log) - log_early(KMEMLEAK_ALLOC, ptr, size, min_count); } EXPORT_SYMBOL_GPL(kmemleak_alloc); @@ -975,8 +918,6 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, for_each_possible_cpu(cpu) create_object((unsigned long)per_cpu_ptr(ptr, cpu), size, 0, gfp); - else if (kmemleak_early_log) - log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); } EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); @@ -1001,11 +942,6 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp create_object((unsigned long)area->addr, size, 2, gfp); object_set_excess_ref((unsigned long)area, (unsigned long)area->addr); - } else if (kmemleak_early_log) { - log_early(KMEMLEAK_ALLOC, area->addr, size, 2); - /* reusing early_log.size for storing area->addr */ - log_early(KMEMLEAK_SET_EXCESS_REF, - area, (unsigned long)area->addr, 0); } } EXPORT_SYMBOL_GPL(kmemleak_vmalloc); @@ -1023,8 +959,6 @@ void __ref kmemleak_free(const void *ptr) if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) delete_object_full((unsigned long)ptr); - else if (kmemleak_early_log) - log_early(KMEMLEAK_FREE, ptr, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free); @@ -1043,8 +977,6 @@ void __ref kmemleak_free_part(const void *ptr, size_t size) if (kmemleak_enabled && ptr && !IS_ERR(ptr)) delete_object_part((unsigned long)ptr, size); - else if (kmemleak_early_log) - log_early(KMEMLEAK_FREE_PART, ptr, size, 0); } EXPORT_SYMBOL_GPL(kmemleak_free_part); @@ -1065,8 +997,6 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr) for_each_possible_cpu(cpu) delete_object_full((unsigned long)per_cpu_ptr(ptr, cpu)); - else if (kmemleak_early_log) - log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free_percpu); @@ -1096,9 +1026,9 @@ void __ref kmemleak_update_trace(const void *ptr) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->trace_len = __save_stack_trace(object->trace); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -1117,8 +1047,6 @@ void __ref kmemleak_not_leak(const void *ptr) if (kmemleak_enabled && ptr && !IS_ERR(ptr)) make_gray_object((unsigned long)ptr); - else if (kmemleak_early_log) - log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); } 
EXPORT_SYMBOL(kmemleak_not_leak); @@ -1137,8 +1065,6 @@ void __ref kmemleak_ignore(const void *ptr) if (kmemleak_enabled && ptr && !IS_ERR(ptr)) make_black_object((unsigned long)ptr); - else if (kmemleak_early_log) - log_early(KMEMLEAK_IGNORE, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_ignore); @@ -1159,8 +1085,6 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) if (kmemleak_enabled && ptr && size && !IS_ERR(ptr)) add_scan_area((unsigned long)ptr, size, gfp); - else if (kmemleak_early_log) - log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); } EXPORT_SYMBOL(kmemleak_scan_area); @@ -1179,8 +1103,6 @@ void __ref kmemleak_no_scan(const void *ptr) if (kmemleak_enabled && ptr && !IS_ERR(ptr)) object_no_scan((unsigned long)ptr); - else if (kmemleak_early_log) - log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_no_scan); @@ -1311,7 +1233,7 @@ static void scan_block(void *_start, void *_end, unsigned long flags; unsigned long untagged_ptr; - read_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); for (ptr = start; ptr < end; ptr++) { struct kmemleak_object *object; unsigned long pointer; @@ -1346,7 +1268,7 @@ static void scan_block(void *_start, void *_end, * previously acquired in scan_object(). These locks are * enclosed by scan_mutex. */ - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); /* only pass surplus references (object already gray) */ if (color_gray(object)) { excess_ref = object->excess_ref; @@ -1355,7 +1277,7 @@ static void scan_block(void *_start, void *_end, excess_ref = 0; update_refs(object); } - spin_unlock(&object->lock); + raw_spin_unlock(&object->lock); if (excess_ref) { object = lookup_object(excess_ref, 0); @@ -1364,12 +1286,12 @@ static void scan_block(void *_start, void *_end, if (object == scanned) /* circular reference, ignore */ continue; - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); update_refs(object); - spin_unlock(&object->lock); + raw_spin_unlock(&object->lock); } } - read_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); } /* @@ -1402,13 +1324,14 @@ static void scan_object(struct kmemleak_object *object) * Once the object->lock is acquired, the corresponding memory block * cannot be freed (the same lock is acquired in delete_object). 
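The scan_block() hunks above keep the same inner loop, now under the raw kmemleak_lock: every pointer-sized word in the block is treated as a candidate pointer, cheap range checks against min_addr/max_addr filter out most values, and the survivors are looked up in the object tree with their reference counts updated under the nested per-object lock. The userspace model below keeps only the word-by-word walk and the address-window filter, with a hit counter standing in for the tree lookup; all names are illustrative.

/* Word-by-word scan of a block for values that look like tracked pointers. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t min_addr = UINTPTR_MAX, max_addr;

/* pretend these two blocks were registered with the tracker */
static int blob_a[16];
static int blob_b[16];

static void note_block(const void *p, size_t size)
{
	uintptr_t lo = (uintptr_t)p;

	if (lo < min_addr)
		min_addr = lo;
	if (lo + size > max_addr)
		max_addr = lo + size;
}

static int scan_block(const void *start, const void *end)
{
	const char *pos = start;
	const char *stop = end;
	int hits = 0;

	for (; pos + sizeof(uintptr_t) <= stop; pos += sizeof(uintptr_t)) {
		uintptr_t word;

		memcpy(&word, pos, sizeof(word));	/* read one word */
		if (word >= min_addr && word < max_addr)
			hits++;				/* candidate reference */
	}
	return hits;
}

int main(void)
{
	void *refs[2] = { blob_a, blob_b };	/* a block holding two pointers */

	note_block(blob_a, sizeof(blob_a));
	note_block(blob_b, sizeof(blob_b));
	printf("references found: %d\n", scan_block(refs, &refs[2]));
	return 0;
}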
*/ - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (object->flags & OBJECT_NO_SCAN) goto out; if (!(object->flags & OBJECT_ALLOCATED)) /* already freed object */ goto out; - if (hlist_empty(&object->area_list)) { + if (hlist_empty(&object->area_list) || + object->flags & OBJECT_FULL_SCAN) { void *start = (void *)object->pointer; void *end = (void *)(object->pointer + object->size); void *next; @@ -1421,9 +1344,9 @@ static void scan_object(struct kmemleak_object *object) if (start >= end) break; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); cond_resched(); - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); } while (object->flags & OBJECT_ALLOCATED); } else hlist_for_each_entry(area, &object->area_list, node) @@ -1431,7 +1354,7 @@ static void scan_object(struct kmemleak_object *object) (void *)(area->start + area->size), object); out: - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } /* @@ -1484,7 +1407,7 @@ static void kmemleak_scan(void) /* prepare the kmemleak_object's */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); #ifdef DEBUG /* * With a few exceptions there should be a maximum of @@ -1501,7 +1424,7 @@ static void kmemleak_scan(void) if (color_gray(object) && get_object(object)) list_add_tail(&object->gray_list, &gray_list); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1569,14 +1492,14 @@ static void kmemleak_scan(void) */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (color_white(object) && (object->flags & OBJECT_ALLOCATED) && update_checksum(object) && get_object(object)) { /* color it gray temporarily */ object->count = object->min_count; list_add_tail(&object->gray_list, &gray_list); } - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1596,7 +1519,7 @@ static void kmemleak_scan(void) */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (unreferenced_object(object) && !(object->flags & OBJECT_REPORTED)) { object->flags |= OBJECT_REPORTED; @@ -1606,7 +1529,7 @@ static void kmemleak_scan(void) new_leaks++; } - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1758,10 +1681,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) struct kmemleak_object *object = v; unsigned long flags; - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) print_unreferenced(seq, object); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); return 0; } @@ -1791,9 +1714,9 @@ static int dump_str_object_info(const char *str) return -EINVAL; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); dump_object_info(object); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); 
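scan_object() above still processes large blocks in MAX_SCAN_SIZE chunks, but the lock it drops between chunks is now the raw object->lock; after cond_resched() it reacquires the lock and rechecks OBJECT_ALLOCATED before continuing. A userspace sketch of that chunked pattern, using a pthread mutex and sched_yield() as stand-ins (compile with -pthread); the chunk size and names are illustrative.

/* Scan a large block in bounded chunks, releasing the lock and yielding
 * between chunks so other lock holders are not starved. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define MAX_SCAN_CHUNK 4096	/* illustrative chunk size in bytes */

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static void scan_chunk(const char *start, const char *end)
{
	printf("scanning %zu bytes\n", (size_t)(end - start));
}

static void scan_large(const char *start, const char *end)
{
	pthread_mutex_lock(&obj_lock);
	while (start < end) {
		const char *next = start + MAX_SCAN_CHUNK;

		if (next > end)
			next = end;
		scan_chunk(start, next);
		start = next;

		/* drop the lock, let others run, then retake it; the real
		 * code also rechecks that the object is still allocated */
		pthread_mutex_unlock(&obj_lock);
		sched_yield();
		pthread_mutex_lock(&obj_lock);
	}
	pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
	static char blob[10000];

	scan_large(blob, blob + sizeof(blob));
	return 0;
}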
return 0; @@ -1812,11 +1735,11 @@ static void kmemleak_clear(void) rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) __paint_it(object, KMEMLEAK_GREY); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1919,12 +1842,16 @@ static const struct file_operations kmemleak_fops = { static void __kmemleak_do_cleanup(void) { - struct kmemleak_object *object; + struct kmemleak_object *object, *tmp; - rcu_read_lock(); - list_for_each_entry_rcu(object, &object_list, object_list) - delete_object_full(object->pointer); - rcu_read_unlock(); + /* + * Kmemleak has already been disabled, no need for RCU list traversal + * or kmemleak_lock held. + */ + list_for_each_entry_safe(object, tmp, &object_list, object_list) { + __remove_object(object); + __delete_object(object); + } } /* @@ -1966,7 +1893,6 @@ static void kmemleak_disable(void) /* stop any memory operation tracing */ kmemleak_enabled = 0; - kmemleak_early_log = 0; /* check whether it is too early for a kernel thread */ if (kmemleak_initialized) @@ -1994,20 +1920,11 @@ static int __init kmemleak_boot_config(char *str) } early_param("kmemleak", kmemleak_boot_config); -static void __init print_log_trace(struct early_log *log) -{ - pr_notice("Early log backtrace:\n"); - stack_trace_print(log->trace, log->trace_len, 2); -} - /* * Kmemleak initialization. */ void __init kmemleak_init(void) { - int i; - unsigned long flags; - #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF if (!kmemleak_skip_disable) { kmemleak_disable(); @@ -2015,28 +1932,15 @@ void __init kmemleak_init(void) } #endif + if (kmemleak_error) + return; + jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); - if (crt_early_log > ARRAY_SIZE(early_log)) - pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", - crt_early_log); - - /* the kernel is still in UP mode, so disabling the IRQs is enough */ - local_irq_save(flags); - kmemleak_early_log = 0; - if (kmemleak_error) { - local_irq_restore(flags); - return; - } else { - kmemleak_enabled = 1; - kmemleak_free_enabled = 1; - } - local_irq_restore(flags); - /* register the data/bss sections */ create_object((unsigned long)_sdata, _edata - _sdata, KMEMLEAK_GREY, GFP_ATOMIC); @@ -2047,57 +1951,6 @@ void __init kmemleak_init(void) create_object((unsigned long)__start_ro_after_init, __end_ro_after_init - __start_ro_after_init, KMEMLEAK_GREY, GFP_ATOMIC); - - /* - * This is the point where tracking allocations is safe. Automatic - * scanning is started during the late initcall. Add the early logged - * callbacks to the kmemleak infrastructure. 
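__kmemleak_do_cleanup() above replaces the RCU walk with list_for_each_entry_safe(): by the time the cleanup runs kmemleak is disabled, so nothing can add or remove objects concurrently and it is enough to remember the next entry before the current one is unlinked and freed. The same idea on a plain userspace singly linked list; the kernel list type and the __remove_object()/__delete_object() pair are not reproduced here.

/* Tear down a list while walking it: save the successor before freeing. */
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *head;

static void cleanup_all(void)
{
	struct node *n = head;

	while (n) {
		struct node *tmp = n->next;	/* save the successor first */

		free(n);			/* then the node may go away */
		n = tmp;
	}
	head = NULL;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {		/* build a short list */
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	cleanup_all();
	return 0;
}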
- */ - for (i = 0; i < crt_early_log; i++) { - struct early_log *log = &early_log[i]; - - switch (log->op_type) { - case KMEMLEAK_ALLOC: - early_alloc(log); - break; - case KMEMLEAK_ALLOC_PERCPU: - early_alloc_percpu(log); - break; - case KMEMLEAK_FREE: - kmemleak_free(log->ptr); - break; - case KMEMLEAK_FREE_PART: - kmemleak_free_part(log->ptr, log->size); - break; - case KMEMLEAK_FREE_PERCPU: - kmemleak_free_percpu(log->ptr); - break; - case KMEMLEAK_NOT_LEAK: - kmemleak_not_leak(log->ptr); - break; - case KMEMLEAK_IGNORE: - kmemleak_ignore(log->ptr); - break; - case KMEMLEAK_SCAN_AREA: - kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); - break; - case KMEMLEAK_NO_SCAN: - kmemleak_no_scan(log->ptr); - break; - case KMEMLEAK_SET_EXCESS_REF: - object_set_excess_ref((unsigned long)log->ptr, - log->excess_ref); - break; - default: - kmemleak_warn("Unknown early log operation: %d\n", - log->op_type); - } - - if (kmemleak_warning) { - print_log_trace(log); - kmemleak_warning = 0; - } - } } /* @@ -2126,7 +1979,8 @@ static int __init kmemleak_late_init(void) mutex_unlock(&scan_mutex); } - pr_info("Kernel memory leak detector initialized\n"); + pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n", + mem_pool_free_count); return 0; } |
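With the early-log replay gone, kmemleak is simply enabled from the start of boot, and the late initcall now prints mem_pool_free_count, i.e. how many static pool slots were never needed, which is the number to watch when tuning CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE. A trivial illustration of that kind of headroom report; the pool size and counter are the invented ones from the earlier sketch, not the Kconfig default.

/* Report how much of the static early-boot pool was left unused. */
#include <stdio.h>

#define POOL_SIZE 64	/* illustrative, not the Kconfig default */
static int pool_free_count = POOL_SIZE;

static void early_boot_allocations(int n)
{
	pool_free_count -= n;	/* slots handed out before the heap is up */
}

int main(void)
{
	early_boot_allocations(17);
	printf("leak detector initialized (mem pool available: %d)\n",
	       pool_free_count);
	return 0;
}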