From 7ed9f7e5db58c6e8c2b4b738a75d5dcd8e17aad5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 25 Jun 2009 12:31:37 -0700 Subject: fix RCU-callback-after-kmem_cache_destroy problem in sl[aou]b Jesper noted that kmem_cache_destroy() invokes synchronize_rcu() rather than rcu_barrier() in the SLAB_DESTROY_BY_RCU case, which could result in RCU callbacks accessing a kmem_cache after it had been destroyed. Cc: Acked-by: Matt Mackall Reported-by: Jesper Dangaard Brouer Signed-off-by: Paul E. McKenney Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- mm/slob.c | 2 ++ mm/slub.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index e74a16e4ced6..5241b6598ba3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2547,7 +2547,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) } if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) - synchronize_rcu(); + rcu_barrier(); __kmem_cache_destroy(cachep); mutex_unlock(&cache_chain_mutex); diff --git a/mm/slob.c b/mm/slob.c index c78742defdc6..9641da3d5e58 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -595,6 +595,8 @@ EXPORT_SYMBOL(kmem_cache_create); void kmem_cache_destroy(struct kmem_cache *c) { kmemleak_free(c); + if (c->flags & SLAB_DESTROY_BY_RCU) + rcu_barrier(); slob_free(c, sizeof(struct kmem_cache)); } EXPORT_SYMBOL(kmem_cache_destroy); diff --git a/mm/slub.c b/mm/slub.c index 819f056b39c6..a9201d83178b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2595,6 +2595,8 @@ static inline int kmem_cache_close(struct kmem_cache *s) */ void kmem_cache_destroy(struct kmem_cache *s) { + if (s->flags & SLAB_DESTROY_BY_RCU) + rcu_barrier(); down_write(&slub_lock); s->refcount--; if (!s->refcount) { -- cgit v1.2.1 From ec5a36f94e7ca4b1f28ae4dd135cd415a704e772 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 29 Jun 2009 09:57:10 +0300 Subject: SLAB: Fix lockdep annotations Commit 8429db5... ("slab: setup cpu caches later on when interrupts are enabled") broke mm/slab.c lockdep annotations: [ 11.554715] ============================================= [ 11.555249] [ INFO: possible recursive locking detected ] [ 11.555560] 2.6.31-rc1 #896 [ 11.555861] --------------------------------------------- [ 11.556127] udevd/1899 is trying to acquire lock: [ 11.556436] (&nc->lock){-.-...}, at: [] kmem_cache_free+0xcd/0x25b [ 11.557101] [ 11.557102] but task is already holding lock: [ 11.557706] (&nc->lock){-.-...}, at: [] kfree+0x137/0x292 [ 11.558109] [ 11.558109] other info that might help us debug this: [ 11.558720] 2 locks held by udevd/1899: [ 11.558983] #0: (&nc->lock){-.-...}, at: [] kfree+0x137/0x292 [ 11.559734] #1: (&parent->list_lock){-.-...}, at: [] __drain_alien_cache+0x3b/0xbd [ 11.560442] [ 11.560443] stack backtrace: [ 11.561009] Pid: 1899, comm: udevd Not tainted 2.6.31-rc1 #896 [ 11.561276] Call Trace: [ 11.561632] [] __lock_acquire+0x15ec/0x168f [ 11.561901] [] ? __lock_acquire+0x1676/0x168f [ 11.562171] [] ? trace_hardirqs_on_caller+0x113/0x13e [ 11.562490] [] ? trace_hardirqs_on_thunk+0x3a/0x3f [ 11.562807] [] lock_acquire+0xc1/0xe5 [ 11.563073] [] ? kmem_cache_free+0xcd/0x25b [ 11.563385] [] _spin_lock+0x31/0x66 [ 11.563696] [] ? kmem_cache_free+0xcd/0x25b [ 11.563964] [] kmem_cache_free+0xcd/0x25b [ 11.564235] [] ? 
__free_pages+0x1b/0x24 [ 11.564551] [] slab_destroy+0x57/0x5c [ 11.564860] [] free_block+0xd8/0x123 [ 11.565126] [] __drain_alien_cache+0xa2/0xbd [ 11.565441] [] kfree+0x14c/0x292 [ 11.565752] [] skb_release_data+0xc6/0xcb [ 11.566020] [] __kfree_skb+0x19/0x86 [ 11.566286] [] consume_skb+0x2b/0x2d [ 11.566631] [] skb_free_datagram+0x14/0x3a [ 11.566901] [] netlink_recvmsg+0x164/0x258 [ 11.567170] [] sock_recvmsg+0xe5/0xfe [ 11.567486] [] ? might_fault+0xaf/0xb1 [ 11.567802] [] ? autoremove_wake_function+0x0/0x38 [ 11.568073] [] ? core_sys_select+0x3d/0x2b4 [ 11.568378] [] ? __lock_acquire+0x1676/0x168f [ 11.568693] [] ? sockfd_lookup_light+0x1b/0x54 [ 11.568961] [] sys_recvfrom+0xa3/0xf8 [ 11.569228] [] ? trace_hardirqs_on+0xd/0xf [ 11.569546] [] system_call_fastpath+0x16/0x1b# Fix that up. Closes-bug: http://bugzilla.kernel.org/show_bug.cgi?id=13654 Tested-by: Venkatesh Pallipadi Signed-off-by: Pekka Enberg --- mm/slab.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 5241b6598ba3..7b5d4deacfcd 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1544,9 +1544,6 @@ void __init kmem_cache_init(void) } g_cpucache_up = EARLY; - - /* Annotate slab for lockdep -- annotate the malloc caches */ - init_lock_keys(); } void __init kmem_cache_init_late(void) @@ -1563,6 +1560,9 @@ void __init kmem_cache_init_late(void) /* Done! */ g_cpucache_up = FULL; + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); + /* * Register a cpu startup notifier callback that initializes * cpu_cache_get for all new cpus -- cgit v1.2.1 From c8236db9cd7aa492dcfcdcca702638e704abed49 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sun, 5 Jul 2009 12:08:18 -0700 Subject: mm: mark page accessed before we write_end() In testing a backport of the write_begin/write_end AOPs, a 10% re-read regression was noticed when running iozone. This regression was introduced because the old AOPs would always do a mark_page_accessed(page) after the commit_write, but when the new AOPs where introduced, the only place this was kept was in pagecache_write_end(). This patch does the same thing in the generic case as what is done in pagecache_write_end(), which is just to mark the page accessed before we do write_end(). 
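As a rough illustration of where the new call sits, one iteration of the generic buffered write loop now orders its per-page work as follows (condensed from the mm/filemap.c hunk below; kernel-internal code, shown only as a sketch):

        status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                    &page, &fsdata);
        if (unlikely(status))
                break;

        pagefault_disable();
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        flush_dcache_page(page);
        mark_page_accessed(page);       /* the line this patch adds */
        status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                  page, fsdata);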
Signed-off-by: Josef Bacik Acked-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 22396713feb9..ccea3b665c12 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2272,6 +2272,7 @@ again: pagefault_enable(); flush_dcache_page(page); + mark_page_accessed(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status < 0)) -- cgit v1.2.1 From 5bfd7560979062ad75c9805c1719cec990b5db29 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sun, 5 Jul 2009 12:08:19 -0700 Subject: Fix virt_to_phys() warnings These warnings were observed on MIPS32 using 2.6.31-rc1 and gcc-4.2.0: mm/page_alloc.c: In function 'alloc_pages_exact': mm/page_alloc.c:1986: warning: passing argument 1 of 'virt_to_phys' makes pointer from integer without a cast drivers/usb/mon/mon_bin.c: In function 'mon_alloc_buff': drivers/usb/mon/mon_bin.c:1264: warning: passing argument 1 of 'virt_to_phys' makes pointer from integer without a cast [akpm@linux-foundation.org: fix kernel/perf_counter.c too] Signed-off-by: Kevin Cernekee Cc: Andi Kleen Cc: Ralf Baechle Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e0f2cdf9d8b1..ad7cd1c56b07 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1983,7 +1983,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) unsigned long alloc_end = addr + (PAGE_SIZE << order); unsigned long used = addr + PAGE_ALIGN(size); - split_page(virt_to_page(addr), order); + split_page(virt_to_page((void *)addr), order); while (used < alloc_end) { free_page(used); used += PAGE_SIZE; -- cgit v1.2.1 From bf2a76b317c6ccc6f7b6b1dc09664c5b6a155c61 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:55 +0100 Subject: kmemleak: Renice the scanning thread to +10 This is a long-running thread but not high-priority. So it makes sense to renice it to +10. Signed-off-by: Catalin Marinas --- mm/kmemleak.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index e766e1da09d2..60065531f60c 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1039,6 +1039,7 @@ static int kmemleak_scan_thread(void *arg) static int first_run = 1; pr_info("Automatic memory scanning thread started\n"); + set_user_nice(current, 10); /* * Wait before the first scan to allow the system to fully initialize. -- cgit v1.2.1 From 4b8a96744c0c27ab94fb4e8155d4384c3b399e27 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:56 +0100 Subject: kmemleak: Add more cond_resched() calls in the scanning thread Following recent fix to no longer reschedule in the scan_block() function, the system may become unresponsive with !PREEMPT. This patch re-adds the cond_resched() call to scan_block() but conditioned by the allow_resched parameter. Signed-off-by: Catalin Marinas Cc: Ingo Molnar --- mm/kmemleak.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 60065531f60c..93f14818e901 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -807,7 +807,7 @@ static int scan_should_stop(void) * found to the gray list. 
*/ static void scan_block(void *_start, void *_end, - struct kmemleak_object *scanned) + struct kmemleak_object *scanned, int allow_resched) { unsigned long *ptr; unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); @@ -818,6 +818,8 @@ static void scan_block(void *_start, void *_end, unsigned long pointer = *ptr; struct kmemleak_object *object; + if (allow_resched) + cond_resched(); if (scan_should_stop()) break; @@ -881,12 +883,12 @@ static void scan_object(struct kmemleak_object *object) goto out; if (hlist_empty(&object->area_list)) scan_block((void *)object->pointer, - (void *)(object->pointer + object->size), object); + (void *)(object->pointer + object->size), object, 0); else hlist_for_each_entry(area, elem, &object->area_list, node) scan_block((void *)(object->pointer + area->offset), (void *)(object->pointer + area->offset - + area->length), object); + + area->length), object, 0); out: spin_unlock_irqrestore(&object->lock, flags); } @@ -931,14 +933,14 @@ static void kmemleak_scan(void) rcu_read_unlock(); /* data/bss scanning */ - scan_block(_sdata, _edata, NULL); - scan_block(__bss_start, __bss_stop, NULL); + scan_block(_sdata, _edata, NULL, 1); + scan_block(__bss_start, __bss_stop, NULL, 1); #ifdef CONFIG_SMP /* per-cpu sections scanning */ for_each_possible_cpu(i) scan_block(__per_cpu_start + per_cpu_offset(i), - __per_cpu_end + per_cpu_offset(i), NULL); + __per_cpu_end + per_cpu_offset(i), NULL, 1); #endif /* @@ -960,7 +962,7 @@ static void kmemleak_scan(void) /* only scan if page is in use */ if (page_count(page) == 0) continue; - scan_block(page, page + 1, NULL); + scan_block(page, page + 1, NULL, 1); } } @@ -972,7 +974,8 @@ static void kmemleak_scan(void) read_lock(&tasklist_lock); for_each_process(task) scan_block(task_stack_page(task), - task_stack_page(task) + THREAD_SIZE, NULL); + task_stack_page(task) + THREAD_SIZE, + NULL, 0); read_unlock(&tasklist_lock); } -- cgit v1.2.1 From 288c857d66a400ca4846dd700eb1c4820d738bb9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:57 +0100 Subject: kmemleak: Remove the reported leaks number limitation Since the leaks are no longer printed to the syslog, there is no point in keeping this limitation. All the suspected leaks are shown on /sys/kernel/debug/kmemleak file. Signed-off-by: Catalin Marinas --- mm/kmemleak.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 93f14818e901..3c2b24c49a84 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -103,7 +103,6 @@ * Kmemleak configuration and common defines. */ #define MAX_TRACE 16 /* stack trace length */ -#define REPORTS_NR 50 /* maximum number of reported leaks */ #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ #define SECS_FIRST_SCAN 60 /* delay before the first scan */ #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ @@ -196,9 +195,6 @@ static int kmemleak_stack_scan = 1; /* protects the memory scanning, parameters and debug/kmemleak file access */ static DEFINE_MUTEX(scan_mutex); -/* number of leaks reported (for limitation purposes) */ -static int reported_leaks; - /* * Early object allocation/freeing logging. Kmemleak is initialized after the * kernel allocator. 
However, both the kernel allocator and kmemleak may @@ -1106,11 +1102,6 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) struct kmemleak_object *object; loff_t n = *pos; - if (!n) - reported_leaks = 0; - if (reported_leaks >= REPORTS_NR) - return NULL; - rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { if (n-- > 0) @@ -1135,8 +1126,6 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct list_head *n = &prev_obj->object_list; ++(*pos); - if (reported_leaks >= REPORTS_NR) - goto out; rcu_read_lock(); list_for_each_continue_rcu(n, &object_list) { @@ -1145,7 +1134,7 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) break; } rcu_read_unlock(); -out: + put_object(prev_obj); return next_obj; } @@ -1168,10 +1157,8 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) unsigned long flags; spin_lock_irqsave(&object->lock, flags); - if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) { + if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) print_unreferenced(seq, object); - reported_leaks++; - } spin_unlock_irqrestore(&object->lock, flags); return 0; } -- cgit v1.2.1 From b87324d082d9d898e3c06b2a07a2b94b2430b8ba Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:58 +0100 Subject: kmemleak: Do not acquire scan_mutex in kmemleak_open() Initially, the scan_mutex was acquired in kmemleak_open() and released in kmemleak_release() (corresponding to /sys/kernel/debug/kmemleak operations). This was causing some lockdep reports when the file was closed from a different task than the one opening it. This patch moves the scan_mutex acquiring in kmemleak_write() or kmemleak_seq_start() with releasing in kmemleak_seq_stop(). Signed-off-by: Catalin Marinas --- mm/kmemleak.c | 63 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 3c2b24c49a84..7cfb7d014a20 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1101,6 +1101,11 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) { struct kmemleak_object *object; loff_t n = *pos; + int err; + + err = mutex_lock_interruptible(&scan_mutex); + if (err < 0) + return ERR_PTR(err); rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { @@ -1144,8 +1149,15 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) */ static void kmemleak_seq_stop(struct seq_file *seq, void *v) { - if (v) - put_object(v); + if (!IS_ERR(v)) { + /* + * kmemleak_seq_start may return ERR_PTR if the scan_mutex + * waiting was interrupted, so only release it if !IS_ERR. 
+ */ + mutex_unlock(&scan_mutex); + if (v) + put_object(v); + } } /* @@ -1172,36 +1184,15 @@ static const struct seq_operations kmemleak_seq_ops = { static int kmemleak_open(struct inode *inode, struct file *file) { - int ret = 0; - if (!atomic_read(&kmemleak_enabled)) return -EBUSY; - ret = mutex_lock_interruptible(&scan_mutex); - if (ret < 0) - goto out; - if (file->f_mode & FMODE_READ) { - ret = seq_open(file, &kmemleak_seq_ops); - if (ret < 0) - goto scan_unlock; - } - return ret; - -scan_unlock: - mutex_unlock(&scan_mutex); -out: - return ret; + return seq_open(file, &kmemleak_seq_ops); } static int kmemleak_release(struct inode *inode, struct file *file) { - int ret = 0; - - if (file->f_mode & FMODE_READ) - seq_release(inode, file); - mutex_unlock(&scan_mutex); - - return ret; + return seq_release(inode, file); } /* @@ -1221,15 +1212,17 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, { char buf[64]; int buf_size; - - if (!atomic_read(&kmemleak_enabled)) - return -EBUSY; + int ret; buf_size = min(size, (sizeof(buf) - 1)); if (strncpy_from_user(buf, user_buf, buf_size) < 0) return -EFAULT; buf[buf_size] = 0; + ret = mutex_lock_interruptible(&scan_mutex); + if (ret < 0) + return ret; + if (strncmp(buf, "off", 3) == 0) kmemleak_disable(); else if (strncmp(buf, "stack=on", 8) == 0) @@ -1242,11 +1235,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, stop_scan_thread(); else if (strncmp(buf, "scan=", 5) == 0) { unsigned long secs; - int err; - err = strict_strtoul(buf + 5, 0, &secs); - if (err < 0) - return err; + ret = strict_strtoul(buf + 5, 0, &secs); + if (ret < 0) + goto out; stop_scan_thread(); if (secs) { jiffies_scan_wait = msecs_to_jiffies(secs * 1000); @@ -1255,7 +1247,12 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, } else if (strncmp(buf, "scan", 4) == 0) kmemleak_scan(); else - return -EINVAL; + ret = -EINVAL; + +out: + mutex_unlock(&scan_mutex); + if (ret < 0) + return ret; /* ignore the rest of the buffer, only one command at a time */ *ppos += size; -- cgit v1.2.1 From 2587362eaf5c9df4e08de11e6340e3c4a88ed4c8 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:58 +0100 Subject: kmemleak: Scan objects allocated during a scanning episode Many of the false positives in kmemleak happen on busy systems where objects are allocated during a kmemleak scanning episode. These objects aren't scanned by default until the next memory scan. When such object is added, for example, at the head of a list, it is possible that all the other objects in the list become unreferenced until the next scan. This patch adds checking for newly allocated objects at the end of the scan and repeats the scanning on these objects. If Linux allocates new objects at a higher rate than their scanning, it stops after a predefined number of passes. 
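In outline, the extra passes added to kmemleak_scan() behave roughly as follows (a condensed sketch of the hunks below; scan_gray_list() and for_each_object() stand in for the open-coded loops and are not real helpers introduced by this patch):

        int pass = 0;

repeat:
        scan_gray_list();                       /* scan whatever is gray now */

        if (scan_should_stop() || ++pass >= GRAY_LIST_PASSES)
                goto done;

        /* objects allocated during the scan still carry OBJECT_NEW */
        for_each_object(object) {
                if ((object->flags & OBJECT_NEW) && !color_black(object) &&
                    get_object(object)) {
                        object->flags &= ~OBJECT_NEW;
                        list_add_tail(&object->gray_list, &gray_list);
                }
        }

        if (!list_empty(&gray_list))
                goto repeat;
done:
        ;       /* new-leak reporting is suppressed if the pass limit was hit */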
Signed-off-by: Catalin Marinas --- mm/kmemleak.c | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 7cfb7d014a20..466d39007264 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -106,6 +106,7 @@ #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ #define SECS_FIRST_SCAN 60 /* delay before the first scan */ #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ +#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */ #define BYTES_PER_POINTER sizeof(void *) @@ -157,6 +158,8 @@ struct kmemleak_object { #define OBJECT_REPORTED (1 << 1) /* flag set to not scan the object */ #define OBJECT_NO_SCAN (1 << 2) +/* flag set on newly allocated objects */ +#define OBJECT_NEW (1 << 3) /* the list of all allocated objects */ static LIST_HEAD(object_list); @@ -270,6 +273,11 @@ static int color_gray(const struct kmemleak_object *object) return object->min_count != -1 && object->count >= object->min_count; } +static int color_black(const struct kmemleak_object *object) +{ + return object->min_count == -1; +} + /* * Objects are considered unreferenced only if their color is white, they have * not be deleted and have a minimum age to avoid false positives caused by @@ -447,7 +455,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count, INIT_HLIST_HEAD(&object->area_list); spin_lock_init(&object->lock); atomic_set(&object->use_count, 1); - object->flags = OBJECT_ALLOCATED; + object->flags = OBJECT_ALLOCATED | OBJECT_NEW; object->pointer = ptr; object->size = size; object->min_count = min_count; @@ -901,6 +909,7 @@ static void kmemleak_scan(void) struct task_struct *task; int i; int new_leaks = 0; + int gray_list_pass = 0; jiffies_last_scan = jiffies; @@ -921,6 +930,7 @@ static void kmemleak_scan(void) #endif /* reset the reference count (whiten the object) */ object->count = 0; + object->flags &= ~OBJECT_NEW; if (color_gray(object) && get_object(object)) list_add_tail(&object->gray_list, &gray_list); @@ -983,6 +993,7 @@ static void kmemleak_scan(void) * kmemleak objects cannot be freed from outside the loop because their * use_count was increased. */ +repeat: object = list_entry(gray_list.next, typeof(*object), gray_list); while (&object->gray_list != &gray_list) { cond_resched(); @@ -1000,12 +1011,38 @@ static void kmemleak_scan(void) object = tmp; } + + if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES) + goto scan_end; + + /* + * Check for new objects allocated during this scanning and add them + * to the gray list. + */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { + spin_lock_irqsave(&object->lock, flags); + if ((object->flags & OBJECT_NEW) && !color_black(object) && + get_object(object)) { + object->flags &= ~OBJECT_NEW; + list_add_tail(&object->gray_list, &gray_list); + } + spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + + if (!list_empty(&gray_list)) + goto repeat; + +scan_end: WARN_ON(!list_empty(&gray_list)); /* - * If scanning was stopped do not report any new unreferenced objects. + * If scanning was stopped or new objects were being allocated at a + * higher rate than gray list scanning, do not report any new + * unreferenced objects. 
*/ - if (scan_should_stop()) + if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES) return; /* -- cgit v1.2.1 From e4f7c0b44a8ac8935f223195af9ea637d0c08091 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:32:59 +0100 Subject: kmemleak: Trace the kmalloc_large* functions in slub The kmalloc_large() and kmalloc_large_node() functions were missed when adding the kmemleak hooks to the slub allocator. However, they should be traced to avoid false positives. Signed-off-by: Catalin Marinas Cc: Christoph Lameter Acked-by: Pekka Enberg --- mm/slub.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index a9201d83178b..b9f1491a58a1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -2835,13 +2834,15 @@ EXPORT_SYMBOL(__kmalloc); static void *kmalloc_large_node(size_t size, gfp_t flags, int node) { struct page *page; + void *ptr = NULL; flags |= __GFP_COMP | __GFP_NOTRACK; page = alloc_pages_node(node, flags, get_order(size)); if (page) - return page_address(page); - else - return NULL; + ptr = page_address(page); + + kmemleak_alloc(ptr, size, 1, flags); + return ptr; } #ifdef CONFIG_NUMA @@ -2926,6 +2927,7 @@ void kfree(const void *x) page = virt_to_head_page(x); if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); + kmemleak_free(x); put_page(page); return; } -- cgit v1.2.1 From 53238a60dd4a679f6fe5613a7ed46899587205cf Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:33:00 +0100 Subject: kmemleak: Allow partial freeing of memory blocks Functions like free_bootmem() are allowed to free only part of a memory block. This patch adds support for this via the kmemleak_free_part() callback which removes the original object and creates one or two additional objects as a result of the memory block split. Signed-off-by: Catalin Marinas Cc: Ingo Molnar Acked-by: Pekka Enberg --- mm/kmemleak.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 81 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 466d39007264..5aabd41ffb8f 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -210,6 +210,7 @@ static DEFINE_MUTEX(scan_mutex); enum { KMEMLEAK_ALLOC, KMEMLEAK_FREE, + KMEMLEAK_FREE_PART, KMEMLEAK_NOT_LEAK, KMEMLEAK_IGNORE, KMEMLEAK_SCAN_AREA, @@ -523,27 +524,17 @@ out: * Remove the metadata (struct kmemleak_object) for a memory block from the * object_list and object_tree_root and decrement its use_count. */ -static void delete_object(unsigned long ptr) +static void __delete_object(struct kmemleak_object *object) { unsigned long flags; - struct kmemleak_object *object; write_lock_irqsave(&kmemleak_lock, flags); - object = lookup_object(ptr, 0); - if (!object) { -#ifdef DEBUG - kmemleak_warn("Freeing unknown object at 0x%08lx\n", - ptr); -#endif - write_unlock_irqrestore(&kmemleak_lock, flags); - return; - } prio_tree_remove(&object_tree_root, &object->tree_node); list_del_rcu(&object->object_list); write_unlock_irqrestore(&kmemleak_lock, flags); WARN_ON(!(object->flags & OBJECT_ALLOCATED)); - WARN_ON(atomic_read(&object->use_count) < 1); + WARN_ON(atomic_read(&object->use_count) < 2); /* * Locking here also ensures that the corresponding memory block @@ -555,6 +546,64 @@ static void delete_object(unsigned long ptr) put_object(object); } +/* + * Look up the metadata (struct kmemleak_object) corresponding to ptr and + * delete it. 
+ */ +static void delete_object_full(unsigned long ptr) +{ + struct kmemleak_object *object; + + object = find_and_get_object(ptr, 0); + if (!object) { +#ifdef DEBUG + kmemleak_warn("Freeing unknown object at 0x%08lx\n", + ptr); +#endif + return; + } + __delete_object(object); + put_object(object); +} + +/* + * Look up the metadata (struct kmemleak_object) corresponding to ptr and + * delete it. If the memory block is partially freed, the function may create + * additional metadata for the remaining parts of the block. + */ +static void delete_object_part(unsigned long ptr, size_t size) +{ + struct kmemleak_object *object; + unsigned long start, end; + + object = find_and_get_object(ptr, 1); + if (!object) { +#ifdef DEBUG + kmemleak_warn("Partially freeing unknown object at 0x%08lx " + "(size %zu)\n", ptr, size); +#endif + return; + } + __delete_object(object); + + /* + * Create one or two objects that may result from the memory block + * split. Note that partial freeing is only done by free_bootmem() and + * this happens before kmemleak_init() is called. The path below is + * only executed during early log recording in kmemleak_init(), so + * GFP_KERNEL is enough. + */ + start = object->pointer; + end = object->pointer + object->size; + if (ptr > start) + create_object(start, ptr - start, object->min_count, + GFP_KERNEL); + if (ptr + size < end) + create_object(ptr + size, end - ptr - size, object->min_count, + GFP_KERNEL); + + put_object(object); +} /* * Make a object permanently as gray-colored so that it can no longer be * reported as a leak. This is used in general to mark a false positive. @@ -719,12 +768,27 @@ void kmemleak_free(const void *ptr) pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) - delete_object((unsigned long)ptr); + delete_object_full((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free); +/* + * Partial memory freeing function callback. This function is usually called + * from bootmem allocator when (part of) a memory block is freed. + */ +void kmemleak_free_part(const void *ptr, size_t size) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + delete_object_part((unsigned long)ptr, size); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); +} +EXPORT_SYMBOL_GPL(kmemleak_free_part); + /* * Mark an already allocated memory block as a false positive. This will cause * the block to no longer be reported as leak and always be scanned. @@ -1318,7 +1382,7 @@ static int kmemleak_cleanup_thread(void *arg) rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) - delete_object(object->pointer); + delete_object_full(object->pointer); rcu_read_unlock(); mutex_unlock(&scan_mutex); @@ -1413,6 +1477,9 @@ void __init kmemleak_init(void) case KMEMLEAK_FREE: kmemleak_free(log->ptr); break; + case KMEMLEAK_FREE_PART: + kmemleak_free_part(log->ptr, log->size); + break; case KMEMLEAK_NOT_LEAK: kmemleak_not_leak(log->ptr); break; -- cgit v1.2.1 From ec3a354bd46cbdaa7933ba57a142ee2d2dbde0e5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:33:01 +0100 Subject: kmemleak: Add callbacks to the bootmem allocator This patch adds kmemleak_alloc/free callbacks to the bootmem allocator. This would allow scanning of such blocks and help avoiding a whole class of false positives and more kmemleak annotations. 
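The wiring is symmetric: each bootmem free path reports the (possibly partial) range to kmemleak, and the allocation path registers the returned region (simplified from the mm/bootmem.c hunks below):

        void __init free_bootmem(unsigned long addr, unsigned long size)
        {
                kmemleak_free_part(__va(addr), size);   /* may be a partial free */
                /* ... existing PFN range bookkeeping ... */
        }

        /* in the allocation slow path, once a region has been carved out */
        memset(region, 0, size);
        kmemleak_alloc(region, size, 1, 0);     /* min_count = 1, no GFP flags */
        return region;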
Signed-off-by: Catalin Marinas Cc: Ingo Molnar Acked-by: Pekka Enberg Reviewed-by: Johannes Weiner --- mm/bootmem.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index d2a9ce952768..701740c9e81b 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -335,6 +336,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, { unsigned long start, end; + kmemleak_free_part(__va(physaddr), size); + start = PFN_UP(physaddr); end = PFN_DOWN(physaddr + size); @@ -354,6 +357,8 @@ void __init free_bootmem(unsigned long addr, unsigned long size) { unsigned long start, end; + kmemleak_free_part(__va(addr), size); + start = PFN_UP(addr); end = PFN_DOWN(addr + size); @@ -516,6 +521,7 @@ find_block: region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) + start_off); memset(region, 0, size); + kmemleak_alloc(region, size, 1, 0); return region; } -- cgit v1.2.1 From 264ef8a904943ed7d0b04fa958894d7a5c2b2c61 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2009 10:33:01 +0100 Subject: kmemleak: Remove alloc_bootmem annotations introduced in the past kmemleak_alloc() calls were added in some places where alloc_bootmem was called. Since now kmemleak tracks bootmem allocations, these explicit calls should be run. Signed-off-by: Catalin Marinas Cc: Ingo Molnar Acked-by: Pekka Enberg --- mm/page_alloc.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ad7cd1c56b07..3ef628845f07 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4745,8 +4745,10 @@ void *__init alloc_large_system_hash(const char *tablename, * some pages at the end of hash table which * alloc_pages_exact() automatically does */ - if (get_order(size) < MAX_ORDER) + if (get_order(size) < MAX_ORDER) { table = alloc_pages_exact(size, GFP_ATOMIC); + kmemleak_alloc(table, size, 1, GFP_ATOMIC); + } } } while (!table && size > PAGE_SIZE && --log2qty); @@ -4764,16 +4766,6 @@ void *__init alloc_large_system_hash(const char *tablename, if (_hash_mask) *_hash_mask = (1 << log2qty) - 1; - /* - * If hashdist is set, the table allocation is done with __vmalloc() - * which invokes the kmemleak_alloc() callback. This function may also - * be called before the slab and kmemleak are initialised when - * kmemleak simply buffers the request to be executed later - * (GFP_ATOMIC flag ignored in this case). - */ - if (!hashdist) - kmemleak_alloc(table, size, 1, GFP_ATOMIC); - return table; } -- cgit v1.2.1 From 8aa7e847d834ed937a9ad37a0f2ad5b8584c1ab0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 9 Jul 2009 14:52:32 +0200 Subject: Fix congestion_wait() sync/async vs read/write confusion Commit 1faa16d22877f4839bd433547d770c676d1d964c accidentally broke the bdi congestion wait queue logic, causing us to wait on congestion for WRITE (== 1) when we really wanted BLK_RW_ASYNC (== 0) instead. 
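The first argument of congestion_wait() indexes the congestion_wqh[] pair, which is keyed by BLK_RW_ASYNC (0) / BLK_RW_SYNC (1) rather than by READ/WRITE, so the old calls were waiting on the wrong queue. A before/after illustration (constant values as stated above):

        /* old, wrong: WRITE == 1, so this waited on the sync queue */
        congestion_wait(WRITE, HZ/10);

        /* new: wait for async (writeback) congestion to clear */
        congestion_wait(BLK_RW_ASYNC, HZ/10);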
Signed-off-by: Jens Axboe --- mm/backing-dev.c | 7 +++---- mm/memcontrol.c | 2 +- mm/page-writeback.c | 8 ++++---- mm/page_alloc.c | 4 ++-- mm/vmscan.c | 8 ++++---- 5 files changed, 14 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 493b468a5035..c86edd244294 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -283,7 +283,6 @@ static wait_queue_head_t congestion_wqh[2] = { __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) }; - void clear_bdi_congested(struct backing_dev_info *bdi, int sync) { enum bdi_state bit; @@ -308,18 +307,18 @@ EXPORT_SYMBOL(set_bdi_congested); /** * congestion_wait - wait for a backing_dev to become uncongested - * @rw: READ or WRITE + * @sync: SYNC or ASYNC IO * @timeout: timeout in jiffies * * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit * write congestion. If no backing_devs are congested then just wait for the * next write to be completed. */ -long congestion_wait(int rw, long timeout) +long congestion_wait(int sync, long timeout) { long ret; DEFINE_WAIT(wait); - wait_queue_head_t *wqh = &congestion_wqh[rw]; + wait_queue_head_t *wqh = &congestion_wqh[sync]; prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); ret = io_schedule_timeout(timeout); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e2fa20dadf40..e717964cb5a0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1973,7 +1973,7 @@ try_to_free: if (!progress) { nr_retries--; /* maybe some writeback is necessary */ - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); } } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7687879253b9..81627ebcd313 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -575,7 +575,7 @@ static void balance_dirty_pages(struct address_space *mapping) if (pages_written >= write_chunk) break; /* We've done our duty */ - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); } if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh && @@ -669,7 +669,7 @@ void throttle_vm_writeout(gfp_t gfp_mask) if (global_page_state(NR_UNSTABLE_NFS) + global_page_state(NR_WRITEBACK) <= dirty_thresh) break; - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); /* * The caller might hold locks which can prevent IO completion @@ -715,7 +715,7 @@ static void background_writeout(unsigned long _min_pages) if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { /* Wrote less than expected */ if (wbc.encountered_congestion || wbc.more_io) - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); else break; } @@ -787,7 +787,7 @@ static void wb_kupdate(unsigned long arg) writeback_inodes(&wbc); if (wbc.nr_to_write > 0) { if (wbc.encountered_congestion || wbc.more_io) - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); else break; /* All the old data is written */ } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ad7cd1c56b07..a35eeab2724c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1666,7 +1666,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, preferred_zone, migratetype); if (!page && gfp_mask & __GFP_NOFAIL) - congestion_wait(WRITE, HZ/50); + congestion_wait(BLK_RW_ASYNC, HZ/50); } while (!page && (gfp_mask & __GFP_NOFAIL)); return page; @@ -1831,7 +1831,7 @@ rebalance: pages_reclaimed += did_some_progress; if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { /* Wait for some write requests to complete then retry */ - congestion_wait(WRITE, HZ/50); + 
congestion_wait(BLK_RW_ASYNC, HZ/50); goto rebalance; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 54155268dfca..dea7abd31098 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1104,7 +1104,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, */ if (nr_freed < nr_taken && !current_is_kswapd() && lumpy_reclaim) { - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); /* * The attempt at page out may have made some @@ -1721,7 +1721,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, /* Take a nap, wait for some writeback to complete */ if (sc->nr_scanned && priority < DEF_PRIORITY - 2) - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); } /* top priority shrink_zones still had more to do? don't OOM, then */ if (!sc->all_unreclaimable && scanning_global_lru(sc)) @@ -1960,7 +1960,7 @@ loop_again: * another pass across the zones. */ if (total_scanned && priority < DEF_PRIORITY - 2) - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); /* * We do this so kswapd doesn't build up large priorities for @@ -2233,7 +2233,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) goto out; if (sc.nr_scanned && prio < DEF_PRIORITY - 2) - congestion_wait(WRITE, HZ / 10); + congestion_wait(BLK_RW_ASYNC, HZ / 10); } } -- cgit v1.2.1 From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 22 Jul 2009 15:44:28 +1000 Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb() mm: Pass virtual address to [__]p{te,ud,md}_free_tlb() Upcoming paches to support the new 64-bit "BookE" powerpc architecture will need to have the virtual address corresponding to PTE page when freeing it, due to the way the HW table walker works. Basically, the TLB can be loaded with "large" pages that cover the whole virtual space (well, sort-of, half of it actually) represented by a PTE page, and which contain an "indirect" bit indicating that this TLB entry RPN points to an array of PTEs from which the TLB can then create direct entries. Thus, in order to invalidate those when PTE pages are deleted, we need the virtual address to pass to tlbilx or tlbivax instructions. The old trick of sticking it somewhere in the PTE page struct page sucks too much, the address is almost readily available in all call sites and almost everybody implemets these as macros, so we may as well add the argument everywhere. I added it to the pmd and pud variants for consistency. Signed-off-by: Benjamin Herrenschmidt Acked-by: David Howells [MN10300 & FRV] Acked-by: Nick Piggin Acked-by: Martin Schwidefsky [s390] Signed-off-by: Linus Torvalds --- mm/memory.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 65216194eb8d..aede2ce3aba4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -135,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd) * Note: this doesn't free the actual pages themselves. That * has been handled earlier when unmapping all the memory regions. 
*/ -static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) +static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); - pte_free_tlb(tlb, token); + pte_free_tlb(tlb, token, addr); tlb->mm->nr_ptes--; } @@ -157,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; - free_pte_range(tlb, pmd); + free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; @@ -173,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, pmd = pmd_offset(pud, start); pud_clear(pud); - pmd_free_tlb(tlb, pmd); + pmd_free_tlb(tlb, pmd, start); } static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, @@ -206,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, pud = pud_offset(pgd, start); pgd_clear(pgd); - pud_free_tlb(tlb, pud); + pud_free_tlb(tlb, pud, start); } /* -- cgit v1.2.1 From dddac6a7b445de95515f64fdf82fe5dc36c02f26 Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Wed, 29 Jul 2009 21:07:55 +0200 Subject: PM / Hibernate: Replace bdget call with simple atomic_inc of i_count Create bdgrab(). This function copies an existing reference to a block_device. It is safe to call from any context. Hibernation code wishes to copy a reference to the active swap device. Right now it calls bdget() under a spinlock, but this is wrong because bdget() can sleep. It doesn't need a full bdget() because we already hold a reference to active swap devices (and the spinlock protects against swapoff). Fixes http://bugzilla.kernel.org/show_bug.cgi?id=13827 Signed-off-by: Alan Jenkins Signed-off-by: Rafael J. Wysocki --- mm/swapfile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index d1ade1a48ee7..8ffdc0d23c53 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -753,7 +753,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) if (!bdev) { if (bdev_p) - *bdev_p = bdget(sis->bdev->bd_dev); + *bdev_p = bdgrab(sis->bdev); spin_unlock(&swap_lock); return i; @@ -765,7 +765,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) struct swap_extent, list); if (se->start_block == offset) { if (bdev_p) - *bdev_p = bdget(sis->bdev->bd_dev); + *bdev_p = bdgrab(sis->bdev); spin_unlock(&swap_lock); bdput(bdev); -- cgit v1.2.1 From f5886c7f96f2542382d3a983c5f13e03d7fc5259 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 29 Jul 2009 16:26:57 +0100 Subject: kmemleak: Protect the seq start/next/stop sequence by rcu_read_lock() Objects passed to kmemleak_seq_next() have an incremented reference count (hence not freed) but they may point via object_list.next to other freed objects. To avoid this, the whole start/next/stop sequence must be protected by rcu_read_lock(). 
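The fix leans on the seq_file contract that ->stop() is always called to match a successful ->start(), so the read-side critical section can span the whole start/next/show/stop traversal (simplified; the IS_ERR handling and mutex setup of the real functions are abridged):

        static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
        {
                /* scan_mutex taken first, as before */
                rcu_read_lock();        /* now held until kmemleak_seq_stop() */
                /* ... walk object_list up to *pos ... */
        }

        static void kmemleak_seq_stop(struct seq_file *seq, void *v)
        {
                rcu_read_unlock();
                mutex_unlock(&scan_mutex);
                if (v)
                        put_object(v);
        }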
Signed-off-by: Catalin Marinas Signed-off-by: Linus Torvalds --- mm/kmemleak.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5aabd41ffb8f..487267310a84 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1217,7 +1217,6 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) } object = NULL; out: - rcu_read_unlock(); return object; } @@ -1233,13 +1232,11 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++(*pos); - rcu_read_lock(); list_for_each_continue_rcu(n, &object_list) { next_obj = list_entry(n, struct kmemleak_object, object_list); if (get_object(next_obj)) break; } - rcu_read_unlock(); put_object(prev_obj); return next_obj; @@ -1255,6 +1252,7 @@ static void kmemleak_seq_stop(struct seq_file *seq, void *v) * kmemleak_seq_start may return ERR_PTR if the scan_mutex * waiting was interrupted, so only release it if !IS_ERR. */ + rcu_read_unlock(); mutex_unlock(&scan_mutex); if (v) put_object(v); -- cgit v1.2.1 From e084b2d95e48b31aa45f9c49ffc6cdae8bdb21d4 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 29 Jul 2009 15:02:04 -0700 Subject: page-allocator: preserve PFN ordering when __GFP_COLD is set Fix a post-2.6.24 performace regression caused by 3dfa5721f12c3d5a441448086bee156887daa961 ("page-allocator: preserve PFN ordering when __GFP_COLD is set"). Narayanan reports "The regression is around 15%. There is no disk controller as our setup is based on Samsung OneNAND used as a memory mapped device on a OMAP2430 based board." The page allocator tries to preserve contiguous PFN ordering when returning pages such that repeated callers to the allocator have a strong chance of getting physically contiguous pages, particularly when external fragmentation is low. However, of the bulk of the allocations have __GFP_COLD set as they are due to aio_read() for example, then the PFNs are in reverse PFN order. This can cause performance degration when used with IO controllers that could have merged the requests. This patch attempts to preserve the contiguous ordering of PFNs for users of __GFP_COLD. Signed-off-by: Mel Gorman Reported-by: Narayananu Gopalakrishnan Tested-by: Narayanan Gopalakrishnan Cc: KAMEZAWA Hiroyuki Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index caa92689aac9..ae28c22a7fdb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -882,7 +882,7 @@ retry_reserve: */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype) + int migratetype, int cold) { int i; @@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * merge IO requests if the physical pages are ordered * properly. 
*/ - list_add(&page->lru, list); + if (likely(cold == 0)) + list_add(&page->lru, list); + else + list_add_tail(&page->lru, list); set_page_private(page, migratetype); list = &page->lru; } @@ -1119,7 +1122,8 @@ again: local_irq_save(flags); if (!pcp->count) { pcp->count = rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list, migratetype); + pcp->batch, &pcp->list, + migratetype, cold); if (unlikely(!pcp->count)) goto failed; } @@ -1138,7 +1142,8 @@ again: /* Allocate more to the pcp list if necessary */ if (unlikely(&page->lru == &pcp->list)) { pcp->count += rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list, migratetype); + pcp->batch, &pcp->list, + migratetype, cold); page = list_entry(pcp->list.next, struct page, lru); } -- cgit v1.2.1 From 6583bb64fc370842b32a87c67750c26f6d559af0 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 29 Jul 2009 15:02:06 -0700 Subject: mm: avoid endless looping for oom killed tasks If a task is oom killed and still cannot find memory when trying with no watermarks, it's better to fail the allocation attempt than to loop endlessly. Direct reclaim has already failed and the oom killer will be a no-op since current has yet to die, so there is no other alternative for allocations that are not __GFP_NOFAIL. Acked-by: Mel Gorman Signed-off-by: David Rientjes Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ae28c22a7fdb..2dbb2fc68837 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1794,6 +1794,10 @@ rebalance: if (p->flags & PF_MEMALLOC) goto nopage; + /* Avoid allocations with no watermarks from looping endlessly */ + if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) + goto nopage; + /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, zonelist, high_zoneidx, -- cgit v1.2.1 From e4c6f8bed01f9f9a5c607bd689bf67e7b8a36bd8 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Wed, 29 Jul 2009 15:02:16 -0700 Subject: hugetlbfs: fix i_blocks accounting As reported in Red Hat bz #509671, i_blocks for files on hugetlbfs get accounting wrong when doing something like: $ > foo $ date > foo date: write error: Invalid argument $ /usr/bin/stat foo File: `foo' Size: 0 Blocks: 18446744073709547520 IO Block: 2097152 regular ... This is because hugetlb_unreserve_pages() is unconditionally removing blocks_per_huge_page(h) on each call rather than using the freed amount. If there were 0 blocks, it goes negative, resulting in the above. This is a regression from commit a5516438959d90b071ff0a484ce4f3f523dc3152 ("hugetlb: modular state for hugetlb page size") which did: - inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; + inode->i_blocks -= blocks_per_huge_page(h); so just put back the freed multiplier, and it's all happy again. 
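The bogus Blocks value in the stat output is a plain unsigned underflow: with the 2 MB huge page implied by the 2097152-byte IO Block above, blocks_per_huge_page(h) is 4096 512-byte blocks, and subtracting that from an i_blocks of 0 wraps around (illustrative arithmetic only):

        u64 i_blocks = 0;
        i_blocks -= 4096;       /* blocks_per_huge_page(h) for a 2 MB page */
        /* i_blocks == 2^64 - 4096 == 18446744073709547520, as shown by stat */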
Signed-off-by: Eric Sandeen Acked-by: Andi Kleen Cc: William Lee Irwin III Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d0351e31f474..cafdcee154e8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2370,7 +2370,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) long chg = region_truncate(&inode->i_mapping->private_list, offset); spin_lock(&inode->i_lock); - inode->i_blocks -= blocks_per_huge_page(h); + inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); hugetlb_put_quota(inode->i_mapping, (chg - freed)); -- cgit v1.2.1 From 887032670d47366a8c8f25396ea7c14b7b2cc620 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 29 Jul 2009 15:04:06 -0700 Subject: cgroup avoid permanent sleep at rmdir After commit ec64f51545fffbc4cb968f0cea56341a4b07e85a ("cgroup: fix frequent -EBUSY at rmdir"), cgroup's rmdir (especially against memcg) doesn't return -EBUSY by temporary ref counts. That commit expects all refs after pre_destroy() is temporary but...it wasn't. Then, rmdir can wait permanently. This patch tries to fix that and change followings. - set CGRP_WAIT_ON_RMDIR flag before pre_destroy(). - clear CGRP_WAIT_ON_RMDIR flag when the subsys finds racy case. if there are sleeping ones, wakes them up. - rmdir() sleeps only when CGRP_WAIT_ON_RMDIR flag is set. Tested-by: Daisuke Nishimura Reported-by: Daisuke Nishimura Reviewed-by: Paul Menage Acked-by: Balbir Sigh Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e717964cb5a0..fd4529d86de5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1207,6 +1207,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, ret = 0; out: unlock_page_cgroup(pc); + /* + * We charges against "to" which may not have any tasks. Then, "to" + * can be under rmdir(). But in current implementation, caller of + * this function is just force_empty() and it's garanteed that + * "to" is never removed. So, we don't check rmdir status here. + */ return ret; } @@ -1428,6 +1434,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, return; if (!ptr) return; + cgroup_exclude_rmdir(&ptr->css); pc = lookup_page_cgroup(page); mem_cgroup_lru_del_before_commit_swapcache(page); __mem_cgroup_commit_charge(ptr, pc, ctype); @@ -1457,8 +1464,12 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, } rcu_read_unlock(); } - /* add this page(page_cgroup) to the LRU we want. */ - + /* + * At swapin, we may charge account against cgroup which has no tasks. + * So, rmdir()->pre_destroy() can be called while we do this charge. + * In that case, we need to call pre_destroy() again. check it here. + */ + cgroup_release_and_wakeup_rmdir(&ptr->css); } void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) @@ -1664,7 +1675,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, if (!mem) return; - + cgroup_exclude_rmdir(&mem->css); /* at migration success, oldpage->mapping is NULL. 
*/ if (oldpage->mapping) { target = oldpage; @@ -1704,6 +1715,12 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, */ if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) mem_cgroup_uncharge_page(target); + /* + * At migration, we may charge account against cgroup which has no tasks + * So, rmdir()->pre_destroy() can be called while we do this charge. + * In that case, we need to call pre_destroy() again. check it here. + */ + cgroup_release_and_wakeup_rmdir(&mem->css); } /* -- cgit v1.2.1 From 1fc28b70fe2dbf87e061b6ce5091a1f8e4e5d4e7 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 29 Jul 2009 15:04:08 -0700 Subject: page-allocator: allow too high-order warning messages to be suppressed with __GFP_NOWARN The page allocator warns once when an order >= MAX_ORDER is specified. This is to catch callers of the allocator that are always falling back to their worst-case when it was not expected. However, there are cases where the caller is behaving correctly but cannot suppress the warning. This patch allows the warning to be suppressed by the callers by specifying __GFP_NOWARN. Signed-off-by: Mel Gorman Acked-by: David Rientjes Cc: Arnaldo Carvalho de Melo Cc: "David S. Miller" Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2dbb2fc68837..d052abbe3063 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1745,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * be using allocators in order of preference for an area that is * too large. */ - if (WARN_ON_ONCE(order >= MAX_ORDER)) + if (order >= MAX_ORDER) { + WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); return NULL; + } /* * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and -- cgit v1.2.1 From 4bfc44958e499af9a73f62201543b3a1f617cfeb Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Thu, 6 Aug 2009 15:07:33 -0700 Subject: mm: make set_mempolicy(MPOL_INTERLEAV) N_HIGH_MEMORY aware At first, init_task's mems_allowed is initialized as this. init_task->mems_allowed == node_state[N_POSSIBLE] And cpuset's top_cpuset mask is initialized as this top_cpuset->mems_allowed = node_state[N_HIGH_MEMORY] Before 2.6.29: policy's mems_allowed is initialized as this. 1. update tasks->mems_allowed by its cpuset->mems_allowed. 2. policy->mems_allowed = nodes_and(tasks->mems_allowed, user's mask) Updating task's mems_allowed in reference to top_cpuset's one. cpuset's mems_allowed is aware of N_HIGH_MEMORY, always. In 2.6.30: After commit 58568d2a8215cb6f55caf2332017d7bdff954e1c ("cpuset,mm: update tasks' mems_allowed in time"), policy's mems_allowed is initialized as this. 1. policy->mems_allowd = nodes_and(task->mems_allowed, user's mask) Here, if task is in top_cpuset, task->mems_allowed is not updated from init's one. Assume user excutes command as #numactrl --interleave=all ,.... policy->mems_allowd = nodes_and(N_POSSIBLE, ALL_SET_MASK) Then, policy's mems_allowd can includes a possible node, which has no pgdat. MPOL's INTERLEAVE just scans nodemask of task->mems_allowd and access this directly. NODE_DATA(nid)->zonelist even if NODE_DATA(nid)==NULL Then, what's we need is making policy->mems_allowed be aware of N_HIGH_MEMORY. This patch does that. But to do so, extra nodemask will be on statck. Because I know cpumask has a new interface of CPUMASK_ALLOC(), I added it to node. This patch stands on old behavior. 
But I feel this fix itself is just a Band-Aid. But to do fundametal fix, we have to take care of memory hotplug and it takes time. (task->mems_allowd should be N_HIGH_MEMORY, I think.) mpol_set_nodemask() should be aware of N_HIGH_MEMORY and policy's nodemask should be includes only online nodes. In old behavior, this is guaranteed by frequent reference to cpuset's code. Now, most of them are removed and mempolicy has to check it by itself. To do check, a few nodemask_t will be used for calculating nodemask. But, size of nodemask_t can be big and it's not good to allocate them on stack. Now, cpumask_t has CPUMASK_ALLOC/FREE an easy code for get scratch area. NODEMASK_ALLOC/FREE shoudl be there. [akpm@linux-foundation.org: cleanups & tweaks] Tested-by: KOSAKI Motohiro Signed-off-by: KAMEZAWA Hiroyuki Cc: Miao Xie Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Christoph Lameter Cc: Paul Menage Cc: Nick Piggin Cc: Yasunori Goto Cc: Pekka Enberg Cc: David Rientjes Cc: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 84 ++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e08e2c4da63a..7dd9d9f80694 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -191,25 +191,27 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write. */ -static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes) +static int mpol_set_nodemask(struct mempolicy *pol, + const nodemask_t *nodes, struct nodemask_scratch *nsc) { - nodemask_t cpuset_context_nmask; int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; + /* Check N_HIGH_MEMORY */ + nodes_and(nsc->mask1, + cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) - mpol_relative_nodemask(&cpuset_context_nmask, nodes, - &cpuset_current_mems_allowed); + mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); else - nodes_and(cpuset_context_nmask, *nodes, - cpuset_current_mems_allowed); + nodes_and(nsc->mask2, *nodes, nsc->mask1); + if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else @@ -217,8 +219,10 @@ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes) cpuset_current_mems_allowed; } - ret = mpol_ops[pol->mode].create(pol, - nodes ? &cpuset_context_nmask : NULL); + if (nodes) + ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); + else + ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } @@ -620,12 +624,17 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, { struct mempolicy *new, *old; struct mm_struct *mm = current->mm; + NODEMASK_SCRATCH(scratch); int ret; - new = mpol_new(mode, flags, nodes); - if (IS_ERR(new)) - return PTR_ERR(new); + if (!scratch) + return -ENOMEM; + new = mpol_new(mode, flags, nodes); + if (IS_ERR(new)) { + ret = PTR_ERR(new); + goto out; + } /* * prevent changing our mempolicy while show_numa_maps() * is using it. 
@@ -635,13 +644,13 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, if (mm) down_write(&mm->mmap_sem); task_lock(current); - ret = mpol_set_nodemask(new, nodes); + ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(new); - return ret; + goto out; } old = current->mempolicy; current->mempolicy = new; @@ -654,7 +663,10 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, up_write(&mm->mmap_sem); mpol_put(old); - return 0; + ret = 0; +out: + NODEMASK_SCRATCH_FREE(scratch); + return ret; } /* @@ -1014,12 +1026,20 @@ static long do_mbind(unsigned long start, unsigned long len, if (err) return err; } - down_write(&mm->mmap_sem); - task_lock(current); - err = mpol_set_nodemask(new, nmask); - task_unlock(current); + { + NODEMASK_SCRATCH(scratch); + if (scratch) { + down_write(&mm->mmap_sem); + task_lock(current); + err = mpol_set_nodemask(new, nmask, scratch); + task_unlock(current); + if (err) + up_write(&mm->mmap_sem); + } else + err = -ENOMEM; + NODEMASK_SCRATCH_FREE(scratch); + } if (err) { - up_write(&mm->mmap_sem); mpol_put(new); return err; } @@ -1891,6 +1911,7 @@ restart: * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. + * This is called at get_inode() calls and we can use GFP_KERNEL. */ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { @@ -1902,19 +1923,24 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; + NODEMASK_SCRATCH(scratch); + if (!scratch) + return; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) { mpol_put(mpol); /* drop our ref on sb mpol */ + NODEMASK_SCRATCH_FREE(scratch); return; /* no valid nodemask intersection */ } task_lock(current); - ret = mpol_set_nodemask(new, &mpol->w.user_nodemask); + ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); mpol_put(mpol); /* drop our ref on sb mpol */ if (ret) { + NODEMASK_SCRATCH_FREE(scratch); mpol_put(new); return; } @@ -1924,6 +1950,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ mpol_put(new); /* drop initial ref */ + NODEMASK_SCRATCH_FREE(scratch); } } @@ -2140,13 +2167,18 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) err = 1; else { int ret; - - task_lock(current); - ret = mpol_set_nodemask(new, &nodes); - task_unlock(current); - if (ret) + NODEMASK_SCRATCH(scratch); + if (scratch) { + task_lock(current); + ret = mpol_set_nodemask(new, &nodes, scratch); + task_unlock(current); + } else + ret = -ENOMEM; + NODEMASK_SCRATCH_FREE(scratch); + if (ret) { err = 1; - else if (no_context) { + mpol_put(new); + } else if (no_context) { /* save for contextualization */ new->w.user_nodemask = nodes; } -- cgit v1.2.1 From 5e2f89b5d5d87a7c3ba19fc85ba0c29adb65f639 Mon Sep 17 00:00:00 2001 From: "Figo.zhang" Date: Sat, 8 Aug 2009 21:01:22 +0800 Subject: mempool.c: clean up type-casting clean up type-casting twice. "size_t" is typedef as "unsigned long" in 64-bit system, and "unsigned int" in 32-bit system, and the intermediate cast to 'long' is pointless. 
Signed-off-by: Figo.zhang Signed-off-by: Linus Torvalds --- mm/mempool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mempool.c b/mm/mempool.c index a46eb1b4bb66..32e75d400503 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -303,14 +303,14 @@ EXPORT_SYMBOL(mempool_free_slab); */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) { - size_t size = (size_t)(long)pool_data; + size_t size = (size_t)pool_data; return kmalloc(size, gfp_mask); } EXPORT_SYMBOL(mempool_kmalloc); void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) { - size_t size = (size_t) pool_data; + size_t size = (size_t)pool_data; return kzalloc(size, gfp_mask); } EXPORT_SYMBOL(mempool_kzalloc); -- cgit v1.2.1 From 74d46d6b2d23d44d72c37df4c6a5d2e782f7b088 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 21 Jul 2009 17:11:50 +0900 Subject: percpu, sparc64: fix sparse possible cpu map handling percpu code has been assuming num_possible_cpus() == nr_cpu_ids which is incorrect if cpu_possible_map contains holes. This causes percpu code to access beyond allocated memories and vmalloc areas. On a sparc64 machine with cpus 0 and 2 (u60), this triggers the following warning or fails boot. WARNING: at /devel/tj/os/work/mm/vmalloc.c:106 vmap_page_range_noflush+0x1f0/0x240() Modules linked in: Call Trace: [00000000004b17d0] vmap_page_range_noflush+0x1f0/0x240 [00000000004b1840] map_vm_area+0x20/0x60 [00000000004b1950] __vmalloc_area_node+0xd0/0x160 [0000000000593434] deflate_init+0x14/0xe0 [0000000000583b94] __crypto_alloc_tfm+0xd4/0x1e0 [00000000005844f0] crypto_alloc_base+0x50/0xa0 [000000000058b898] alg_test_comp+0x18/0x80 [000000000058dad4] alg_test+0x54/0x180 [000000000058af00] cryptomgr_test+0x40/0x60 [0000000000473098] kthread+0x58/0x80 [000000000042b590] kernel_thread+0x30/0x60 [0000000000472fd0] kthreadd+0xf0/0x160 ---[ end trace 429b268a213317ba ]--- This patch fixes generic percpu functions and sparc64 setup_per_cpu_areas() so that they handle sparse cpu_possible_map properly. Please note that on x86, cpu_possible_map() doesn't contain holes and thus num_possible_cpus() == nr_cpu_ids and this patch doesn't cause any behavior difference. Signed-off-by: Tejun Heo Acked-by: David S. Miller Cc: Ingo Molnar --- mm/percpu.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index b70f2acd8853..e0be1146f617 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -8,12 +8,12 @@ * * This is percpu allocator which can handle both static and dynamic * areas. Percpu areas are allocated in chunks in vmalloc area. Each - * chunk is consisted of num_possible_cpus() units and the first chunk - * is used for static percpu variables in the kernel image (special - * boot time alloc/init handling necessary as these areas need to be - * brought up before allocation services are running). Unit grows as - * necessary and all units grow or shrink in unison. When a chunk is - * filled up, another chunk is allocated. ie. in vmalloc area + * chunk is consisted of nr_cpu_ids units and the first chunk is used + * for static percpu variables in the kernel image (special boot time + * alloc/init handling necessary as these areas need to be brought up + * before allocation services are running). Unit grows as necessary + * and all units grow or shrink in unison. When a chunk is filled up, + * another chunk is allocated. ie. 
in vmalloc area * * c0 c1 c2 * ------------------- ------------------- ------------ @@ -558,7 +558,7 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, bool flush_tlb) { - unsigned int last = num_possible_cpus() - 1; + unsigned int last = nr_cpu_ids - 1; unsigned int cpu; /* unmap must not be done on immutable chunk */ @@ -643,7 +643,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size, */ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end) { - unsigned int last = num_possible_cpus() - 1; + unsigned int last = nr_cpu_ids - 1; unsigned int cpu; int err; @@ -1067,9 +1067,9 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, PFN_UP(size_sum)); pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; - pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size; + pcpu_chunk_size = nr_cpu_ids * pcpu_unit_size; pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) - + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *); + + nr_cpu_ids * pcpu_unit_pages * sizeof(struct page *); if (dyn_size < 0) dyn_size = pcpu_unit_size - static_size - reserved_size; @@ -1248,7 +1248,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, } else pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); - chunk_size = pcpue_unit_size * num_possible_cpus(); + chunk_size = pcpue_unit_size * nr_cpu_ids; pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); @@ -1259,12 +1259,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, } /* return the leftover and copy */ - for_each_possible_cpu(cpu) { + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { void *ptr = pcpue_ptr + cpu * pcpue_unit_size; - free_bootmem(__pa(ptr + pcpue_size), - pcpue_unit_size - pcpue_size); - memcpy(ptr, __per_cpu_load, static_size); + if (cpu_possible(cpu)) { + free_bootmem(__pa(ptr + pcpue_size), + pcpue_unit_size - pcpue_size); + memcpy(ptr, __per_cpu_load, static_size); + } else + free_bootmem(__pa(ptr), pcpue_unit_size); } /* we're ready, commit */ -- cgit v1.2.1 From 142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Thu, 13 Aug 2009 02:00:13 -0400 Subject: percpu: use the right flag for get_vm_area() get_vm_area() only accepts VM_* flags, not GFP_*. And according to the doc of get_vm_area(), here should be VM_ALLOC. Signed-off-by: WANG Cong Acked-by: Tejun Heo Cc: Ingo Molnar --- mm/percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index e0be1146f617..5fe37842e0ea 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -749,7 +749,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void) chunk->map[chunk->map_used++] = pcpu_unit_size; chunk->page = chunk->page_ar; - chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL); + chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC); if (!chunk->vm) { free_pcpu_chunk(chunk); return NULL; -- cgit v1.2.1
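The distinction behind the last fix: get_vm_area() takes VM_* flags describing the vmalloc area, not GFP_* allocation flags, and VM_ALLOC is the flag vmalloc() itself uses for its areas. A minimal sketch of the intended call follows; the wrapper name is hypothetical and the size is caller-supplied (the percpu code above passes pcpu_chunk_size).

#include <linux/vmalloc.h>

/*
 * Sketch only: reserve a chunk of vmalloc address space, as
 * alloc_pcpu_chunk() does, and let the caller map pages into it later.
 */
static struct vm_struct *example_reserve_area(unsigned long size)
{
	/* VM_ALLOC, not GFP_KERNEL: this argument selects area flags,
	 * not how the backing pages will eventually be allocated */
	return get_vm_area(size, VM_ALLOC);
}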