author		Jens Axboe <axboe@fb.com>	2014-09-22 11:57:32 -0600
committer	Jens Axboe <axboe@fb.com>	2014-09-22 11:57:32 -0600
commit		6d11fb454b161a4565c57be6f1c5527235741003 (patch)
tree		c238ed3df2f654181c2a0746478a33b32214cc60 /mm
parent		b207892b061da7608878e273ae22ba9bf9be264b (diff)
parent		8b95741569eabc5eb17da71d1d3668cdb0bef86c (diff)
Merge branch 'for-linus' into for-3.18/core
Moving patches from for-linus to 3.18 instead, so pull in the changes
that will go to Linus today.
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c	4
-rw-r--r--	mm/memcontrol.c	103
-rw-r--r--	mm/mmap.c	16
-rw-r--r--	mm/nobootmem.c	2
-rw-r--r--	mm/percpu-vm.c	22
-rw-r--r--	mm/percpu.c	2
6 files changed, 110 insertions, 39 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 70fad0c0dafb..6ecb0d937fb5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
 		if (nid != NUMA_NO_NODE && nid != m_nid)
 			continue;
 
+		/* skip hotpluggable memory regions if needed */
+		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ec4dcf1b9562..085dc6d2f876 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2534,6 +2534,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long long size;
 	int ret = 0;
 
+	if (mem_cgroup_is_root(memcg))
+		goto done;
 retry:
 	if (consume_stock(memcg, nr_pages))
 		goto done;
@@ -2611,9 +2613,7 @@ nomem:
 	if (!(gfp_mask & __GFP_NOFAIL))
 		return -ENOMEM;
 bypass:
-	memcg = root_mem_cgroup;
-	ret = -EINTR;
-	goto retry;
+	return -EINTR;
 
 done_restock:
 	if (batch > nr_pages)
@@ -2626,6 +2626,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge(&memcg->res, bytes);
 	if (do_swap_account)
 		res_counter_uncharge(&memcg->memsw, bytes);
@@ -2640,6 +2643,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
+	if (mem_cgroup_is_root(memcg))
+		return;
+
 	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
 	if (do_swap_account)
 		res_counter_uncharge_until(&memcg->memsw,
@@ -4093,6 +4099,46 @@ out:
 	return retval;
 }
 
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+					       enum mem_cgroup_stat_index idx)
+{
+	struct mem_cgroup *iter;
+	long val = 0;
+
+	/* Per-cpu values can be negative, use a signed accumulator */
+	for_each_mem_cgroup_tree(iter, memcg)
+		val += mem_cgroup_read_stat(iter, idx);
+
+	if (val < 0) /* race ? */
+		val = 0;
+	return val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+{
+	u64 val;
+
+	if (!mem_cgroup_is_root(memcg)) {
+		if (!swap)
+			return res_counter_read_u64(&memcg->res, RES_USAGE);
+		else
+			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	}
+
+	/*
+	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+	 */
+	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
+
+	if (swap)
+		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+
+	return val << PAGE_SHIFT;
+}
+
+
 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
@@ -4102,8 +4148,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 
 	switch (type) {
 	case _MEM:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, false);
 		return res_counter_read_u64(&memcg->res, name);
 	case _MEMSWAP:
+		if (name == RES_USAGE)
+			return mem_cgroup_usage(memcg, true);
 		return res_counter_read_u64(&memcg->memsw, name);
 	case _KMEM:
 		return res_counter_read_u64(&memcg->kmem, name);
@@ -4572,10 +4622,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	if (!t)
 		goto unlock;
 
-	if (!swap)
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
-	else
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+	usage = mem_cgroup_usage(memcg, swap);
 
 	/*
 	 * current_threshold points to threshold just below or equal to usage.
@@ -4673,10 +4720,10 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -4762,10 +4809,10 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
 
 	if (type == _MEM) {
 		thresholds = &memcg->thresholds;
-		usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, false);
 	} else if (type == _MEMSWAP) {
 		thresholds = &memcg->memsw_thresholds;
-		usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		usage = mem_cgroup_usage(memcg, true);
 	} else
 		BUG();
 
@@ -5525,9 +5572,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		 * core guarantees its existence.
 		 */
 	} else {
-		res_counter_init(&memcg->res, &root_mem_cgroup->res);
-		res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
-		res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+		res_counter_init(&memcg->res, NULL);
+		res_counter_init(&memcg->memsw, NULL);
+		res_counter_init(&memcg->kmem, NULL);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
@@ -5969,8 +6016,9 @@ static void __mem_cgroup_clear_mc(void)
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
 		/* uncharge swap account from the old cgroup */
-		res_counter_uncharge(&mc.from->memsw,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.from))
+			res_counter_uncharge(&mc.from->memsw,
+					     PAGE_SIZE * mc.moved_swap);
 
 		for (i = 0; i < mc.moved_swap; i++)
 			css_put(&mc.from->css);
@@ -5979,8 +6027,9 @@ static void __mem_cgroup_clear_mc(void)
 		 * we charged both to->res and to->memsw, so we should
 		 * uncharge to->res.
 		 */
-		res_counter_uncharge(&mc.to->res,
-				     PAGE_SIZE * mc.moved_swap);
+		if (!mem_cgroup_is_root(mc.to))
+			res_counter_uncharge(&mc.to->res,
+					     PAGE_SIZE * mc.moved_swap);
 		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
@@ -6345,7 +6394,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
 	rcu_read_lock();
 	memcg = mem_cgroup_lookup(id);
 	if (memcg) {
-		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+		if (!mem_cgroup_is_root(memcg))
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(memcg, false);
 		css_put(&memcg->css);
 	}
@@ -6509,12 +6559,15 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 {
 	unsigned long flags;
 
-	if (nr_mem)
-		res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE);
-	if (nr_memsw)
-		res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE);
-
-	memcg_oom_recover(memcg);
+	if (!mem_cgroup_is_root(memcg)) {
+		if (nr_mem)
+			res_counter_uncharge(&memcg->res,
+					     nr_mem * PAGE_SIZE);
+		if (nr_memsw)
+			res_counter_uncharge(&memcg->memsw,
+					     nr_memsw * PAGE_SIZE);
+		memcg_oom_recover(memcg);
+	}
 
 	local_irq_save(flags);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
diff --git a/mm/mmap.c b/mm/mmap.c
index c1f2ea4a0b99..c0a3637cdb64 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		if (vma->vm_start < prev) {
-			pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+			pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
 			bug = 1;
 		}
 		if (vma->vm_start < pend) {
-			pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+			pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
 			bug = 1;
 		}
 		if (vma->vm_start > vma->vm_end) {
-			pr_info("vm_end %lx < vm_start %lx\n",
+			pr_emerg("vm_end %lx < vm_start %lx\n",
 				vma->vm_end, vma->vm_start);
 			bug = 1;
 		}
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_info("free gap %lx, correct %lx\n",
+			pr_emerg("free gap %lx, correct %lx\n",
 				vma->rb_subtree_gap,
 				vma_compute_subtree_gap(vma));
 			bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
 	for (nd = pn; nd; nd = rb_prev(nd))
 		j++;
 	if (i != j) {
-		pr_info("backwards %d, forwards %d\n", j, i);
+		pr_emerg("backwards %d, forwards %d\n", j, i);
 		bug = 1;
 	}
 	return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	if (highest_address != mm->highest_vm_end) {
-		pr_info("mm->highest_vm_end %lx, found %lx\n",
+		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
 			  mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count) {
-		pr_info("map_count %d rb %d\n", mm->map_count, i);
+		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	BUG_ON(bug);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7ed58602e71b..7c7ab32ee503 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
 	phys_addr_t start, end;
 	u64 i;
 
+	memblock_clear_hotplug(0, -1);
+
 	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 		count += __free_memory_core(start, end);
 
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4cd..51108165f829 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    int page_start, int page_end)
 {
 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-	unsigned int cpu;
+	unsigned int cpu, tcpu;
 	int i;
 
 	for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-			if (!*pagep) {
-				pcpu_free_pages(chunk, pages, populated,
-						page_start, page_end);
-				return -ENOMEM;
-			}
+			if (!*pagep)
+				goto err;
 		}
 	}
 	return 0;
+
+err:
+	while (--i >= page_start)
+		__free_page(pages[pcpu_page_idx(cpu, i)]);
+
+	for_each_possible_cpu(tcpu) {
+		if (tcpu == cpu)
+			break;
+		for (i = page_start; i < page_end; i++)
+			__free_page(pages[pcpu_page_idx(tcpu, i)]);
+	}
+	return -ENOMEM;
 }
 
 /**
@@ -263,6 +272,7 @@ err:
 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
 				   page_end - page_start);
 	}
+	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
 	return err;
 }
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 2139e30a4b44..da997f9800bd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
 
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
+
+	pcpu_free_alloc_info(ai);
 }
 #endif	/* CONFIG_SMP */
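The pcpu_alloc_pages() hunk above replaces the pcpu_free_pages() call on the error path with explicit unwinding: free the pages already allocated for the current CPU, then every page of each CPU that completed before it. Below is a minimal userspace sketch of that rollback pattern; it uses malloc()/free() over a hypothetical NR_CPUS x 16 page grid instead of the kernel's alloc_pages_node()/__free_page(), so all names and bounds here are illustrative, not the kernel API.

```c
/*
 * Illustrative sketch (not kernel code) of the rollback pattern in the
 * pcpu_alloc_pages() change: allocate one "page" per (cpu, page) slot,
 * and on failure unwind the partially filled current cpu first, then
 * every cpu that was fully populated before it.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS   4      /* hypothetical */
#define NR_PAGES  16     /* hypothetical */
#define PAGE_SIZE 4096   /* hypothetical */

static void *pages[NR_CPUS][NR_PAGES];

static int alloc_pages_range(int page_start, int page_end)
{
	int cpu, tcpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		for (i = page_start; i < page_end; i++) {
			pages[cpu][i] = malloc(PAGE_SIZE);
			if (!pages[cpu][i])
				goto err;
		}
	}
	return 0;

err:
	/* undo the partially populated current cpu (slot i failed) */
	while (--i >= page_start)
		free(pages[cpu][i]);

	/* undo every cpu that was fully populated before the failing one */
	for (tcpu = 0; tcpu < cpu; tcpu++)
		for (i = page_start; i < page_end; i++)
			free(pages[tcpu][i]);

	return -1;
}

int main(void)
{
	if (alloc_pages_range(0, NR_PAGES) == 0)
		printf("allocated all pages\n");
	/* toy example: successful allocations are not freed here */
	return 0;
}
```

The design point mirrored from the diff is that the cleanup path frees exactly what this function allocated, rather than delegating to a bulk free helper that assumes every slot was populated.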