Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 117
-rw-r--r--  mm/memory.c     |  28
-rw-r--r--  mm/mmap.c       |  18
-rw-r--r--  mm/page_alloc.c |   6
-rw-r--r--  mm/slub.c       |   2
5 files changed, 80 insertions, 91 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b659260c56ad..f342778a0c0a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3873,14 +3873,21 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 	return val << PAGE_SHIFT;
 }
 
-static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
+static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
+			       struct file *file, char __user *buf,
+			       size_t nbytes, loff_t *ppos)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	char str[64];
 	u64 val;
-	int type, name;
+	int type, name, len;
 
 	type = MEMFILE_TYPE(cft->private);
 	name = MEMFILE_ATTR(cft->private);
+
+	if (!do_swap_account && type == _MEMSWAP)
+		return -EOPNOTSUPP;
+
 	switch (type) {
 	case _MEM:
 		if (name == RES_USAGE)
@@ -3897,7 +3904,9 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 	default:
 		BUG();
 	}
-	return val;
+
+	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
+	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
 }
 /*
  * The user of this function is...
@@ -3913,6 +3922,10 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 
 	type = MEMFILE_TYPE(cft->private);
 	name = MEMFILE_ATTR(cft->private);
+
+	if (!do_swap_account && type == _MEMSWAP)
+		return -EOPNOTSUPP;
+
 	switch (name) {
 	case RES_LIMIT:
 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
@@ -3978,12 +3991,15 @@ out:
 
 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 	int type, name;
 
-	memcg = mem_cgroup_from_cont(cont);
 	type = MEMFILE_TYPE(event);
 	name = MEMFILE_ATTR(event);
+
+	if (!do_swap_account && type == _MEMSWAP)
+		return -EOPNOTSUPP;
+
 	switch (name) {
 	case RES_MAX_USAGE:
 		if (type == _MEM)
@@ -4624,29 +4640,22 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
-	/*
-	 * Part of this would be better living in a separate allocation
-	 * function, leaving us with just the cgroup tree population work.
-	 * We, however, depend on state such as network's proto_list that
-	 * is only initialized after cgroup creation. I found the less
-	 * cumbersome way to deal with it to defer it all to populate time
-	 */
-	return mem_cgroup_sockets_init(cont, ss);
+	return mem_cgroup_sockets_init(memcg, ss);
 };
 
-static void kmem_cgroup_destroy(struct cgroup *cont)
+static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 {
-	mem_cgroup_sockets_destroy(cont);
+	mem_cgroup_sockets_destroy(memcg);
 }
 #else
-static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	return 0;
 }
 
-static void kmem_cgroup_destroy(struct cgroup *cont)
+static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 {
 }
 #endif
@@ -4655,7 +4664,7 @@ static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 		.register_event = mem_cgroup_usage_register_event,
 		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
@@ -4663,25 +4672,25 @@ static struct cftype mem_cgroup_files[] = {
 		.name = "max_usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
 		.trigger = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "limit_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
 		.write_string = mem_cgroup_write,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "soft_limit_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
 		.write_string = mem_cgroup_write,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "failcnt",
 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
 		.trigger = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "stat",
@@ -4721,14 +4730,11 @@ static struct cftype mem_cgroup_files[] = {
 		.mode = S_IRUGO,
 	},
 #endif
-};
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-static struct cftype memsw_cgroup_files[] = {
 	{
 		.name = "memsw.usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 		.register_event = mem_cgroup_usage_register_event,
 		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
@@ -4736,35 +4742,23 @@ static struct cftype memsw_cgroup_files[] = {
 		.name = "memsw.max_usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
 		.trigger = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "memsw.limit_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 		.write_string = mem_cgroup_write,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
 	{
 		.name = "memsw.failcnt",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
 		.trigger = mem_cgroup_reset,
-		.read_u64 = mem_cgroup_read,
+		.read = mem_cgroup_read,
 	},
-};
-
-static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
-{
-	if (!do_swap_account)
-		return 0;
-	return cgroup_add_files(cont, ss, memsw_cgroup_files,
-				ARRAY_SIZE(memsw_cgroup_files));
-};
-#else
-static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
-{
-	return 0;
-}
 #endif
+	{ },	/* terminate */
+};
 
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
@@ -5016,6 +5010,17 @@ mem_cgroup_create(struct cgroup *cont)
 	memcg->move_charge_at_immigrate = 0;
 	mutex_init(&memcg->thresholds_lock);
 	spin_lock_init(&memcg->move_lock);
+
+	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+	if (error) {
+		/*
+		 * We call put now because our (and parent's) refcnts
+		 * are already in place. mem_cgroup_put() will internally
+		 * call __mem_cgroup_free, so return directly
+		 */
+		mem_cgroup_put(memcg);
+		return ERR_PTR(error);
+	}
 	return &memcg->css;
 free_out:
 	__mem_cgroup_free(memcg);
@@ -5033,28 +5038,11 @@ static void mem_cgroup_destroy(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
-	kmem_cgroup_destroy(cont);
+	kmem_cgroup_destroy(memcg);
 
 	mem_cgroup_put(memcg);
 }
 
-static int mem_cgroup_populate(struct cgroup_subsys *ss,
-			       struct cgroup *cont)
-{
-	int ret;
-
-	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
-			       ARRAY_SIZE(mem_cgroup_files));
-
-	if (!ret)
-		ret = register_memsw_files(cont, ss);
-
-	if (!ret)
-		ret = register_kmem_files(cont, ss);
-
-	return ret;
-}
-
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
 #define PRECHARGE_COUNT_AT_ONCE	256
@@ -5481,7 +5469,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	 * part of thp split is not executed yet.
 	 */
 	if (pmd_trans_huge_lock(pmd, vma) == 1) {
-		if (!mc.precharge) {
+		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(&vma->vm_mm->page_table_lock);
 			return 0;
 		}
@@ -5638,12 +5626,13 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.create = mem_cgroup_create,
 	.pre_destroy = mem_cgroup_pre_destroy,
 	.destroy = mem_cgroup_destroy,
-	.populate = mem_cgroup_populate,
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
+	.base_cftypes = mem_cgroup_files,
 	.early_init = 0,
 	.use_id = 1,
+	.__DEPRECATED_clear_css_refs = true,
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				 details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52d9603..69a1889f3790 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
 
@@ -2315,8 +2317,12 @@
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 918330f71dba..9f389e50ed18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4763,12 +4763,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
-		printk(" %-8s ", zone_names[i]);
+		printk(KERN_CONT " %-8s ", zone_names[i]);
 		if (arch_zone_lowest_possible_pfn[i] ==
 				arch_zone_highest_possible_pfn[i])
-			printk("empty\n");
+			printk(KERN_CONT "empty\n");
 		else
-			printk("%0#10lx -> %0#10lx\n",
+			printk(KERN_CONT "%0#10lx -> %0#10lx\n",
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
 	}
diff --git a/mm/slub.c b/mm/slub.c
index ffe13fdf8144..80848cd3901c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2040,7 +2040,7 @@ static bool has_cpu_slab(int cpu, void *info)
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return !!(c->page);
+	return c->page || c->partial;
 }
 
 static void flush_all(struct kmem_cache *s)