Diffstat (limited to 'mm')
-rw-r--r--   mm/bootmem.c        |   1
-rw-r--r--   mm/memblock.c       |  28
-rw-r--r--   mm/memcontrol.c     |  62
-rw-r--r--   mm/memory_hotplug.c |  31
-rw-r--r--   mm/mremap.c         |   4
-rw-r--r--   mm/nobootmem.c      |   1
-rw-r--r--   mm/shmem.c          | 181
-rw-r--r--   mm/util.c           |  31
-rw-r--r--   mm/vmstat.c         |  12
9 files changed, 208 insertions, 143 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 3b6380784c28..91e32bc8517f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -33,6 +33,7 @@ EXPORT_SYMBOL(contig_page_data);
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
+unsigned long long max_possible_pfn;
 
 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
diff --git a/mm/memblock.c b/mm/memblock.c
index d300f1329814..07ff069fef25 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -822,6 +822,17 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 }
 
+/**
+ * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
+{
+	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+}
 
 /**
  * __next_reserved_mem_region - next function for for_each_reserved_region()
@@ -913,6 +924,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1022,6 +1037,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1519,6 +1538,15 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+int __init_memblock memblock_is_map_memory(phys_addr_t addr)
+{
+	int i = memblock_search(&memblock.memory, addr);
+
+	if (i == -1)
+		return false;
+	return !memblock_is_nomap(&memblock.memory.regions[i]);
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
 			 unsigned long *start_pfn, unsigned long *end_pfn)
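Note (illustrative, not part of the patch): the new memblock_mark_nomap() and memblock_is_map_memory() helpers above are aimed at early platform code that must describe firmware-owned RAM to memblock without putting it in the kernel's linear mapping. A minimal sketch of such a caller follows, assuming the matching declarations land in <linux/memblock.h> as part of the same series; the region values and the reserve_firmware_region()/pfn_is_mappable() names are hypothetical.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Hypothetical firmware-owned range; real values would come from FDT/UEFI. */
#define FW_REGION_BASE	0x80000000UL
#define FW_REGION_SIZE	SZ_2M

static void __init reserve_firmware_region(void)
{
	/* Keep the range known to memblock but out of the linear mapping. */
	if (memblock_mark_nomap(FW_REGION_BASE, FW_REGION_SIZE))
		pr_warn("failed to mark firmware region NOMAP\n");
}

static bool __init pfn_is_mappable(unsigned long pfn)
{
	/* True only for RAM that is present and not marked MEMBLOCK_NOMAP. */
	return memblock_is_map_memory(PFN_PHYS(pfn));
}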
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e234c21a5e6c..14cb1db4c52b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -903,14 +903,20 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 		if (prev && reclaim->generation != iter->generation)
 			goto out_unlock;
 
-		do {
+		while (1) {
 			pos = READ_ONCE(iter->position);
+			if (!pos || css_tryget(&pos->css))
+				break;
 			/*
-			 * A racing update may change the position and
-			 * put the last reference, hence css_tryget(),
-			 * or retry to see the updated position.
+			 * css reference reached zero, so iter->position will
+			 * be cleared by ->css_released. However, we should not
+			 * rely on this happening soon, because ->css_released
+			 * is called from a work queue, and by busy-waiting we
+			 * might block it. So we clear iter->position right
+			 * away.
 			 */
-		} while (pos && !css_tryget(&pos->css));
+			(void)cmpxchg(&iter->position, pos, NULL);
+		}
 	}
 
 	if (pos)
@@ -956,17 +962,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 	}
 
 	if (reclaim) {
-		if (cmpxchg(&iter->position, pos, memcg) == pos) {
-			if (memcg)
-				css_get(&memcg->css);
-			if (pos)
-				css_put(&pos->css);
-		}
-
 		/*
-		 * pairs with css_tryget when dereferencing iter->position
-		 * above.
+		 * The position could have already been updated by a competing
+		 * thread, so check that the value hasn't changed since we read
+		 * it to avoid reclaiming from the same cgroup twice.
 		 */
+		(void)cmpxchg(&iter->position, pos, memcg);
+
 		if (pos)
 			css_put(&pos->css);
 
@@ -999,6 +1001,28 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup_reclaim_iter *iter;
+	struct mem_cgroup_per_zone *mz;
+	int nid, zid;
+	int i;
+
+	while ((memcg = parent_mem_cgroup(memcg))) {
+		for_each_node(nid) {
+			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+				for (i = 0; i <= DEF_PRIORITY; i++) {
+					iter = &mz->iter[i];
+					cmpxchg(&iter->position,
+						dead_memcg, NULL);
+				}
+			}
+		}
+	}
+}
+
 /*
  * Iteration constructs for visiting all cgroups (under a tree). If
  * loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -4324,6 +4348,13 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	wb_memcg_offline(memcg);
 }
 
+static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+	invalidate_reclaim_iterators(memcg);
+}
+
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -4782,7 +4813,7 @@ static void mem_cgroup_clear_mc(void)
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct cgroup_subsys_state *css;
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
 	struct mem_cgroup *from;
 	struct task_struct *leader, *p;
 	struct mm_struct *mm;
@@ -5185,6 +5216,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_alloc = mem_cgroup_css_alloc,
 	.css_online = mem_cgroup_css_online,
 	.css_offline = mem_cgroup_css_offline,
+	.css_released = mem_cgroup_css_released,
 	.css_free = mem_cgroup_css_free,
 	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 67d488ab495e..a042a9d537bb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1375,23 +1375,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
  */
 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pfn;
+	unsigned long pfn, sec_end_pfn;
 	struct zone *zone = NULL;
 	struct page *page;
 	int i;
-	for (pfn = start_pfn;
+	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
 	     pfn < end_pfn;
-	     pfn += MAX_ORDER_NR_PAGES) {
-		i = 0;
-		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
-		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-			i++;
-		if (i == MAX_ORDER_NR_PAGES)
+	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+		/* Make sure the memory section is present first */
+		if (!present_section_nr(pfn_to_section_nr(pfn)))
 			continue;
-		page = pfn_to_page(pfn + i);
-		if (zone && page_zone(page) != zone)
-			return 0;
-		zone = page_zone(page);
+		for (; pfn < sec_end_pfn && pfn < end_pfn;
+		     pfn += MAX_ORDER_NR_PAGES) {
+			i = 0;
+			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
+			while ((i < MAX_ORDER_NR_PAGES) &&
+			       !pfn_valid_within(pfn + i))
+				i++;
+			if (i == MAX_ORDER_NR_PAGES)
+				continue;
+			page = pfn_to_page(pfn + i);
+			if (zone && page_zone(page) != zone)
+				return 0;
+			zone = page_zone(page);
+		}
 	}
 	return 1;
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index c25bc6268e46..de824e72c3e8 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -319,6 +319,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	hiwater_vm = mm->hiwater_vm;
 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
 
+	/* Tell pfnmap has moved from this vma */
+	if (unlikely(vma->vm_flags & VM_PFNMAP))
+		untrack_pfn_moved(vma);
+
 	if (do_munmap(mm, old_addr, old_len) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
 		vm_unacct_memory(excess >> PAGE_SHIFT);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e57cf24babd6..99feb2b07fc5 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -31,6 +31,7 @@ EXPORT_SYMBOL(contig_page_data);
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
+unsigned long long max_possible_pfn;
 
 static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 					u64 goal, u64 limit)
diff --git a/mm/shmem.c b/mm/shmem.c
index 2afcdbbdb685..5813b7fa85b6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2438,7 +2438,6 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	int len;
 	struct inode *inode;
 	struct page *page;
-	char *kaddr;
 	struct shmem_inode_info *info;
 
 	len = strlen(symname) + 1;
@@ -2477,9 +2476,8 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 		}
 		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_op = &shmem_symlink_inode_operations;
-		kaddr = kmap_atomic(page);
-		memcpy(kaddr, symname, len);
-		kunmap_atomic(kaddr);
+		inode_nohighmem(inode);
+		memcpy(page_address(page), symname, len);
 		SetPageUptodate(page);
 		set_page_dirty(page);
 		unlock_page(page);
@@ -2492,23 +2490,34 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	return 0;
 }
 
-static const char *shmem_follow_link(struct dentry *dentry, void **cookie)
+static void shmem_put_link(void *arg)
 {
-	struct page *page = NULL;
-	int error = shmem_getpage(d_inode(dentry), 0, &page, SGP_READ, NULL);
-	if (error)
-		return ERR_PTR(error);
-	unlock_page(page);
-	*cookie = page;
-	return kmap(page);
+	mark_page_accessed(arg);
+	put_page(arg);
 }
 
-static void shmem_put_link(struct inode *unused, void *cookie)
+static const char *shmem_get_link(struct dentry *dentry,
+				  struct inode *inode,
+				  struct delayed_call *done)
 {
-	struct page *page = cookie;
-	kunmap(page);
-	mark_page_accessed(page);
-	page_cache_release(page);
+	struct page *page = NULL;
+	int error;
+	if (!dentry) {
+		page = find_get_page(inode->i_mapping, 0);
+		if (!page)
+			return ERR_PTR(-ECHILD);
+		if (!PageUptodate(page)) {
+			put_page(page);
+			return ERR_PTR(-ECHILD);
+		}
+	} else {
+		error = shmem_getpage(inode, 0, &page, SGP_READ, NULL);
+		if (error)
+			return ERR_PTR(error);
+		unlock_page(page);
+	}
+	set_delayed_call(done, shmem_put_link, page);
+	return page_address(page);
 }
 
 #ifdef CONFIG_TMPFS_XATTR
@@ -2555,122 +2564,74 @@ static int shmem_initxattrs(struct inode *inode,
 	return 0;
 }
 
-static const struct xattr_handler *shmem_xattr_handlers[] = {
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	&posix_acl_access_xattr_handler,
-	&posix_acl_default_xattr_handler,
-#endif
-	NULL
-};
-
-static int shmem_xattr_validate(const char *name)
-{
-	struct { const char *prefix; size_t len; } arr[] = {
-		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
-		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(arr); i++) {
-		size_t preflen = arr[i].len;
-		if (strncmp(name, arr[i].prefix, preflen) == 0) {
-			if (!name[preflen])
-				return -EINVAL;
-			return 0;
-		}
-	}
-	return -EOPNOTSUPP;
-}
-
-static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
-			      void *buffer, size_t size)
+static int shmem_xattr_handler_get(const struct xattr_handler *handler,
+				   struct dentry *dentry, const char *name,
+				   void *buffer, size_t size)
 {
 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
-	int err;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_getxattr(dentry, name, buffer, size);
-
-	err = shmem_xattr_validate(name);
-	if (err)
-		return err;
 
+	name = xattr_full_name(handler, name);
 	return simple_xattr_get(&info->xattrs, name, buffer, size);
 }
 
-static int shmem_setxattr(struct dentry *dentry, const char *name,
-			  const void *value, size_t size, int flags)
+static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+				   struct dentry *dentry, const char *name,
+				   const void *value, size_t size, int flags)
 {
 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
-	int err;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_setxattr(dentry, name, value, size, flags);
-
-	err = shmem_xattr_validate(name);
-	if (err)
-		return err;
 
+	name = xattr_full_name(handler, name);
 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
 }
 
-static int shmem_removexattr(struct dentry *dentry, const char *name)
-{
-	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
-	int err;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_removexattr(dentry, name);
+static const struct xattr_handler shmem_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.get = shmem_xattr_handler_get,
+	.set = shmem_xattr_handler_set,
+};
 
-	err = shmem_xattr_validate(name);
-	if (err)
-		return err;
+static const struct xattr_handler shmem_trusted_xattr_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.get = shmem_xattr_handler_get,
+	.set = shmem_xattr_handler_set,
+};
 
-	return simple_xattr_remove(&info->xattrs, name);
-}
+static const struct xattr_handler *shmem_xattr_handlers[] = {
+#ifdef CONFIG_TMPFS_POSIX_ACL
+	&posix_acl_access_xattr_handler,
+	&posix_acl_default_xattr_handler,
+#endif
+	&shmem_security_xattr_handler,
+	&shmem_trusted_xattr_handler,
+	NULL
+};
 
 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
 {
 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
-	return simple_xattr_list(&info->xattrs, buffer, size);
+	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
 }
 #endif /* CONFIG_TMPFS_XATTR */
 
 static const struct inode_operations shmem_short_symlink_operations = {
 	.readlink	= generic_readlink,
-	.follow_link	= simple_follow_link,
+	.get_link	= simple_get_link,
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= shmem_setxattr,
-	.getxattr	= shmem_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= shmem_removexattr,
+	.removexattr	= generic_removexattr,
 #endif
 };
 
 static const struct inode_operations shmem_symlink_inode_operations = {
 	.readlink	= generic_readlink,
-	.follow_link	= shmem_follow_link,
-	.put_link	= shmem_put_link,
+	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= shmem_setxattr,
-	.getxattr	= shmem_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= shmem_removexattr,
+	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -3142,10 +3103,10 @@ static const struct inode_operations shmem_inode_operations = {
 	.getattr	= shmem_getattr,
 	.setattr	= shmem_setattr,
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= shmem_setxattr,
-	.getxattr	= shmem_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= shmem_removexattr,
+	.removexattr	= generic_removexattr,
 	.set_acl	= simple_set_acl,
 #endif
 };
@@ -3164,10 +3125,10 @@ static const struct inode_operations shmem_dir_inode_operations = {
 	.tmpfile	= shmem_tmpfile,
 #endif
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= shmem_setxattr,
-	.getxattr	= shmem_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= shmem_removexattr,
+	.removexattr	= generic_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	.setattr	= shmem_setattr,
@@ -3177,10 +3138,10 @@ static const struct inode_operations shmem_dir_inode_operations = {
 
 static const struct inode_operations shmem_special_inode_operations = {
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= shmem_setxattr,
-	.getxattr	= shmem_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= shmem_removexattr,
+	.removexattr	= generic_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	.setattr	= shmem_setattr,
diff --git a/mm/util.c b/mm/util.c
index 9af1c12b310c..2d28f7930043 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -176,6 +176,37 @@ char *strndup_user(const char __user *s, long n)
 }
 EXPORT_SYMBOL(strndup_user);
 
+/**
+ * memdup_user_nul - duplicate memory region from user space and NUL-terminate
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure.
+ */
+void *memdup_user_nul(const void __user *src, size_t len)
+{
+	char *p;
+
+	/*
+	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
+	 * cause pagefault, which makes it pointless to use GFP_NOFS
+	 * or GFP_ATOMIC.
+	 */
+	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(p, src, len)) {
+		kfree(p);
+		return ERR_PTR(-EFAULT);
+	}
+	p[len] = '\0';
+
+	return p;
+}
+EXPORT_SYMBOL(memdup_user_nul);
+
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0d5712b0206c..c54fd2924f25 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * particular counter cannot be updated from interrupt context.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			   int delta)
+			   long delta)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
  *	1	Overstepping half of threshold
  *	-1	Overstepping minus half of threshold
  */
-static inline void mod_state(struct zone *zone,
-	enum zone_stat_item item, int delta, int overstep_mode)
+static inline void mod_state(struct zone *zone, enum zone_stat_item item,
+			     long delta, int overstep_mode)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -357,7 +357,7 @@ static inline void mod_state(struct zone *zone,
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			 int delta)
+			 long delta)
 {
 	mod_state(zone, item, delta, 0);
 }
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * Use interrupt disable to serialize counter updates
  */
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			 int delta)
+			 long delta)
 {
 	unsigned long flags;
 
@@ -1483,6 +1483,7 @@ static void __init start_shepherd_timer(void)
 		BUG();
 	cpumask_copy(cpu_stat_off, cpu_online_mask);
 
+	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 	schedule_delayed_work(&shepherd,
 		round_jiffies_relative(sysctl_stat_interval));
 }
@@ -1550,7 +1551,6 @@ static int __init setup_vmstat(void)
 		start_shepherd_timer();
 	cpu_notifier_register_done();
 
-	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 #endif
 #ifdef CONFIG_PROC_FS
 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
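Note (illustrative, not part of the patch): memdup_user_nul(), added to mm/util.c above, lets callers that previously open-coded kmalloc() + copy_from_user() + manual NUL-termination do all three in one step. A minimal sketch of a typical caller follows, assuming the matching declaration is exported via <linux/string.h> in the same series; example_write() and its surrounding context are hypothetical.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char *kbuf;

	/* Copy the user buffer and get back a NUL-terminated kernel string. */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	pr_debug("user wrote: %s\n", kbuf);

	kfree(kbuf);
	return count;
}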