Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig             |  14
-rw-r--r--   mm/fadvise.c           |  10
-rw-r--r--   mm/kasan/kasan_init.c  |   2
-rw-r--r--   mm/kmemleak.c          |  12
-rw-r--r--   mm/memcontrol.c        |   6
-rw-r--r--   mm/mempolicy.c         |  92
-rw-r--r--   mm/migrate.c           |  39
-rw-r--r--   mm/mmap.c              |  17
-rw-r--r--   mm/nommu.c             |  37
-rw-r--r--   mm/page_alloc.c        |   5
-rw-r--r--   mm/page_owner.c        |   6
-rw-r--r--   mm/percpu.c            |   4
-rw-r--r--   mm/readahead.c         |   7
-rw-r--r--   mm/slab.c              |   1
-rw-r--r--   mm/sparse.c            |  37
-rw-r--r--   mm/vmstat.c            |   2
-rw-r--r--   mm/zsmalloc.c          |  13
17 files changed, 178 insertions, 126 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c782e8fb7235..d5004d82a1d6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -278,13 +278,6 @@ config BOUNCE
 	  by default when ZONE_DMA or HIGHMEM is selected, but you
 	  may say n to override this.
 
-# On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
-# have more than 4GB of memory, but we don't currently use the IOTLB to present
-# a 32-bit address to OHCI.  So we need to use a bounce pool instead.
-config NEED_BOUNCE_POOL
-	bool
-	default y if TILE && USB_OHCI_HCD
-
 config NR_QUICK
 	int
 	depends on QUICKLIST
@@ -627,15 +620,14 @@ config GENERIC_EARLY_IOREMAP
 
 config MAX_STACK_SIZE_MB
 	int "Maximum user stack size for 32-bit processes (MB)"
 	default 80
-	range 8 256 if METAG
 	range 8 2048
 	depends on STACK_GROWSUP && (!64BIT || COMPAT)
 	help
 	  This is the maximum stack size in Megabytes in the VM layout of
 	  32-bit user processes when the stack grows upwards (currently only on parisc
-	  and metag arch). The stack will be located at the highest memory
-	  address minus the given value, unless the RLIMIT_STACK hard limit is
-	  changed to a smaller value in which case that is used.
+	  arch). The stack will be located at the highest memory address minus
+	  the given value, unless the RLIMIT_STACK hard limit is changed to a
+	  smaller value in which case that is used.
 
 	  A sane initial value is 80 MB.
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 767887f5f3bf..afa41491d324 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -26,7 +26,8 @@
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
  */
-SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
 	struct fd f = fdget(fd);
 	struct inode *inode;
@@ -185,11 +186,16 @@ out:
 	return ret;
 }
 
+SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+{
+	return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
 #ifdef __ARCH_WANT_SYS_FADVISE64
 
 SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 {
-	return sys_fadvise64_64(fd, offset, len, advice);
+	return ksys_fadvise64_64(fd, offset, len, advice);
 }
 #endif
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 554e4c0f23a2..f436246ccc79 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -31,7 +31,7 @@
 unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 #if CONFIG_PGTABLE_LEVELS > 4
-p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss;
+p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
 #endif
 #if CONFIG_PGTABLE_LEVELS > 3
 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e83987c55a08..46c2290a08f1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1657,8 +1657,7 @@ static void start_scan_thread(void)
 }
 
 /*
- * Stop the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
+ * Stop the automatic memory scanning thread.
  */
 static void stop_scan_thread(void)
 {
@@ -1921,12 +1920,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
 {
 	stop_scan_thread();
 
+	mutex_lock(&scan_mutex);
 	/*
-	 * Once the scan thread has stopped, it is safe to no longer track
-	 * object freeing. Ordering of the scan thread stopping and the memory
-	 * accesses below is guaranteed by the kthread_stop() function.
+	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
+	 * longer track object freeing. Ordering of the scan thread stopping and
+	 * the memory accesses below is guaranteed by the kthread_stop()
+	 * function.
 	 */
 	kmemleak_free_enabled = 0;
+	mutex_unlock(&scan_mutex);
 
 	if (!kmemleak_found_leaks)
 		__kmemleak_do_cleanup();
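The mm/fadvise.c hunk above splits the syscall body out into ksys_fadvise64_64() so that in-kernel users no longer have to call sys_fadvise64_64(), the raw syscall entry point. A minimal sketch of such an in-kernel caller follows; the function name and use case are hypothetical, only ksys_fadvise64_64() and POSIX_FADV_WILLNEED come from the kernel:

	/* Hypothetical helper, not part of this patch: ask the page cache to
	 * start reading a file range in the background via the new wrapper. */
	static int prefault_file_range(int fd, loff_t offset, loff_t len)
	{
		return ksys_fadvise64_64(fd, offset, len, POSIX_FADV_WILLNEED);
	}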
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 670e99b68aa6..9ec024b862ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -714,9 +714,9 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
  * invocations for reference counting, or use mem_cgroup_iter_break()
  * to cancel a hierarchy walk before the round-trip is complete.
  *
- * Reclaimers can specify a zone and a priority level in @reclaim to
+ * Reclaimers can specify a node and a priority level in @reclaim to
  * divide up the memcgs in the hierarchy among all concurrent
- * reclaimers operating on the same zone and priority.
+ * reclaimers operating on the same node and priority.
  */
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup *prev,
@@ -2299,7 +2299,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
 }
 
 /**
- * memcg_kmem_charge: charge a kmem page
+ * memcg_kmem_charge_memcg: charge a kmem page
  * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 32cba0332787..01cbb7078d6c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1336,9 +1336,9 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
 }
 
-SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
-		unsigned long, mode, const unsigned long __user *, nmask,
-		unsigned long, maxnode, unsigned, flags)
+static long kernel_mbind(unsigned long start, unsigned long len,
+			 unsigned long mode, const unsigned long __user *nmask,
+			 unsigned long maxnode, unsigned int flags)
 {
 	nodemask_t nodes;
 	int err;
@@ -1357,9 +1357,16 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
 }
 
+SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
+		unsigned long, mode, const unsigned long __user *, nmask,
+		unsigned long, maxnode, unsigned int, flags)
+{
+	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
+}
+
 /* Set the process memory policy */
-SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
-		unsigned long, maxnode)
+static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
+				 unsigned long maxnode)
 {
 	int err;
 	nodemask_t nodes;
@@ -1377,9 +1384,15 @@ SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
 	return do_set_mempolicy(mode, flags, &nodes);
 }
 
-SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
-		const unsigned long __user *, old_nodes,
-		const unsigned long __user *, new_nodes)
+SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
+		unsigned long, maxnode)
+{
+	return kernel_set_mempolicy(mode, nmask, maxnode);
+}
+
+static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
+				const unsigned long __user *old_nodes,
+				const unsigned long __user *new_nodes)
 {
 	struct mm_struct *mm = NULL;
 	struct task_struct *task;
@@ -1469,11 +1482,20 @@ out_put:
 
 }
 
+SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+		const unsigned long __user *, old_nodes,
+		const unsigned long __user *, new_nodes)
+{
+	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
+}
+
 /* Retrieve NUMA policy */
-SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
-		unsigned long __user *, nmask, unsigned long, maxnode,
-		unsigned long, addr, unsigned long, flags)
+static int kernel_get_mempolicy(int __user *policy,
+				unsigned long __user *nmask,
+				unsigned long maxnode,
+				unsigned long addr,
+				unsigned long flags)
 {
 	int err;
 	int uninitialized_var(pval);
@@ -1496,6 +1518,13 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 	return err;
 }
 
+SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+		unsigned long __user *, nmask, unsigned long, maxnode,
+		unsigned long, addr, unsigned long, flags)
+{
+	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
+}
+
 #ifdef CONFIG_COMPAT
 
 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
@@ -1514,7 +1543,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 	if (nmask)
 		nm = compat_alloc_user_space(alloc_size);
 
-	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
+	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
 
 	if (!err && nmask) {
 		unsigned long copy_size;
@@ -1546,7 +1575,7 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 			return -EFAULT;
 	}
 
-	return sys_set_mempolicy(mode, nm, nr_bits+1);
+	return kernel_set_mempolicy(mode, nm, nr_bits+1);
 }
 
 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
@@ -1568,10 +1597,43 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 			return -EFAULT;
 	}
 
-	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
 }
 
-#endif
+COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
+		       compat_ulong_t, maxnode,
+		       const compat_ulong_t __user *, old_nodes,
+		       const compat_ulong_t __user *, new_nodes)
+{
+	unsigned long __user *old = NULL;
+	unsigned long __user *new = NULL;
+	nodemask_t tmp_mask;
+	unsigned long nr_bits;
+	unsigned long size;
+
+	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
+	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+	if (old_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
+			return -EFAULT;
+		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
+		if (new_nodes)
+			new = old + size / sizeof(unsigned long);
+		if (copy_to_user(old, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	if (new_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
+			return -EFAULT;
+		if (new == NULL)
+			new = compat_alloc_user_space(size);
+		if (copy_to_user(new, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
+}
+
+#endif /* CONFIG_COMPAT */
 
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 						unsigned long addr)
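In the new COMPAT_SYSCALL_DEFINE4(migrate_pages, ...) above, the 32-bit node masks are widened into native-sized bitmaps placed on the compat user-space stack. A worked example of the size arithmetic, with illustrative values that are not part of the patch:

	/*
	 * Worked example (illustrative values, not from the patch):
	 *   maxnode = 1025                          -> node bits 0..1023 may be set
	 *   nr_bits = min(1025 - 1, MAX_NUMNODES) = 1024   (assuming MAX_NUMNODES >= 1024)
	 *   size    = ALIGN(1024, BITS_PER_LONG) / 8 = 1024 / 8 = 128 bytes on a 64-bit kernel
	 * With both old_nodes and new_nodes present, compat_alloc_user_space(2 * 128)
	 * reserves 256 bytes, and new = old + 128 / sizeof(unsigned long) = old + 16,
	 * so the two native-format masks sit back to back.
	 */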
diff --git a/mm/migrate.c b/mm/migrate.c
index 1e5525a25691..003886606a22 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -34,6 +34,7 @@
 #include <linux/backing-dev.h>
 #include <linux/compaction.h>
 #include <linux/syscalls.h>
+#include <linux/compat.h>
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
@@ -1745,10 +1746,10 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
  * Move a list of pages in the address space of the currently executing
  * process.
  */
-SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
-		const void __user * __user *, pages,
-		const int __user *, nodes,
-		int __user *, status, int, flags)
+static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
+			     const void __user * __user *pages,
+			     const int __user *nodes,
+			     int __user *status, int flags)
 {
 	struct task_struct *task;
 	struct mm_struct *mm;
@@ -1807,6 +1808,36 @@ out:
 	return err;
 }
 
+SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+		const void __user * __user *, pages,
+		const int __user *, nodes,
+		int __user *, status, int, flags)
+{
+	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
+		       compat_uptr_t __user *, pages32,
+		       const int __user *, nodes,
+		       int __user *, status,
+		       int, flags)
+{
+	const void __user * __user *pages;
+	int i;
+
+	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
+	for (i = 0; i < nr_pages; i++) {
+		compat_uptr_t p;
+
+		if (get_user(p, pages32 + i) ||
+		    put_user(compat_ptr(p), pages + i))
+			return -EFAULT;
+	}
+	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
+}
+#endif /* CONFIG_COMPAT */
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA
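For context on the interface being rearranged above, this is how move_pages(2) is typically driven from userspace through libnuma; a small sketch assuming the numaif.h wrapper (nothing here is part of the patch). With nodes == NULL the call moves nothing and only reports, in status[], which node each page currently resides on:

	#include <numaif.h>	/* libnuma wrapper for move_pages(2); link with -lnuma */

	/* Return the NUMA node backing the page at addr, or a negative value on error. */
	static int page_node(void *addr)
	{
		void *pages[1] = { addr };
		int status[1];

		if (move_pages(0 /* current process */, 1, pages, NULL, status, 0) < 0)
			return -1;
		return status[0];	/* node id, or a negative per-page error code */
	}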
diff --git a/mm/mmap.c b/mm/mmap.c
index 9efdc021ad22..aa0dc8231c0d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1488,9 +1488,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	return addr;
 }
 
-SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
-		unsigned long, prot, unsigned long, flags,
-		unsigned long, fd, unsigned long, pgoff)
+unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
+			      unsigned long prot, unsigned long flags,
+			      unsigned long fd, unsigned long pgoff)
 {
 	struct file *file = NULL;
 	unsigned long retval;
@@ -1537,6 +1537,13 @@ out_fput:
 	return retval;
 }
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+		unsigned long, prot, unsigned long, flags,
+		unsigned long, fd, unsigned long, pgoff)
+{
+	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
 #ifdef __ARCH_WANT_SYS_OLD_MMAP
 struct mmap_arg_struct {
 	unsigned long addr;
@@ -1556,8 +1563,8 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 	if (offset_in_page(a.offset))
 		return -EINVAL;
 
-	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
-			      a.offset >> PAGE_SHIFT);
+	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 }
 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
diff --git a/mm/nommu.c b/mm/nommu.c
index ebb6e618dade..4f8720243ae7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -663,22 +663,6 @@ static void put_nommu_region(struct vm_region *region)
 }
 
 /*
- * update protection on a vma
- */
-static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
-{
-#ifdef CONFIG_MPU
-	struct mm_struct *mm = vma->vm_mm;
-	long start = vma->vm_start & PAGE_MASK;
-	while (start < vma->vm_end) {
-		protect_page(mm, start, flags);
-		start += PAGE_SIZE;
-	}
-	update_protections(mm);
-#endif
-}
-
-/*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
@@ -695,8 +679,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 	vma->vm_mm = mm;
 
-	protect_vma(vma, vma->vm_flags);
-
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -757,8 +739,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	struct mm_struct *mm = vma->vm_mm;
 	struct task_struct *curr = current;
 
-	protect_vma(vma, 0);
-
 	mm->map_count--;
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		/* if the vma is cached, invalidate the entire cache */
@@ -1423,9 +1403,9 @@ error_getting_region:
 	return -ENOMEM;
 }
 
-SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
-		unsigned long, prot, unsigned long, flags,
-		unsigned long, fd, unsigned long, pgoff)
+unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
+			      unsigned long prot, unsigned long flags,
+			      unsigned long fd, unsigned long pgoff)
 {
 	struct file *file = NULL;
 	unsigned long retval = -EBADF;
@@ -1447,6 +1427,13 @@ out:
 	return retval;
 }
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+		unsigned long, prot, unsigned long, flags,
+		unsigned long, fd, unsigned long, pgoff)
+{
+	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
 #ifdef __ARCH_WANT_SYS_OLD_MMAP
 struct mmap_arg_struct {
 	unsigned long addr;
@@ -1466,8 +1453,8 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 	if (offset_in_page(a.offset))
 		return -EINVAL;
 
-	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
-			      a.offset >> PAGE_SHIFT);
+	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 }
 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
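Both the MMU and the nommu variant of mmap_pgoff now share ksys_mmap_pgoff(), and the old_mmap wrappers above already use it. A hypothetical architecture-side wrapper, sketched here only to show the intended calling convention (the entry point is made up; ksys_mmap_pgoff() is the helper from the hunks above):

	/* Illustrative arch wrapper: an mmap2-style entry point whose pgoff is
	 * already expressed in page-sized units can forward to the helper
	 * instead of calling sys_mmap_pgoff() from kernel code. */
	SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
	}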
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1741dd23e7c1..4ea018263210 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6192,10 +6192,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = alloc_remap(pgdat->node_id, size);
-		if (!map)
-			map = memblock_virt_alloc_node_nopanic(size,
-							       pgdat->node_id);
+		map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 		pgdat->node_mem_map = map + offset;
 	}
 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9886c6073828..7172e0a80e13 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
 static inline bool check_recursive_alloc(struct stack_trace *trace,
 					unsigned long ip)
 {
-	int i, count;
+	int i;
 
 	if (!trace->nr_entries)
 		return false;
 
-	for (i = 0, count = 0; i < trace->nr_entries; i++) {
-		if (trace->entries[i] == ip && ++count == 2)
+	for (i = 0; i < trace->nr_entries; i++) {
+		if (trace->entries[i] == ip)
 			return true;
 	}
diff --git a/mm/percpu.c b/mm/percpu.c
index 9297098519a6..0b6480979ac7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2740,11 +2740,7 @@ void __init setup_per_cpu_areas(void)
 
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
-#ifdef CONFIG_CRIS
-#warning "the CRIS architecture has physical and virtual addresses confused"
-#else
 	pcpu_free_alloc_info(ai);
-#endif
 }
 
 #endif	/* CONFIG_SMP */
diff --git a/mm/readahead.c b/mm/readahead.c
index c4ca70239233..4d57b4644f98 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -573,7 +573,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
 	return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
-SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
+ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
 	ssize_t ret;
 	struct fd f;
@@ -592,3 +592,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
 	}
 	return ret;
 }
+
+SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
+{
+	return ksys_readahead(fd, offset, count);
+}
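The mm/page_owner.c hunk relaxes check_recursive_alloc(): an allocation is now treated as recursive as soon as the saving function's own ip appears anywhere in the captured trace, instead of requiring a second occurrence. The same logic as a standalone sketch in plain C (generic code, not the kernel function itself):

	#include <stdbool.h>

	/* Return true if ip occurs in the captured stack trace entries[0..n-1]. */
	static bool trace_contains(const unsigned long *entries, unsigned int n,
				   unsigned long ip)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (entries[i] == ip)
				return true;
		return false;
	}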
diff --git a/mm/slab.c b/mm/slab.c
index 324446621b3e..9095c3945425 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
+	memcg_link_cache(kmem_cache);
 	slab_state = PARTIAL;
 
 	/*
diff --git a/mm/sparse.c b/mm/sparse.c
index 7af5e7a92528..58cab483e81b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -236,28 +236,6 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 }
 
 /*
- * Only used by the i386 NUMA architecures, but relatively
- * generic code.
- */
-unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
-						     unsigned long end_pfn)
-{
-	unsigned long pfn;
-	unsigned long nr_pages = 0;
-
-	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-		if (nid != early_pfn_to_nid(pfn))
-			continue;
-
-		if (pfn_present(pfn))
-			nr_pages += PAGES_PER_SECTION;
-	}
-
-	return nr_pages * sizeof(struct page);
-}
-
-/*
  * Subtle, we encode the real pfn into the mem_map such that
  * the identity pfn - section_mem_map will return the actual
  * physical page frame number.
@@ -427,10 +405,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 	struct page *map;
 	unsigned long size;
 
-	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
-	if (map)
-		return map;
-
 	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 	map = memblock_virt_alloc_try_nid(size,
 					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -446,17 +420,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	unsigned long pnum;
 	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
 
-	map = alloc_remap(nodeid, size * map_count);
-	if (map) {
-		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-			if (!present_section_nr(pnum))
-				continue;
-			map_map[pnum] = map;
-			map += size;
-		}
-		return;
-	}
-
 	size = PAGE_ALIGN(size);
 	map = memblock_virt_alloc_try_nid_raw(size * map_count,
 					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2db6db6b1..33581be705f0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1839,9 +1839,11 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
+		preempt_disable();
 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
+		preempt_enable();
 	}
 }
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c3013505c305..b7f61cd1c709 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -84,18 +84,19 @@
  * This is made more complicated by various memory models and PAE.
  */
 
-#ifndef MAX_PHYSMEM_BITS
-#ifdef CONFIG_HIGHMEM64G
-#define MAX_PHYSMEM_BITS 36
-#else /* !CONFIG_HIGHMEM64G */
+#ifndef MAX_POSSIBLE_PHYSMEM_BITS
+#ifdef MAX_PHYSMEM_BITS
+#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
+#else
 /*
  * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
  * be PAGE_SHIFT
  */
-#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
 #endif
 #endif
-#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+
+#define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
 
 /*
  * Memory for allocating for handle keeps object position by
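The mm/zsmalloc.c hunk sizes the encoded PFN field by MAX_POSSIBLE_PHYSMEM_BITS, falling back to an arch-provided MAX_PHYSMEM_BITS where one exists and to BITS_PER_LONG otherwise. A short worked example with illustrative numbers (the OBJ_INDEX_BITS split also involves tag bits not shown in this hunk):

	/*
	 * 64-bit kernel with 4 KiB pages, so PAGE_SHIFT = 12:
	 *   fallback case:                       _PFN_BITS = 64 - 12 = 52
	 *   arch defines MAX_PHYSMEM_BITS = 46:  _PFN_BITS = 46 - 12 = 34
	 * The fewer bits reserved for the PFN, the more of the encoded object
	 * value remains available for the object index (OBJ_INDEX_BITS).
	 */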