author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 18:05:37 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 18:05:37 -0800
commit | 608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree | faea7bb1764461c73d0953089bd5439d91733a03 /arch
parent | 414a6750e59b0b687034764c464e9ddecac0f7a6 (diff)
parent | 74d42d8fe146e870c52bde3b1c692f86cc8ff844 (diff)
download | blackbird-op-linux-608ff1a210ab0e8b969399039bf8e18693605910.tar.gz
download | blackbird-op-linux-608ff1a210ab0e8b969399039bf8e18693605910.zip
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
"About half of most of MM. Going very early this time due to
uncertainty over the coreautounifiednumasched things. I'll send the
other half of most of MM tomorrow. The rest of MM awaits a slab merge
from Pekka."
* emailed patches from Andrew Morton: (71 commits)
memory_hotplug: ensure every online node has NORMAL memory
memory_hotplug: handle empty zone when online_movable/online_kernel
mm, memory-hotplug: dynamic configure movable memory and portion memory
drivers/base/node.c: cleanup node_state_attr[]
bootmem: fix wrong call parameter for free_bootmem()
avr32, kconfig: remove HAVE_ARCH_BOOTMEM
mm: cma: remove watermark hacks
mm: cma: skip watermarks check for already isolated blocks in split_free_page()
mm, oom: fix race when specifying a thread as the oom origin
mm, oom: change type of oom_score_adj to short
mm: cleanup register_node()
mm, mempolicy: remove duplicate code
mm/vmscan.c: try_to_freeze() returns boolean
mm: introduce putback_movable_pages()
virtio_balloon: introduce migration primitives to balloon pages
mm: introduce compaction and migration for ballooned pages
mm: introduce a common interface for balloon pages mobility
mm: redefine address_space.assoc_mapping
mm: adjust address_space_operations.migratepage() return code
arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
...
Diffstat (limited to 'arch')
-rw-r--r-- | arch/alpha/include/asm/mman.h | 11
-rw-r--r-- | arch/arm/mm/mmap.c | 132
-rw-r--r-- | arch/avr32/Kconfig | 3
-rw-r--r-- | arch/mips/include/uapi/asm/mman.h | 11
-rw-r--r-- | arch/mips/mm/mmap.c | 111
-rw-r--r-- | arch/parisc/include/uapi/asm/mman.h | 11
-rw-r--r-- | arch/powerpc/kernel/sysfs.c | 4
-rw-r--r-- | arch/powerpc/platforms/cell/celleb_pci.c | 4
-rw-r--r-- | arch/s390/include/asm/page.h | 3
-rw-r--r-- | arch/sh/mm/mmap.c | 137
-rw-r--r-- | arch/sparc/kernel/sys_sparc_32.c | 27
-rw-r--r-- | arch/sparc/kernel/sys_sparc_64.c | 150
-rw-r--r-- | arch/sparc/mm/hugetlbpage.c | 124
-rw-r--r-- | arch/tile/mm/hugetlbpage.c | 139
-rw-r--r-- | arch/x86/include/asm/elf.h | 6
-rw-r--r-- | arch/x86/include/asm/mman.h | 3
-rw-r--r-- | arch/x86/kernel/sys_x86_64.c | 151
-rw-r--r-- | arch/x86/mm/hugetlbpage.c | 130
-rw-r--r-- | arch/x86/vdso/vma.c | 2
-rw-r--r-- | arch/xtensa/include/uapi/asm/mman.h | 11
20 files changed, 272 insertions, 898 deletions
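The bulk of the diff below retires each architecture's open-coded VMA-list walk (and the per-mm free_area_cache / cached_hole_size hints) in favour of the common vm_unmapped_area() helper. A condensed sketch of the top-down calling convention, assembled from the hunks that follow (kernel-internal code, not a standalone program; do_align stands in for each architecture's cache-colouring decision):

	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;	/* search downward from high_limit */
	info.length = len;			/* size of the requested mapping */
	info.low_limit = PAGE_SIZE;		/* never hand out page zero */
	info.high_limit = mm->mmap_base;
	/* cache colouring: force SHMLBA alignment within the page-aligned range */
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * On failure the helper returns -ENOMEM, which is not page aligned,
	 * so a plain alignment test doubles as the error check; the callers
	 * below then retry with a bottom-up search over the full range.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;			/* 0 = bottom-up search */
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}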
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h
index cbeb3616a28e..0086b472bc2b 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/asm/mman.h
@@ -63,4 +63,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* __ALPHA_MMAN_H__ */
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 89f2b7f7b042..10062ceadd1c 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,18 +11,6 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
@@ -69,9 +57,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -104,46 +92,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = mm->mmap_base;
-		mm->cached_hole_size = 0;
-	}
-full_search:
-	if (do_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		if (do_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -156,6 +112,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -187,70 +144,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-	}
-
-	if (mm->mmap_base < len)
-		goto bottomup;
-
-	addr = mm->mmap_base - len;
-	if (do_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start - len;
-		if (do_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (len < vma->vm_start);
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = mm->mmap_base;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 06e73bf665e9..c2bbc9a72222 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -193,9 +193,6 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y
 
-config HAVE_ARCH_BOOTMEM
-	def_bool n
-
 config ARCH_HAVE_MEMORY_PRESENT
 	def_bool n
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 46d3da0d4b92..9a936ac9a942 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -87,4 +87,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _ASM_MMAN_H */
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 302d779d5b0d..d9be7540a6be 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -45,18 +45,6 @@ static unsigned long mmap_base(unsigned long rnd)
 	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -71,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
@@ -107,97 +96,31 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 			return addr;
 	}
 
-	if (dir == UP) {
-		addr = mm->mmap_base;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
+	info.length = len;
+	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
 
-		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-			/* At this point:  (!vma || addr < vma->vm_end). */
-			if (TASK_SIZE - len < addr)
-				return -ENOMEM;
-			if (!vma || addr + len <= vma->vm_start)
-				return addr;
-			addr = vma->vm_end;
-			if (do_color_align)
-				addr = COLOUR_ALIGN(addr, pgoff);
-		}
-	} else {
-		/* check if free_area_cache is useful for us */
-		if (len <= mm->cached_hole_size) {
-			mm->cached_hole_size = 0;
-			mm->free_area_cache = mm->mmap_base;
-		}
+	if (dir == DOWN) {
+		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+		info.low_limit = PAGE_SIZE;
+		info.high_limit = mm->mmap_base;
+		addr = vm_unmapped_area(&info);
+
+		if (!(addr & ~PAGE_MASK))
+			return addr;
 
-		/*
-		 * either no address requested, or the mapping can't fit into
-		 * the requested address hole
-		 */
-		addr = mm->free_area_cache;
-		if (do_color_align) {
-			unsigned long base =
-				COLOUR_ALIGN_DOWN(addr - len, pgoff);
-			addr = base + len;
-		}
-
-		/* make sure it can fit in the remaining address space */
-		if (likely(addr > len)) {
-			vma = find_vma(mm, addr - len);
-			if (!vma || addr <= vma->vm_start) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr - len;
-			}
-		}
-
-		if (unlikely(mm->mmap_base < len))
-			goto bottomup;
-
-		addr = mm->mmap_base - len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-		do {
-			/*
-			 * Lookup failure means no vma is above this address,
-			 * else if new region fits below vma->vm_start,
-			 * return with success:
-			 */
-			vma = find_vma(mm, addr);
-			if (likely(!vma || addr + len <= vma->vm_start)) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr;
-			}
-
-			/* remember the largest hole we saw so far */
-			if (addr + mm->cached_hole_size < vma->vm_start)
-				mm->cached_hole_size = vma->vm_start - addr;
-
-			/* try just below the current vma->vm_start */
-			addr = vma->vm_start - len;
-			if (do_color_align)
-				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-		} while (likely(len < vma->vm_start));
-
-bottomup:
 		/*
 		 * A failed mmap() very likely causes application failure,
 		 * so fall back to the bottom-up function here. This scenario
 		 * can happen with large stack limits and large mmap()
 		 * allocations.
 		 */
-		mm->cached_hole_size = ~0UL;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
-		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-		/*
-		 * Restore the topdown base:
-		 */
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = ~0UL;
-
-		return addr;
 	}
+
+	info.flags = 0;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 12219ebce869..294d251ca7b2 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -70,4 +70,15 @@
 #define MAP_FILE	0
 #define MAP_VARIABLE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* __PARISC_MMAN_H__ */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index cf357a059ddb..3ce1f864c2d3 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -607,7 +607,7 @@ static void register_nodes(void)
 
 int sysfs_add_device_to_node(struct device *dev, int nid)
 {
-	struct node *node = &node_devices[nid];
+	struct node *node = node_devices[nid];
 	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
 			kobject_name(&dev->kobj));
 }
@@ -615,7 +615,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
 
 void sysfs_remove_device_from_node(struct device *dev, int nid)
 {
-	struct node *node = &node_devices[nid];
+	struct node *node = node_devices[nid];
 	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index abc8af43ea7c..173568140a32 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -401,11 +401,11 @@ error:
 	} else {
 		if (config && *config) {
 			size = 256;
-			free_bootmem((unsigned long)(*config), size);
+			free_bootmem(__pa(*config), size);
 		}
 		if (res && *res) {
 			size = sizeof(struct celleb_pci_resource);
-			free_bootmem((unsigned long)(*res), size);
+			free_bootmem(__pa(*res), size);
 		}
 	}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 6d5367060a56..39faa4ac9660 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -158,6 +158,9 @@ static inline int page_reset_referenced(unsigned long addr)
  * race against modification of the referenced bit. This function
  * should therefore only be called if it is not mapped in any
  * address space.
+ *
+ * Note that the bit gets set whenever page content is changed. That means
+ * also when the page is modified by DMA or from inside the kernel.
  */
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
 static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 80bf494ddbcb..6777177807c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -30,25 +30,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -79,47 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -131,6 +85,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -162,73 +117,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_colour_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_colour_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 0c9b31b22e07..57277c830151 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -34,11 +34,9 @@ asmlinkage unsigned long sys_getpagesize(void)
 	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
 }
 
-#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct * vmm;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -56,21 +54,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
-	if (flags & MAP_SHARED)
-		addr = COLOUR_ALIGN(addr);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - PAGE_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
-		if (flags & MAP_SHARED)
-			addr = COLOUR_ALIGN(addr);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = (flags & MAP_SHARED) ?
+		(PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 /*
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 878ef3d5fec5..97309c0ec533 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -75,7 +75,7 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
  * the spitfire/niagara VA-hole.
  */
 
-static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
 					 unsigned long pgoff)
 {
 	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
@@ -84,24 +84,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					 unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -124,7 +113,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -134,50 +123,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	return addr;
 }
 
 unsigned long
@@ -190,6 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes.  */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -214,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -224,73 +186,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_color_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_color_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f76f83d5ac63..d2b59441ebdd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 							unsigned long pgoff,
 							unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
-	if (unlikely(len >= VA_EXCLUDE_START))
-		return -ENOMEM;
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	addr = ALIGN(addr, HPAGE_SIZE);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-	}
+	return addr;
 }
 
 static unsigned long
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long pgoff,
 				  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes.  */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache & HPAGE_MASK;
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = (mm->mmap_base-len) & HPAGE_MASK;
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start-len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 812e2d037972..650ccff8378c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev_vma;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache  = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma_prev(mm, addr, &prev_vma);
-		if (!vma) {
-			return addr;
-			break;
-		}
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr + len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			mm->free_area_cache = addr;
-			return addr;
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5939f44fe0c0..9c999c1674fa 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -354,12 +354,10 @@ static inline int mmap_is_ia32(void)
 	return 0;
 }
 
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
 enum align_flags {
 	ALIGN_VA_32	= BIT(0),
 	ALIGN_VA_64	= BIT(1),
-	ALIGN_VDSO	= BIT(2),
-	ALIGN_TOPDOWN	= BIT(3),
 };
 
 struct va_alignment {
@@ -368,5 +366,5 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 593e51d4643f..513b05f15bb4 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -3,6 +3,9 @@
 
 #define MAP_32BIT	0x40		/* only give out 32bit addresses */
 
+#define MAP_HUGE_2MB    (21 << MAP_HUGE_SHIFT)
+#define MAP_HUGE_1GB    (30 << MAP_HUGE_SHIFT)
+
 #include <asm-generic/mman.h>
 
 #endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index b4d3c3927dd8..97ef74b88e0f 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,37 +21,23 @@
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
  */
-unsigned long align_addr(unsigned long addr, struct file *filp,
-			 enum align_flags flags)
+static unsigned long get_align_mask(void)
 {
-	unsigned long tmp_addr;
-
 	/* handle 32- and 64-bit case with a single conditional */
 	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
-		return addr;
+		return 0;
 
 	if (!(current->flags & PF_RANDOMIZE))
-		return addr;
-
-	if (!((flags & ALIGN_VDSO) || filp))
-		return addr;
-
-	tmp_addr = addr;
-
-	/*
-	 * We need an address which is <= than the original
-	 * one only when in topdown direction.
-	 */
-	if (!(flags & ALIGN_TOPDOWN))
-		tmp_addr += va_align.mask;
+		return 0;
 
-	tmp_addr &= ~va_align.mask;
+	return va_align.mask;
+}
 
-	return tmp_addr;
+unsigned long align_vdso_addr(unsigned long addr)
+{
+	unsigned long align_mask = get_align_mask();
+	return (addr + align_mask) & ~align_mask;
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -144,50 +130,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-	    && len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = begin;
-	}
-	addr = mm->free_area_cache;
-	if (addr < begin)
-		addr = begin;
-	start_addr = addr;
-
-full_search:
-
-	addr = align_addr(addr, filp, 0);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (end - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != begin) {
-				start_addr = addr = begin;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		addr = align_addr(addr, filp, 0);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = begin;
+	info.high_limit = end;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
-
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
@@ -195,7 +147,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr;
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
 	/*
@@ -270,14 +188,5 @@ bottomup:
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 937bff5cdaa7..ae1aa71d0115 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -274,42 +274,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -317,83 +290,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long base = mm->mmap_base;
-	unsigned long addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	unsigned long start_addr;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache  = base;
-	}
-try_again:
-	start_addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma)
-			return addr;
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		if (addr + len <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else if (mm->free_area_cache == vma->vm_end) {
-			/* pull free_area_cache down to the first hole */
-			mm->free_area_cache = vma->vm_start;
-			mm->cached_hole_size = largest_hole;
-		}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != base) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 00aaf047b39f..431e87544411 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -141,7 +141,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	 * unaligned here as a result of stack start randomization.
 	 */
 	addr = PAGE_ALIGN(addr);
-	addr = align_addr(addr, NULL, ALIGN_VDSO);
+	addr = align_vdso_addr(addr);
 
 	return addr;
 }
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 25bc6c1309c3..00eed6786d7e 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -93,4 +93,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _XTENSA_MMAN_H */
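The MAP_HUGE_SHIFT / MAP_HUGE_MASK definitions added to the asm mman.h headers above let mmap(MAP_HUGETLB) callers pick a huge page size by placing log2(size) in flag bits [26:31]; the x86 header additionally spells out MAP_HUGE_2MB and MAP_HUGE_1GB. A hedged userspace sketch of the encoding (assumes a kernel with this series applied and hugetlb pages reserved; the fallback defines merely restate values from the hunks above and the conventional x86 MAP_HUGETLB bit):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MAP_HUGETLB
	#define MAP_HUGETLB	0x40000			/* assumption: x86 value */
	#endif
	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT	26			/* from the mman.h hunks above */
	#endif
	#ifndef MAP_HUGE_2MB
	#define MAP_HUGE_2MB	(21 << MAP_HUGE_SHIFT)	/* log2(2MB) == 21 */
	#endif

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;
		/* Anonymous 2MB-backed hugetlb mapping; fails with ENOMEM if no
		 * 2MB hugetlb pages are reserved, or EINVAL on older kernels
		 * that ignore or reject the MAP_HUGE_* size bits. */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
			       -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		munmap(p, len);
		return 0;
	}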