author    Michel Lespinasse <walken@google.com>          2013-02-22 16:32:44 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2013-02-23 17:50:11 -0800
commit    cea10a19b7972a1954c4a2d05a7de8db48b444fb (patch)
tree      694b3c906259cfbfc7b7cb1b0eb507ecf0d1d63c
parent    c22c0d6344c362b1dde5d8e160d3d07536aca120 (diff)
mm: directly use __mlock_vma_pages_range() in find_extend_vma()
In find_extend_vma(), we don't need mlock_vma_pages_range() to verify
the vma type - we know we're working with a stack.  So, we can call
directly into __mlock_vma_pages_range(), and remove the last
make_pages_present() call site.

Note that we don't use mm_populate() here, so we can't release the
mmap_sem while allocating new stack pages.  This is deemed acceptable,
because the stack vmas grow by a bounded number of pages at a time, and
these are anon pages so we don't have to read from disk to populate
them.

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
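The user-visible guarantee all of these kernel paths implement is that an
mlock()ed range is resident. A minimal userspace sketch (not part of the
patch) that observes this with mincore():

/*
 * Userspace demonstration, not kernel code: after mlock(), every
 * page of the range must be resident, which mincore() can verify.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 4 * page;
	unsigned char vec[4];
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	if (mlock(buf, len))	/* populates the range, as the kernel paths above do */
		return 1;
	if (mincore(buf, len, vec))
		return 1;
	for (size_t i = 0; i < len / page; i++)
		printf("page %zu resident: %d\n", i, vec[i] & 1);
	munlock(buf, len);
	munmap(buf, len);
	return 0;
}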
 include/linux/mm.h |  1
 mm/internal.h      |  4
 mm/memory.c        | 24
 mm/mlock.c         | 57
 mm/mmap.c          | 10
 5 files changed, 9 insertions(+), 87 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8332f3069fe3..0c34d3486816 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1035,7 +1035,6 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
-extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write);
diff --git a/mm/internal.h b/mm/internal.h
index 9ba21100ebf3..1c0c4cc0fcf7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -162,8 +162,8 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent);
#ifdef CONFIG_MMU
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
+extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
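The newly exported __mlock_vma_pages_range() takes a nonblocking
out-parameter. A hypothetical caller, sketching the contract as assumed
from the 3.9-era __get_user_pages() retry protocol (this is not an
excerpt from the kernel):

int nonblocking = 1;
long ret;

down_read(&mm->mmap_sem);
ret = __mlock_vma_pages_range(vma, start, end, &nonblocking);
if (nonblocking)
	up_read(&mm->mmap_sem);
/*
 * Otherwise a blocking fault already released mmap_sem on our
 * behalf, and vma must be looked up again before it is reused.
 * Passing NULL instead, as find_extend_vma() now does, promises
 * that mmap_sem stays held across every fault.
 */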
diff --git a/mm/memory.c b/mm/memory.c
index 0abd07097ec6..7837ceacf090 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3824,30 +3824,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-int make_pages_present(unsigned long addr, unsigned long end)
-{
- int ret, len, write;
- struct vm_area_struct * vma;
-
- vma = find_vma(current->mm, addr);
- if (!vma)
- return -ENOMEM;
- /*
- * We want to touch writable mappings with a write fault in order
- * to break COW, except for shared mappings because these don't COW
- * and we would not want to dirty them for nothing.
- */
- write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
- BUG_ON(addr >= end);
- BUG_ON(end > vma->vm_end);
- len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
- ret = get_user_pages(current, current->mm, addr,
- len, write, 0, NULL, NULL);
- if (ret < 0)
- return ret;
- return ret == len ? 0 : -EFAULT;
-}
-
#if !defined(__HAVE_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)
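The removed make_pages_present() faulted writably only on private
writable mappings, so that the fault breaks COW, while shared mappings
are left alone rather than dirtied for nothing. A standalone sketch of
that predicate (the VM_* values are copied from kernel headers of this
era; the program itself is illustrative, not kernel code):

#include <stdio.h>

#define VM_WRITE  0x00000002UL
#define VM_SHARED 0x00000008UL

int main(void)
{
	unsigned long cases[] = { 0, VM_WRITE, VM_SHARED, VM_WRITE | VM_SHARED };

	for (int i = 0; i < 4; i++) {
		unsigned long flags = cases[i];
		/* write-fault only private writable mappings */
		int write = (flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;

		printf("VM_WRITE=%d VM_SHARED=%d -> write fault: %d\n",
		       !!(flags & VM_WRITE), !!(flags & VM_SHARED), write);
	}
	return 0;
}

Only the VM_WRITE-without-VM_SHARED case produces a write fault; the
other three combinations are touched read-only.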
diff --git a/mm/mlock.c b/mm/mlock.c
index a296a49865df..569400a5d079 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -155,9 +155,8 @@ void munlock_vma_page(struct page *page)
*
* vma->vm_mm->mmap_sem must be held for at least read.
*/
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- int *nonblocking)
+long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *nonblocking)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
@@ -202,56 +201,6 @@ static int __mlock_posix_error_return(long retval)
return retval;
}
-/**
- * mlock_vma_pages_range() - mlock pages in specified vma range.
- * @vma - the vma containing the specfied address range
- * @start - starting address in @vma to mlock
- * @end - end address [+1] in @vma to mlock
- *
- * For mmap()/mremap()/expansion of mlocked vma.
- *
- * return 0 on success for "normal" vmas.
- *
- * return number of pages [> 0] to be removed from locked_vm on success
- * of "special" vmas.
- */
-long mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- int nr_pages = (end - start) / PAGE_SIZE;
- BUG_ON(!(vma->vm_flags & VM_LOCKED));
-
- /*
- * filter unlockable vmas
- */
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- goto no_mlock;
-
- if (!((vma->vm_flags & VM_DONTEXPAND) ||
- is_vm_hugetlb_page(vma) ||
- vma == get_gate_vma(current->mm))) {
-
- __mlock_vma_pages_range(vma, start, end, NULL);
-
- /* Hide errors from mmap() and other callers */
- return 0;
- }
-
- /*
- * User mapped kernel pages or huge pages:
- * make these pages present to populate the ptes, but
- * fall thru' to reset VM_LOCKED--no need to unlock, and
- * return nr_pages so these don't get counted against task's
- * locked limit. huge pages are already counted against
- * locked vm limit.
- */
- make_pages_present(start, end);
-
-no_mlock:
- vma->vm_flags &= ~VM_LOCKED; /* and don't come back! */
- return nr_pages; /* error or pages NOT mlocked */
-}
-
/*
* munlock_vma_pages_range() - munlock all pages in the vma range.'
* @vma - vma containing range to be munlock()ed.
@@ -303,7 +252,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
*
* Filters out "special" vmas -- VM_LOCKED never gets set for these, and
* munlock is a no-op. However, for some special vmas, we go ahead and
- * populate the ptes via make_pages_present().
+ * populate the ptes.
*
* For vmas that pass the filters, merge/split as appropriate.
*/
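Note what the removed wrapper's documented contract was: 0 for "normal"
vmas, a positive page count for "special" ones (VM_IO/VM_PFNMAP,
VM_DONTEXPAND, hugetlb, gate) that must not be charged to the task. That
implies caller-side accounting roughly like this (a sketch inferred from
the comment above, not an excerpt from any real caller):

long ret = mlock_vma_pages_range(vma, start, end);
if (ret > 0)
	/* special vma: uncharge so these pages don't count
	 * against the task's RLIMIT_MEMLOCK */
	mm->locked_vm -= ret;

find_extend_vma() needs none of this, since a stack vma is never one of
the special cases - which is exactly why the direct call is safe.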
diff --git a/mm/mmap.c b/mm/mmap.c
index d6bc39e939ab..8826c77513a9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2204,9 +2204,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
return vma;
if (!prev || expand_stack(prev, addr))
return NULL;
- if (prev->vm_flags & VM_LOCKED) {
- mlock_vma_pages_range(prev, addr, prev->vm_end);
- }
+ if (prev->vm_flags & VM_LOCKED)
+ __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
return prev;
}
#else
@@ -2232,9 +2231,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
start = vma->vm_start;
if (expand_stack(vma, addr))
return NULL;
- if (vma->vm_flags & VM_LOCKED) {
- mlock_vma_pages_range(vma, addr, start);
- }
+ if (vma->vm_flags & VM_LOCKED)
+ __mlock_vma_pages_range(vma, addr, start, NULL);
return vma;
}
#endif
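In both variants only the pages that the expansion just added are
populated, which is what bounds the time mmap_sem stays held. A worked
example for the grows-down case (hypothetical addresses, 4 KiB pages
assumed):

unsigned long old_start = 0x7ffff7ff0000UL; /* vm_start before expand_stack() */
unsigned long addr      = 0x7ffff7fee000UL; /* page-aligned faulting address */
unsigned long npages    = (old_start - addr) >> 12; /* PAGE_SHIFT == 12 */
/* npages == 2: __mlock_vma_pages_range(vma, addr, old_start, NULL)
 * touches only the two newly added stack pages. */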