author    Michel Lespinasse <walken@google.com>    2013-02-22 16:35:56 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 17:50:23 -0800
commit    240aadeedc4a89fc44623f8ce4ca46bda73db07e (patch)
tree      e12cc254f2e78560837e5d454fdbe9b5d9e05c5c
parent    28a35716d317980ae9bc2ff2f84c33a3cda9e884 (diff)
mm: accelerate mm_populate() treatment of THP pages
This change adds a follow_page_mask function which is equivalent to
follow_page, but with an extra page_mask argument.

follow_page_mask sets *page_mask to HPAGE_PMD_NR - 1 when it encounters
a THP page, and to 0 in other cases.

__get_user_pages() makes use of this in order to accelerate populating
THP ranges - that is, when both the pages and vmas arrays are NULL, we
don't need to iterate HPAGE_PMD_NR times to cover a single THP page (and
we also avoid taking mm->page_table_lock that many times).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
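To make the batching arithmetic concrete, here is a minimal userspace sketch
(an illustration only, not part of the patch) of the page_increm computation
that __get_user_pages() performs in the diff below, assuming the common
x86-64 values PAGE_SHIFT == 12 and HPAGE_PMD_NR == 512:

    #include <stdio.h>

    /* Assumed constants; the real values come from the kernel headers. */
    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define HPAGE_PMD_NR 512  /* 2 MiB THP built from 4 KiB base pages */

    int main(void)
    {
            /* start points 5 base pages into a THP mapped at 0x200000. */
            unsigned long start = 0x200000UL + 5 * PAGE_SIZE;

            /* follow_page_mask() reports HPAGE_PMD_NR - 1 for a THP. */
            unsigned int page_mask = HPAGE_PMD_NR - 1;

            /* Base pages left in this THP, including the current one. */
            unsigned int page_increm =
                    1 + (~(start >> PAGE_SHIFT) & page_mask);

            printf("page_increm = %u\n", page_increm);  /* 507 = 512 - 5 */
            return 0;
    }

When both the pages and vmas arrays are NULL, the loop advances those 507
base pages in a single iteration instead of calling follow_page() (and
taking mm->page_table_lock) 507 more times.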
-rw-r--r--  include/linux/mm.h  13
-rw-r--r--  mm/memory.c         31
-rw-r--r--  mm/nommu.c           6
3 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 87b0ef253607..6124f1db50fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1629,8 +1629,17 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-struct page *follow_page(struct vm_area_struct *, unsigned long address,
- unsigned int foll_flags);
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int foll_flags,
+ unsigned int *page_mask);
+
+static inline struct page *follow_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned int foll_flags)
+{
+ unsigned int unused_page_mask;
+ return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
#define FOLL_GET 0x04 /* do get_page on page */
diff --git a/mm/memory.c b/mm/memory.c
index bc929dbad215..5d2ef1217d0c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1462,10 +1462,11 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
EXPORT_SYMBOL_GPL(zap_vma_ptes);
/**
- * follow_page - look up a page descriptor from a user-virtual address
+ * follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
@@ -1473,8 +1474,9 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
{
pgd_t *pgd;
pud_t *pud;
@@ -1484,6 +1486,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
struct page *page;
struct mm_struct *mm = vma->vm_mm;
+ *page_mask = 0;
+
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
BUG_ON(flags & FOLL_GET);
@@ -1530,6 +1534,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page = follow_trans_huge_pmd(vma, address,
pmd, flags);
spin_unlock(&mm->page_table_lock);
+ *page_mask = HPAGE_PMD_NR - 1;
goto out;
}
} else
@@ -1684,6 +1689,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
{
long i;
unsigned long vm_flags;
+ unsigned int page_mask;
if (!nr_pages)
return 0;
@@ -1761,6 +1767,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
get_page(page);
}
pte_unmap(pte);
+ page_mask = 0;
goto next_page;
}
@@ -1778,6 +1785,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
do {
struct page *page;
unsigned int foll_flags = gup_flags;
+ unsigned int page_increm;
/*
* If we have a pending SIGKILL, don't keep faulting
@@ -1787,7 +1795,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return i ? i : -ERESTARTSYS;
cond_resched();
- while (!(page = follow_page(vma, start, foll_flags))) {
+ while (!(page = follow_page_mask(vma, start,
+ foll_flags, &page_mask))) {
int ret;
unsigned int fault_flags = 0;
@@ -1861,13 +1870,19 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
flush_anon_page(vma, page, start);
flush_dcache_page(page);
+ page_mask = 0;
}
next_page:
- if (vmas)
+ if (vmas) {
vmas[i] = vma;
- i++;
- start += PAGE_SIZE;
- nr_pages--;
+ page_mask = 0;
+ }
+ page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ if (page_increm > nr_pages)
+ page_increm = nr_pages;
+ i += page_increm;
+ start += page_increm * PAGE_SIZE;
+ nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
diff --git a/mm/nommu.c b/mm/nommu.c
index 6ab706608492..da0d210fd403 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1819,9 +1819,11 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
return ret;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags)
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
{
+ *page_mask = 0;
return NULL;
}
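For callers that do want the mask, usage under the new interface looks
roughly like this (a sketch only; vma and addr stand in for whatever the
caller already has in scope):

    unsigned int page_mask;
    struct page *page;

    page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
    if (page && page_mask) {
            /* page lies inside a THP spanning page_mask + 1 base pages */
    }

Existing callers are unaffected: the new static inline follow_page()
wrapper in include/linux/mm.h simply discards the mask.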