author | Hugh Dickins <hughd@google.com> | 2016-05-19 17:12:57 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-19 19:12:14 -0700
commit | 1d069b7dd56728a0eb6acb138dce0d37600dee00 (patch)
tree | 29a891a8640bbf0356037d39a115e6ca7a995a75 /mm
parent | bf8616d5fa179d6c755f06726567c6d63c6fbbc7 (diff)
huge pagecache: extend mremap pmd rmap lockout to files
Whatever huge pagecache implementation we go with, file rmap locking
must be added to anon rmap locking, when mremap's move_page_tables()
finds a pmd_trans_huge pmd entry: a simple change, let's do it now.
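The ordering this requires is easiest to see in the helper pair the patch factors out, reproduced here from the diff below: the file rmap lock (i_mmap_rwsem) is taken before the anon rmap lock, and the two are released in reverse order, so the file lock nests outside the anon lock.

```c
/* Reproduced from the patch below: take the file rmap lock first
 * and release it last, so i_mmap_rwsem nests outside anon_vma's
 * rwsem for both the pte-level and huge-pmd move paths. */
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
```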
Factor out take_rmap_locks() and drop_rmap_locks() to handle the locking
for both move_ptes() and move_page_tables(), and delete the
VM_BUG_ON_VMA which rejected vm_file and required anon_vma.
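Condensed from the diff below, the pmd_trans_huge path in move_page_tables() then looks like this (the surrounding walk over the old range is omitted):

```c
/* Excerpt from move_page_tables() after this patch: both rmap
 * locks, not just vma->anon_vma, now bracket the huge-pmd move,
 * and the VM_BUG_ON_VMA that rejected file-backed vmas is gone. */
if (pmd_trans_huge(*old_pmd)) {
	if (extent == HPAGE_PMD_SIZE) {
		bool moved;

		/* See comment in move_ptes() */
		if (need_rmap_locks)
			take_rmap_locks(vma);
		moved = move_huge_pmd(vma, old_addr, new_addr,
				      old_end, old_pmd, new_pmd);
		if (need_rmap_locks)
			drop_rmap_locks(vma);
		if (moved) {
			need_flush = true;
			continue;
		}
	}
	/* ...otherwise fall back to splitting the pmd and
	 * copying at pte level (tail of block not shown). */
```

move_ptes() gets the same treatment: its open-coded lock and unlock sequences are replaced by calls to the same two helpers, as the diff shows.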
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mremap.c | 42 |
1 file changed, 22 insertions(+), 20 deletions(-)
```diff
diff --git a/mm/mremap.c b/mm/mremap.c
index 7d98fe1adc12..9dc499977924 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -70,6 +70,22 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static void take_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->anon_vma)
+		anon_vma_lock_write(vma->anon_vma);
+}
+
+static void drop_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->anon_vma)
+		anon_vma_unlock_write(vma->anon_vma);
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+}
+
 static pte_t move_soft_dirty_pte(pte_t pte)
 {
 	/*
@@ -90,8 +106,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
-	struct address_space *mapping = NULL;
-	struct anon_vma *anon_vma = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -114,16 +128,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * serialize access to individual ptes, but only rmap traversal
 	 * order guarantees that we won't miss both the old and new ptes).
 	 */
-	if (need_rmap_locks) {
-		if (vma->vm_file) {
-			mapping = vma->vm_file->f_mapping;
-			i_mmap_lock_write(mapping);
-		}
-		if (vma->anon_vma) {
-			anon_vma = vma->anon_vma;
-			anon_vma_lock_write(anon_vma);
-		}
-	}
+	if (need_rmap_locks)
+		take_rmap_locks(vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -151,10 +157,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
-	if (anon_vma)
-		anon_vma_unlock_write(anon_vma);
-	if (mapping)
-		i_mmap_unlock_write(mapping);
+	if (need_rmap_locks)
+		drop_rmap_locks(vma);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -193,15 +197,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE) {
 				bool moved;
-				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
-					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
-					anon_vma_lock_write(vma->anon_vma);
+					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
 						    old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
-					anon_vma_unlock_write(vma->anon_vma);
+					drop_rmap_locks(vma);
 				if (moved) {
 					need_flush = true;
 					continue;
```