Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c | 67
-rw-r--r--  mm/internal.h    |  3
-rw-r--r--  mm/khugepaged.c  |  2
-rw-r--r--  mm/memory.c      |  2
-rw-r--r--  mm/migrate.c     |  2
-rw-r--r--  mm/shmem.c       | 10
6 files changed, 42 insertions, 44 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86fe697e8bfb..f22401fd83b5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -474,10 +474,13 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma, bool dirty)
 {
-	if (likely(vma->vm_flags & VM_WRITE))
+	if (likely(vma->vm_flags & VM_WRITE)) {
 		pmd = pmd_mkwrite(pmd);
+		if (dirty)
+			pmd = pmd_mkdirty(pmd);
+	}
 	return pmd;
 }
 
@@ -599,7 +602,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 		}
 
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = maybe_pmd_mkwrite(entry, vma, true);
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		mem_cgroup_commit_charge(page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(page, vma);
@@ -741,8 +744,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (pfn_t_devmap(pfn))
 		entry = pmd_mkdevmap(entry);
 	if (write) {
-		entry = pmd_mkyoung(pmd_mkdirty(entry));
-		entry = maybe_pmd_mkwrite(entry, vma);
+		entry = pmd_mkyoung(entry);
+		entry = maybe_pmd_mkwrite(entry, vma, true);
 	}
 
 	if (pgtable) {
@@ -788,10 +791,14 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma,
+		bool dirty)
 {
-	if (likely(vma->vm_flags & VM_WRITE))
+	if (likely(vma->vm_flags & VM_WRITE)) {
 		pud = pud_mkwrite(pud);
+		if (dirty)
+			pud = pud_mkdirty(pud);
+	}
 	return pud;
 }
 
@@ -807,8 +814,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 	if (pfn_t_devmap(pfn))
 		entry = pud_mkdevmap(entry);
 	if (write) {
-		entry = pud_mkyoung(pud_mkdirty(entry));
-		entry = maybe_pud_mkwrite(entry, vma);
+		entry = pud_mkyoung(entry);
+		entry = maybe_pud_mkwrite(entry, vma, true);
 	}
 	set_pud_at(mm, addr, pud, entry);
 	update_mmu_cache_pud(vma, addr, pud);
@@ -842,20 +849,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				pmd, _pmd, 1))
+				pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -884,7 +886,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -995,20 +997,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud)
+		pud_t *pud, int flags)
 {
 	pud_t _pud;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pud is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
-	 * set the young bit, instead of the current set_pud_at.
-	 */
-	_pud = pud_mkyoung(pud_mkdirty(*pud));
+	_pud = pud_mkyoung(*pud);
+	if (flags & FOLL_WRITE)
+		_pud = pud_mkdirty(_pud);
 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-				pud, _pud, 1))
+				pud, _pud, flags & FOLL_WRITE))
 		update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1031,7 +1028,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pud(vma, addr, pud);
+		touch_pud(vma, addr, pud, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1289,7 +1286,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 	if (reuse_swap_page(page, NULL)) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = maybe_pmd_mkwrite(entry, vma, true);
 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		ret |= VM_FAULT_WRITE;
@@ -1359,7 +1356,7 @@ alloc:
 	} else {
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = maybe_pmd_mkwrite(entry, vma, true);
 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr, true);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
@@ -1424,7 +1421,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
@@ -2938,7 +2935,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	if (pmd_swp_soft_dirty(*pvmw->pmd))
 		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
-		pmde = maybe_pmd_mkwrite(pmde, vma);
+		pmde = maybe_pmd_mkwrite(pmde, vma, false);
 
 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
 	page_add_anon_rmap(new, vma, mmun_start, true);
diff --git a/mm/internal.h b/mm/internal.h
index e6bd35182dae..b35cdebda0ce 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -328,7 +328,8 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma,
+		bool dirty);
 
 /*
  * At what user virtual address is page expected in @vma?
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ea4ff259b671..db43dc8a8ae6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1057,7 +1057,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pgtable = pmd_pgtable(_pmd);
 
 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+	_pmd = maybe_pmd_mkwrite(_pmd, vma, false);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so
diff --git a/mm/memory.c b/mm/memory.c
index 85e7a87da79f..b10c1d26f675 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3335,7 +3335,7 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
 
 	entry = mk_huge_pmd(page, vma->vm_page_prot);
 	if (write)
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = maybe_pmd_mkwrite(entry, vma, true);
 
 	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
 	page_add_file_rmap(page, true);
diff --git a/mm/migrate.c b/mm/migrate.c
index 4d0be47a322a..57865fc8cfe3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2068,7 +2068,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	}
 
 	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = maybe_pmd_mkwrite(entry, vma, false);
 
 	/*
 	 * Clear the old entry under pagetable lock and establish the new PTE.
diff --git a/mm/shmem.c b/mm/shmem.c
index 4aa9307feab0..7fbe67be86fa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3776,7 +3776,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
-	if (!(sb->s_flags & MS_KERNMOUNT)) {
+	if (!(sb->s_flags & SB_KERNMOUNT)) {
 		sbinfo->max_blocks = shmem_default_max_blocks();
 		sbinfo->max_inodes = shmem_default_max_inodes();
 		if (shmem_parse_options(data, sbinfo, false)) {
@@ -3784,12 +3784,12 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed;
 		}
 	} else {
-		sb->s_flags |= MS_NOUSER;
+		sb->s_flags |= SB_NOUSER;
 	}
 	sb->s_export_op = &shmem_export_ops;
-	sb->s_flags |= MS_NOSEC;
+	sb->s_flags |= SB_NOSEC;
 #else
-	sb->s_flags |= MS_NOUSER;
+	sb->s_flags |= SB_NOUSER;
 #endif
 
 	spin_lock_init(&sbinfo->stat_lock);
@@ -3809,7 +3809,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_xattr = shmem_xattr_handlers;
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-	sb->s_flags |= MS_POSIXACL;
+	sb->s_flags |= SB_POSIXACL;
 #endif
 
 	uuid_gen(&sb->s_uuid);
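
For reference, the sketch below shows how a fault-path caller is expected to use the reworked helper: instead of pre-dirtying the entry with pmd_mkdirty() and letting maybe_pmd_mkwrite() handle only the write bit, the caller passes its write intent as the new dirty argument, so the dirty bit is set only when the VMA is actually writable. This is a minimal, hypothetical illustration written against the post-patch signature; example_map_huge_page() is not part of this patch, and it omits the pmd locking, rmap and counter updates that real callers such as do_set_pmd() perform.

/*
 * Hypothetical caller sketch (not from this patch).  Assumes the code lives
 * under mm/, like mm/memory.c, so mm/internal.h and the THP helpers used by
 * the patched callers are visible.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>
#include "internal.h"

static void example_map_huge_page(struct vm_fault *vmf, struct page *page,
				  bool write)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t entry;

	entry = mk_huge_pmd(page, vma->vm_page_prot);
	/*
	 * Old pattern: maybe_pmd_mkwrite(pmd_mkdirty(entry), vma), which
	 * dirtied the entry even for read-only mappings.  With the new
	 * signature the dirty bit is applied only when 'write' is true
	 * and the VMA has VM_WRITE.
	 */
	entry = maybe_pmd_mkwrite(entry, vma, write);

	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
}

The same principle applies to touch_pmd()/touch_pud(): with the extra flags argument they mark the entry dirty only for FOLL_WRITE lookups, and pass flags & FOLL_WRITE instead of a hard-coded 1 to pmdp_set_access_flags()/pudp_set_access_flags().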