Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c          9
-rw-r--r--   mm/huge_memory.c      5
-rw-r--r--   mm/memcontrol.c       2
-rw-r--r--   mm/migrate.c         22
-rw-r--r--   mm/mmap.c            11
-rw-r--r--   mm/page-writeback.c  18
-rw-r--r--   mm/rmap.c             3
-rw-r--r--   mm/vmscan.c          21
8 files changed, 67 insertions, 24 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 9276bdb2343c..0604cb02e6f3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,
return error;
}
- error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
if (error) {
if (!huge)
mem_cgroup_cancel_charge(page, memcg, false);
@@ -1585,8 +1585,7 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset,
- gfp_mask & GFP_RECLAIM_MASK);
+ err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (unlikely(err)) {
put_page(page);
page = NULL;
@@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
if (!page)
return -ENOMEM;
- ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
+ ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
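
The filemap.c hunks move the gfp filtering to the radix-tree preload calls: the preload now masks with GFP_RECLAIM_MASK instead of merely clearing __GFP_HIGHMEM, and the add_to_page_cache_lru() callers pass their gfp_mask through unchanged. A minimal sketch of the filtering idea (preload_gfp() is a hypothetical helper for illustration; the assumption is only that GFP_RECLAIM_MASK keeps the reclaim-behaviour bits and drops placement bits such as __GFP_HIGHMEM):

static inline gfp_t preload_gfp(gfp_t mapping_gfp)
{
	/* keep only reclaim-behaviour bits for the radix-tree node allocation */
	return mapping_gfp & GFP_RECLAIM_MASK;	/* not just & ~__GFP_HIGHMEM */
}
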
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14ed6ee5e02f..a3a1815f8e11 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pmde = maybe_pmd_mkwrite(pmde, vma);
flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
- page_add_anon_rmap(new, vma, mmun_start, true);
+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, mmun_start, true);
+ else
+ page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
if (vma->vm_flags & VM_LOCKED)
mlock_vma_page(new);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e074f7c637aa..2bd3df3d101a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
{
struct memcg_kmem_cache_create_work *cw;
- cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
+ cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
if (!cw)
return;
diff --git a/mm/migrate.c b/mm/migrate.c
index f65dd69e1fd1..568433023831 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->i_pages,
page_index(page));
- expected_count += 1 + page_has_private(page);
+ expected_count += hpage_nr_pages(page) + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot,
&mapping->i_pages.xa_lock) != page) {
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
*/
newpage->index = page->index;
newpage->mapping = page->mapping;
- get_page(newpage); /* add cache reference */
+ page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
if (PageSwapBacked(page)) {
__SetPageSwapBacked(newpage);
if (PageSwapCache(page)) {
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping,
}
- radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+ if (PageTransHuge(page)) {
+ int i;
+ int index = page_index(page);
+
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
+ pslot = radix_tree_lookup_slot(&mapping->i_pages,
+ index + i);
+ radix_tree_replace_slot(&mapping->i_pages, pslot,
+ newpage + i);
+ }
+ } else {
+ radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+ }
/*
* Drop cache reference from old page by unfreezing
* to one less reference.
* We know this isn't the last reference.
*/
- page_ref_unfreeze(page, expected_count - 1);
+ page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
xa_unlock(&mapping->i_pages);
/* Leave irq disabled to prevent preemption while updating stats */
@@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
current_node = NUMA_NO_NODE;
}
out_flush:
+ if (list_empty(&pagelist))
+ return err;
+
/* Make sure we do not overwrite the existing error */
err1 = do_move_pages_to_node(mm, &pagelist, current_node);
if (!err1)
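
The do_pages_move() hunk is on the move_pages(2) syscall path: bailing out when the pagelist is empty avoids a needless flush that could overwrite an error already held in err. A minimal userspace sketch of the syscall this code serves (illustrative program, assumes libnuma is installed; compile with -lnuma):

#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(psz, psz);
	void *pages[1] = { buf };
	int nodes[1] = { 0 };		/* ask for NUMA node 0 */
	int status[1] = { -1 };		/* per-page result: node or -errno */

	((char *)buf)[0] = 1;		/* fault the page in first */
	if (move_pages(0 /* current process */, 1, pages, nodes, status,
		       MPOL_MF_MOVE))
		perror("move_pages");
	printf("page is now on node (or -errno): %d\n", status[0]);
	free(buf);
	return 0;
}
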
diff --git a/mm/mmap.c b/mm/mmap.c
index 188f195883b9..9d5968d1e8e3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
+#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ return prot;
+}
+#endif
+
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(pgprot_val(protection_map[vm_flags &
+ pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+ return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);
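
The #ifndef block gives vm_get_page_prot() an architecture hook with a no-op default, so only architectures selecting CONFIG_ARCH_HAS_FILTER_PGPROT change its behaviour. A hypothetical arch-side override might look like the following (where it lives and the _PAGE_EXAMPLE_FORBIDDEN bit are illustrative assumptions, not part of this patch):

/* In an architecture's pgtable code, enabled by selecting
 * CONFIG_ARCH_HAS_FILTER_PGPROT; illustrative only. */
pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	/* strip a bit user mappings must never carry (hypothetical flag) */
	return __pgprot(pgprot_val(prot) & ~_PAGE_EXAMPLE_FORBIDDEN);
}
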
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5c1a3279e63f..337c6afb3345 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied--;
dec_node_page_state(page, NR_DIRTIED);
dec_wb_stat(wb, WB_DIRTIED);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page)
if (mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
lock_page_memcg(page);
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page))
account_page_cleaned(page, mapping, wb);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
unlock_page_memcg(page);
} else {
ClearPageDirty(page);
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
/*
* Yes, Virginia, this is indeed insane.
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page)
* always locked coming in here, so we get the desired
* exclusion.
*/
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page)) {
dec_lruvec_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
}
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
return ret;
}
return TestClearPageDirty(page);
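
All three call sites switch from a plain bool to a struct wb_lock_cookie passed by reference into unlocked_inode_to_wb_begin()/end(). The cookie's definition lives in the matching backing-dev header change, which is outside this mm-only diffstat; the stand-in below is only an illustration of what it is assumed to carry, namely the locked flag together with the saved irq state, so that end() can restore interrupts instead of unconditionally re-enabling them:

/* Illustrative stand-in, not the real definition. */
struct wb_lock_cookie_example {
	bool locked;		/* did begin() take the list lock? */
	unsigned long flags;	/* irq state saved for *_irqrestore() */
};
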
diff --git a/mm/rmap.c b/mm/rmap.c
index f0dd4e4565bc..8d5337fed37b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (!pvmw.pte && (flags & TTU_MIGRATION)) {
VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
- if (!PageAnon(page))
- continue;
-
set_pmd_migration_entry(&pvmw, page);
continue;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b920ce3ae02..9b697323a88c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -303,7 +303,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
/*
* Add a shrinker callback to be called from the vm.
*/
-int register_shrinker(struct shrinker *shrinker)
+int prealloc_shrinker(struct shrinker *shrinker)
{
size_t size = sizeof(*shrinker->nr_deferred);
@@ -313,10 +313,29 @@ int register_shrinker(struct shrinker *shrinker)
shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
if (!shrinker->nr_deferred)
return -ENOMEM;
+ return 0;
+}
+
+void free_prealloced_shrinker(struct shrinker *shrinker)
+{
+ kfree(shrinker->nr_deferred);
+ shrinker->nr_deferred = NULL;
+}
+void register_shrinker_prepared(struct shrinker *shrinker)
+{
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
+}
+
+int register_shrinker(struct shrinker *shrinker)
+{
+ int err = prealloc_shrinker(shrinker);
+
+ if (err)
+ return err;
+ register_shrinker_prepared(shrinker);
return 0;
}
EXPORT_SYMBOL(register_shrinker);
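
register_shrinker() is split so that allocating nr_deferred (which can fail) is separated from publishing the shrinker on shrinker_list, with free_prealloced_shrinker() as the unwind path and register_shrinker() kept as a wrapper over the two steps. A hypothetical caller of the new API (struct my_cache and the my_* helpers are illustrative, not from this patch):

static int my_cache_init(struct my_cache *c)
{
	int err;

	c->shrinker.count_objects = my_count_objects;
	c->shrinker.scan_objects  = my_scan_objects;
	c->shrinker.seeks = DEFAULT_SEEKS;

	err = prealloc_shrinker(&c->shrinker);	/* may fail; nothing visible yet */
	if (err)
		return err;

	err = my_finish_setup(c);		/* illustrative extra setup step */
	if (err) {
		free_prealloced_shrinker(&c->shrinker);
		return err;
	}

	register_shrinker_prepared(&c->shrinker);	/* now reclaim can see it */
	return 0;
}
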