 fs/ramfs/file-nommu.c |  3 +--
 include/linux/mm.h    |  4 ----
 mm/internal.h         | 12 ------------
 mm/nommu.c            |  4 ++--
 mm/page_alloc.c       |  7 -------
 mm/slab.c             |  9 ++++++++-
 6 files changed, 11 insertions(+), 28 deletions(-)
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3f810acd0bfa..b1ca234068f6 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	xpages = 1UL << order;
 	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	for (loop = 0; loop < npages; loop++)
-		set_page_count(pages + loop, 1);
+	split_page(pages, order);
 
 	/* trim off any pages we don't actually require */
 	for (loop = npages; loop < xpages; loop++)
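
The hunk above replaces ramfs's hand-rolled per-page refcounting with split_page(), which the rest of this patch makes available on !CONFIG_MMU. The caller pattern is: allocate an order-N block, split it, then free the sub-pages that are not actually needed. Below is a minimal userspace sketch of that pattern; struct page, set_page_count() and the "free" are simplified stand-ins, not the kernel's definitions.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for struct page; only the refcount matters here. */
struct page { int _count; };

static void set_page_count(struct page *p, int v) { p->_count = v; }

/* Mirrors what the kernel's split_page() guarantees: page[0] already
 * carries the allocation's reference, so give each remaining sub-page
 * its own count of 1, making every sub-page individually freeable. */
static void split_page(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 1; i < (1U << order); i++)
		set_page_count(page + i, 1);
}

int main(void)
{
	struct page pages[4] = { { ._count = 1 } };	/* order-2 block */
	unsigned int npages = 3;	/* only 3 of the 4 pages are needed */

	split_page(pages, 2);

	/* trim off the pages we don't actually require, as ramfs does */
	for (unsigned int loop = npages; loop < 4; loop++) {
		assert(pages[loop]._count == 1);	/* individually freeable */
		pages[loop]._count = 0;			/* model of __free_page() */
	}
	printf("kept %u pages, freed %u\n", npages, 4 - npages);
	return 0;
}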
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9b3cdfc8046d..3d84b7a35e0d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -327,11 +327,7 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
diff --git a/mm/internal.h b/mm/internal.h
index e3042db2a2d6..7bb339779818 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
 	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
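
set_page_refs() can drop its !CONFIG_MMU loop because high-order nommu allocations are now compound pages: one reference on the head page covers the whole block, and a get/put on any tail page is redirected to the head. That answers the deleted comment's worry about access_process_vm() freeing part of a block. A rough userspace model of the redirect, assuming the 2.6.16-era convention that a tail page's ->private points at its head (head_of() and the struct layout are simplified stand-ins, not kernel code):

#include <assert.h>
#include <stdbool.h>

struct page {
	int _count;
	bool compound;		/* stands in for PageCompound() */
	struct page *private;	/* on tail pages: pointer to the head */
};

static struct page *head_of(struct page *p)
{
	return (p->compound && p->private) ? p->private : p;
}

static void get_page(struct page *p) { head_of(p)->_count++; }
static void put_page(struct page *p) { head_of(p)->_count--; }

int main(void)
{
	struct page blk[4] = { { ._count = 1, .compound = true } };

	for (int i = 1; i < 4; i++)
		blk[i] = (struct page){ .compound = true, .private = blk };

	/* e.g. access_process_vm() grabbing a tail page */
	get_page(&blk[3]);
	assert(blk[0]._count == 2);
	put_page(&blk[3]);
	assert(blk[0]._count == 1);	/* block still referenced, not freed */
	return 0;
}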
diff --git a/mm/nommu.c b/mm/nommu.c
index 4951f4786f28..db45efac17cc 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
 	 */
-	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL);
+	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
 	if (!base)
 		goto enomem;
 
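
Both nommu allocation sites now OR in __GFP_COMP, so any high-order block that kmalloc hands back comes as a compound page. The flag arithmetic in the __vmalloc() hunk is plain bit manipulation; a compilable sketch follows (the constants match the 2.6.16-era headers, but treat them as illustrative values):

#include <assert.h>

#define __GFP_HIGHMEM	0x02u
#define __GFP_COMP	0x4000u
#define GFP_KERNEL	0xd0u	/* __GFP_WAIT | __GFP_IO | __GFP_FS */

int main(void)
{
	unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM;
	unsigned int fixed = (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM;

	assert(fixed & __GFP_COMP);		/* compound page requested */
	assert(!(fixed & __GFP_HIGHMEM));	/* highmem stripped for kmalloc */
	assert((fixed & GFP_KERNEL) == GFP_KERNEL);	/* rest untouched */
	return 0;
}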
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7aa0181287e1..e197818a7cf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
 		set_page_count(page + i, 1);
 	}
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
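
These two hunks are the free-path and implementation side of the same cleanup. Previously, nommu gave every sub-page of an order-N block its own reference at allocation time, so __free_pages_ok() had to drop the tail references before its "all counts are zero" sanity check; with only the head page counted, the check passes with no special-casing, and split_page() can be compiled unconditionally instead of being stubbed out on nommu. A simplified model of that free-path invariant (free_pages_check() here is a stand-in, not the kernel's version):

#include <assert.h>

struct page { int _count; };

/* free_pages_check()-style sanity: a page arriving back at the
 * allocator must have a refcount of zero */
static int free_pages_check(struct page *p)
{
	return p->_count != 0;
}

int main(void)
{
	struct page blk[4] = { { ._count = 1 } };	/* compound: head only */
	int reserved = 0;

	blk[0]._count--;	/* caller's final put_page() on the head */

	for (int i = 0; i < 4; i++)
		reserved += free_pages_check(&blk[i]);
	assert(reserved == 0);	/* no leftover tail references to drop */
	return 0;
}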
diff --git a/mm/slab.c b/mm/slab.c
index f477acfb732f..ff0ab772f49d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
 	struct page *page;
 
 	/* Nasty!!!!!! I hope this is OK. */
-	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
+
+	i = 1;
+	if (likely(!PageCompound(page)))
+		i <<= cachep->gfporder;
 	do {
 		page_set_cache(page, cachep);
 		page_set_slab(page, slabp);
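
On the slab side, a multi-page slab backed by a compound page can keep its metadata on the head page only: set_slab_attr() now stamps a single page for compound blocks (i stays 1), and page_get_cache()/page_get_slab() from any interior page first hop to the head through page_private(). A rough userspace model of that lookup path; resolve(), the cache field, and the struct layouts are simplified stand-ins, not the kernel's definitions (page_get_slab() would be analogous):

#include <assert.h>
#include <stdbool.h>

struct kmem_cache { const char *name; };

struct page {
	bool compound;			/* stands in for PageCompound() */
	struct page *private;		/* head pointer on tail pages */
	struct kmem_cache *cache;	/* stands in for page->lru.next */
};

/* Hop to the head page first, as the patched page_get_cache() does
 * for compound pages. */
static struct page *resolve(struct page *p)
{
	return (p->compound && p->private) ? p->private : p;
}

static struct kmem_cache *page_get_cache(struct page *p)
{
	return resolve(p)->cache;
}

int main(void)
{
	struct kmem_cache cachep = { "demo-cache" };
	struct page blk[4] = { { .compound = true } };

	for (int i = 1; i < 4; i++)
		blk[i] = (struct page){ .compound = true, .private = blk };

	/* set_slab_attr() with PageCompound: stamp the head page only */
	blk[0].cache = &cachep;

	/* a kfree() landing on any sub-page still finds its cache */
	assert(page_get_cache(&blk[3]) == &cachep);
	return 0;
}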