| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2010-05-24 14:32:38 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-25 08:07:00 -0700 |
| commit | ec95f53aa6ed62ba68660cb19c8474ebe9025cce (patch) | |
| tree | 953ad71c8ee0373ca76f17ee9df65358ad2fe7b7 /mm | |
| parent | 5f53e76299ceebd68bdf9495e8ff80db77711236 (diff) | |
mm: introduce free_pages_prepare()
free_hot_cold_page() and __free_pages_ok() perform very similar freeing
preparation. Consolidate that common work into a new free_pages_prepare() helper.
[akpm@linux-foundation.org: fix busted coding style]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 40 |
1 file changed, 21 insertions, 19 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95ad42de5a87..8f4f27841b71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -620,20 +620,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-	unsigned long flags;
 	int i;
 	int bad = 0;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0 ; i < (1 << order) ; ++i)
-		bad += free_pages_check(page + i);
+	for (i = 0; i < (1 << order); i++) {
+		struct page *pg = page + i;
+
+		if (PageAnon(pg))
+			pg->mapping = NULL;
+		bad += free_pages_check(pg);
+	}
 	if (bad)
-		return;
+		return false;
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +646,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
+	return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
+	int wasMlocked = __TestClearPageMlocked(page);
+
+	if (!free_pages_prepare(page, order))
+		return;
+
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
@@ -1128,21 +1142,9 @@ void free_hot_cold_page(struct page *page, int cold)
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
-	trace_mm_page_free_direct(page, 0);
-	kmemcheck_free_shadow(page, 0);
-
-	if (PageAnon(page))
-		page->mapping = NULL;
-	if (free_pages_check(page))
+	if (!free_pages_prepare(page, 0))
 		return;
 
-	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-	}
-	arch_free_page(page, 0);
-	kernel_map_pages(page, 1, 0);
-
 	migratetype = get_pageblock_migratetype(page);
 	set_page_private(page, migratetype);
 	local_irq_save(flags);
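For orientation, here is a minimal, self-contained sketch (not kernel code) of the pattern the patch applies: two free paths that used to duplicate their "preparation" checks now share one helper that returns false when the free must be aborted. The names struct fake_page, prepare_free(), free_block(), and free_single() are hypothetical stand-ins for struct page, free_pages_prepare(), __free_pages_ok(), and free_hot_cold_page().

```c
/*
 * Simplified stand-in for the consolidation in this patch; the real
 * kernel code does more work (tracing, kmemcheck, debug checks).
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	int bad;	/* stand-in for a free_pages_check() failure */
	void *mapping;	/* stand-in for the anon mapping pointer      */
};

/* Shared preparation: clear per-page state, reject bad pages. */
static bool prepare_free(struct fake_page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1u << order); i++) {
		page[i].mapping = NULL;	/* like clearing page->mapping */
		if (page[i].bad)	/* like free_pages_check() != 0 */
			return false;
	}
	return true;
}

/* Slow path: frees a high-order block, like __free_pages_ok(). */
static void free_block(struct fake_page *page, unsigned int order)
{
	if (!prepare_free(page, order))
		return;
	printf("freed block of %u pages\n", 1u << order);
}

/* Fast path: frees a single hot/cold page, like free_hot_cold_page(). */
static void free_single(struct fake_page *page)
{
	if (!prepare_free(page, 0))
		return;
	printf("freed one page\n");
}

int main(void)
{
	struct fake_page pages[4] = { {0, 0}, {0, 0}, {0, 0}, {0, 0} };

	free_block(pages, 2);	/* both callers share prepare_free() */
	free_single(&pages[0]);
	return 0;
}
```

Returning bool from the shared helper lets both callers bail out early on a bad page without duplicating the checks, which is the shape the patch gives free_pages_prepare().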