From 435b405c06119d93333738172b8060b0ed12af41 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Mon, 8 Oct 2012 16:32:16 -0700
Subject: memory-hotplug: fix pages missed by race rather than failing

If a race between allocation and isolation happens during memory-hotplug
offline, some pages can end up on the MIGRATE_MOVABLE free list even
though the migratetype of their pageblock is MIGRATE_ISOLATE.  The race
is detected by get_freepage_migratetype() in
__test_page_isolated_in_pageblock().  Currently, when it is detected,
EBUSY is bubbled all the way up and the hotplug operation fails.

A better idea is, instead of returning and failing memory hot-remove, to
move the free page onto the correct list at the moment the race is
detected.  Although the race is really rare, this improves the success
ratio of the memory hot-remove operation.

Suggested by Mel Gorman.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: Minchan Kim
Cc: KAMEZAWA Hiroyuki
Reviewed-by: Yasuaki Ishimatsu
Acked-by: Mel Gorman
Cc: Xishi Qiu
Cc: Wen Congyang
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/page-isolation.h |  4 ++++
 mm/page_alloc.c                |  2 +-
 mm/page_isolation.c            | 16 ++++++++++++++--
 3 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 105077aa7685..fca8c0a5c188 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -6,6 +6,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype);
+int move_freepages(struct zone *zone,
+                  struct page *start_page, struct page *end_page,
+                  int migratetype);
+
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 94fd283dde98..82f0b2f54f81 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -925,7 +925,7 @@ static int fallbacks[MIGRATE_TYPES][4] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone,
+int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
 {
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 6744235d2d0e..5f34a9053ce0 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -201,8 +201,20 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
-                       if (get_freepage_migratetype(page) != MIGRATE_ISOLATE)
-                               break;
+                       /*
+                        * If race between isolatation and allocation happens,
+                        * some free pages could be in MIGRATE_MOVABLE list
+                        * although pageblock's migratation type of the page
+                        * is MIGRATE_ISOLATE. Catch it and move the page into
+                        * MIGRATE_ISOLATE list.
+                        */
+                       if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
+                               struct page *end_page;
+
+                               end_page = page + (1 << page_order(page)) - 1;
+                               move_freepages(page_zone(page), page, end_page,
+                                              MIGRATE_ISOLATE);
+                       }
                        pfn += 1 << page_order(page);
                }
                else if (page_count(page) == 0 &&
--
cgit v1.2.1
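
For readers following the logic rather than the diff itself, below is a minimal
user-space C sketch of the corrected check loop.  It is not kernel code:
struct fake_page, the freelist field and fixup_stray_free_page() are
hypothetical stand-ins for struct page, the zone free lists and the
move_freepages(..., MIGRATE_ISOLATE) call, and the buddy/pageblock details are
reduced to a single order field.  It only illustrates the behavioral change:
a free page found on the wrong list is repaired and the scan continues,
instead of the old "break" that made the whole offline attempt fail.

/*
 * Minimal user-space model of the fixed check loop (NOT kernel code).
 * A "page" is either on a free list (buddy) or allocated; each free page
 * records the migratetype of the list it currently sits on.  The old code
 * reported failure when that type disagreed with MIGRATE_ISOLATE; the
 * fixed code moves the page onto the isolate list and keeps scanning.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_ISOLATE };

struct fake_page {
        bool buddy;                    /* page heads a free buddy block  */
        int  order;                    /* size of that free block        */
        enum migratetype freelist;     /* list the block currently is on */
        int  refcount;                 /* 0 for free pages               */
};

/* Hypothetical stand-in for move_freepages(..., MIGRATE_ISOLATE). */
static void fixup_stray_free_page(struct fake_page *page)
{
        page->freelist = MIGRATE_ISOLATE;
}

/* Returns true if every page in [0, nr) is free or isolated. */
static bool test_pages_isolated(struct fake_page pages[], int nr)
{
        int pfn = 0;

        while (pfn < nr) {
                struct fake_page *page = &pages[pfn];

                if (page->buddy) {
                        /*
                         * Race fallout: a free block left on the movable
                         * list inside an isolated pageblock.  Instead of
                         * bailing out (the old "break"), repair it.
                         */
                        if (page->freelist != MIGRATE_ISOLATE)
                                fixup_stray_free_page(page);
                        pfn += 1 << page->order;
                } else if (page->refcount == 0) {
                        pfn++;          /* free but not merged yet */
                } else {
                        return false;   /* genuinely pinned page   */
                }
        }
        return true;
}

int main(void)
{
        /* One stray movable block (pages 0-1) and one isolated block. */
        struct fake_page pages[4] = {
                { .buddy = true, .order = 1, .freelist = MIGRATE_MOVABLE },
                { .buddy = true, .order = 1, .freelist = MIGRATE_MOVABLE },
                { .buddy = true, .order = 1, .freelist = MIGRATE_ISOLATE },
                { .buddy = true, .order = 1, .freelist = MIGRATE_ISOLATE },
        };

        printf("range isolated: %s\n",
               test_pages_isolated(pages, 4) ? "yes" : "no");
        printf("stray page moved to isolate list: %s\n",
               pages[0].freelist == MIGRATE_ISOLATE ? "yes" : "no");
        return 0;
}

Built with any C99 compiler (e.g. gcc -std=gnu99), the sketch reports that the
range still counts as isolated and that the stray free page ended up on the
isolate list, which is exactly the outcome the patch aims for instead of an
EBUSY failure.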