author    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2012-10-08 16:32:00 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 16:22:44 +0900
commit    2139cbe627b8910ded55148f87ee10f7485408ed (patch)
tree      e42678cc486717e39391bfc71ca0e5671468210e /mm
parent    770c8aaaf6f04a87e6765f24d497132de9152a46 (diff)
cma: fix counting of isolated pages
Isolated free pages shouldn't be accounted to the NR_FREE_PAGES counter.
Fix it by properly decreasing/increasing NR_FREE_PAGES in
set_migratetype_isolate()/unset_migratetype_isolate() and by removing the
counter adjustment for isolated pages from free_one_page() and
split_free_page().

Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
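The whole patch enforces one bookkeeping rule: free pages sitting in a MIGRATE_ISOLATE pageblock must not be counted in NR_FREE_PAGES. The snippet below is a stand-alone model of that rule, not kernel code (the identifiers only mirror the kernel's); it is meant to show how the pre-patch accounting over-reports free memory once a pageblock is isolated.

#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_ISOLATE };

static long nr_free_pages;              /* models the zone's NR_FREE_PAGES */

/* Pre-patch behaviour: every freed buddy bumps the counter, so pages that
 * end up in an isolated pageblock are still reported as free. */
static void free_pages_old(enum migratetype mt, unsigned int order)
{
	(void)mt;                       /* migratetype ignored: the bug */
	nr_free_pages += 1L << order;
}

/* Post-patch behaviour: isolated pageblocks are skipped, matching the
 * "if (migratetype != MIGRATE_ISOLATE)" checks added in the diff below. */
static void free_pages_new(enum migratetype mt, unsigned int order)
{
	if (mt != MIGRATE_ISOLATE)
		nr_free_pages += 1L << order;
}

int main(void)
{
	nr_free_pages = 0;
	free_pages_old(MIGRATE_ISOLATE, 3);     /* free 8 isolated pages */
	printf("old accounting: %ld pages reported free (wrong)\n",
	       nr_free_pages);

	nr_free_pages = 0;
	free_pages_new(MIGRATE_ISOLATE, 3);
	printf("new accounting: %ld pages reported free (correct)\n",
	       nr_free_pages);
	return 0;
}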
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c		 9
-rw-r--r--	mm/page_isolation.c	12
2 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3f18a14effb8..d259cc2b69c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -691,7 +691,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ if (unlikely(migratetype != MIGRATE_ISOLATE))
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
spin_unlock(&zone->lock);
}
@@ -1392,6 +1393,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
unsigned int order;
unsigned long watermark;
struct zone *zone;
+ int mt;
BUG_ON(!PageBuddy(page));
@@ -1407,7 +1409,10 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
list_del(&page->lru);
zone->free_area[order].nr_free--;
rmv_page_order(page);
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+ mt = get_pageblock_migratetype(page);
+ if (unlikely(mt != MIGRATE_ISOLATE))
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
if (alloc_order != order)
expand(zone, page, alloc_order, order,
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 247d1f175739..3ca1716471bc 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -76,8 +76,12 @@ int set_migratetype_isolate(struct page *page)
out:
if (!ret) {
+ unsigned long nr_pages;
+
set_pageblock_isolate(page);
- move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages);
}
spin_unlock_irqrestore(&zone->lock, flags);
@@ -89,12 +93,14 @@ out:
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
struct zone *zone;
- unsigned long flags;
+ unsigned long flags, nr_pages;
+
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
goto out;
- move_freepages_block(zone, page, migratetype);
+ nr_pages = move_freepages_block(zone, page, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
restore_pageblock_isolate(page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
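On the page_isolation.c side both adjustments reuse the value returned by move_freepages_block(), i.e. the number of free pages whose migratetype was just changed. The following stand-alone model (again not kernel code; the page counts are made up) shows the symmetry: the same count is subtracted from NR_FREE_PAGES when a block is isolated and added back when it is un-isolated.

#include <stdio.h>

static long nr_free_pages = 512;        /* models the zone's NR_FREE_PAGES */

/* Stands in for move_freepages_block(): returns how many free pages were
 * moved to the new migratetype (value invented for the example). */
static unsigned long isolate_or_restore_block(void)
{
	return 128;
}

int main(void)
{
	unsigned long moved;

	/* set_migratetype_isolate(): the moved pages stop counting as free */
	moved = isolate_or_restore_block();
	nr_free_pages -= moved;
	printf("after isolate:   %ld free pages accounted\n", nr_free_pages);

	/* unset_migratetype_isolate(): the same pages are counted again */
	moved = isolate_or_restore_block();
	nr_free_pages += moved;
	printf("after unisolate: %ld free pages accounted\n", nr_free_pages);
	return 0;
}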