Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 093f5fe6dd77..7430df68cb64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -692,7 +692,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-		__count_vm_events(PGACTIVATE, nr_freed);
+		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -984,7 +984,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 		if (!populated_zone(zone))
 			continue;
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		note_zone_scanning_priority(zone, priority);
@@ -1034,7 +1034,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		lru_pages += zone->nr_active + zone->nr_inactive;
@@ -1089,7 +1089,7 @@ out:
 	for (i = 0; zones[i] != 0; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		zone->prev_priority = priority;
@@ -1354,7 +1354,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
-	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
@@ -1369,8 +1369,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-				int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+				int pass, struct scan_control *sc)
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	struct zone *zone;
+	unsigned long ret = 0;
+
+	for_each_zone(zone)
+		ret += zone->nr_active + zone->nr_inactive;
+	return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
	 * in slab caches
 	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;