author    | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2012-12-11 16:00:29 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 17:22:22 -0800
commit    | e5adfffc857788c8b7eca0e98cf1e26f1964b292 (patch)
tree      | 8d1ac37aa649a060055162b59cb06e8ca63f7a68 /mm/page_alloc.c
parent    | 19965460e31c73a934d2c19c152f876a75bdff3e (diff)
download  | talos-obmc-linux-e5adfffc857788c8b7eca0e98cf1e26f1964b292.tar.gz
          | talos-obmc-linux-e5adfffc857788c8b7eca0e98cf1e26f1964b292.zip
mm: use IS_ENABLED(CONFIG_NUMA) instead of NUMA_BUILD
We don't need the custom NUMA_BUILD macro anymore, since we have the
handy IS_ENABLED().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
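For context on why IS_ENABLED() makes the one-off NUMA_BUILD macro redundant: both expand to a constant 0 or 1, so a plain C `if (IS_ENABLED(CONFIG_NUMA))` keeps the guarded code compiled and type-checked even in !NUMA builds while the compiler discards the dead branch, and IS_ENABLED() does this for any Kconfig option without defining a new per-option helper. The standalone sketch below illustrates the token-pasting trick it rests on. It is a simplified illustration of include/linux/kconfig.h, not the actual header (the internal macro names have varied across kernel versions, and the real IS_ENABLED() also treats =m options as enabled); CONFIG_HIGHMEM here is used purely as an example of an undefined option.

/*
 * Standalone sketch of the trick behind IS_ENABLED(): a CONFIG_ macro
 * defined to 1 token-pastes into a placeholder that shifts "0," into
 * the argument list, selecting 1; an undefined option leaves junk in
 * the first argument slot and the selector falls through to 0.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option) /* kernel's also checks option##_MODULE */

#define CONFIG_NUMA 1 /* pretend Kconfig set CONFIG_NUMA=y */
/* CONFIG_HIGHMEM intentionally left undefined */

int main(void)
{
	if (IS_ENABLED(CONFIG_NUMA))    /* constant 1: branch kept */
		printf("NUMA enabled\n");
	if (IS_ENABLED(CONFIG_HIGHMEM)) /* constant 0: branch dropped, still type-checked */
		printf("HIGHMEM enabled\n");
	return 0;
}

Compiled with any C compiler, this prints only "NUMA enabled". The payoff over an #ifdef (or a custom 0/1 macro like NUMA_BUILD) is that the disabled branch never escapes the compiler's front end, so it cannot silently bit-rot.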
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dc018b486b74..a49b0ea3cc2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1871,7 +1871,7 @@ zonelist_scan:
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
-		if (NUMA_BUILD && zlc_active &&
+		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 		if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1917,7 +1917,8 @@ zonelist_scan:
 				    classzone_idx, alloc_flags))
 				goto try_this_zone;
 
-			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+			if (IS_ENABLED(CONFIG_NUMA) &&
+				!did_zlc_setup && nr_online_nodes > 1) {
 				/*
 				 * we do zlc_setup if there are multiple nodes
 				 * and before considering the first zone allowed
@@ -1936,7 +1937,7 @@ zonelist_scan:
 			 * As we may have just activated ZLC, check if the first
 			 * eligible zone has failed zone_reclaim recently.
 			 */
-			if (NUMA_BUILD && zlc_active &&
+			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 
@@ -1962,11 +1963,11 @@ try_this_zone:
 		if (page)
 			break;
 this_zone_full:
-		if (NUMA_BUILD)
+		if (IS_ENABLED(CONFIG_NUMA))
 			zlc_mark_zone_full(zonelist, z);
 	}
 
-	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
 		/* Disable zlc cache for second zonelist scan */
 		zlc_active = 0;
 		goto zonelist_scan;
@@ -2266,7 +2267,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 
 	/* After successful reclaim, reconsider all zones for allocation */
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		zlc_clear_zones_full(zonelist);
 
 retry:
@@ -2412,7 +2413,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (IS_ENABLED(CONFIG_NUMA) &&
+		(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
 		goto nopage;
 
 restart:
@@ -2819,7 +2821,7 @@ unsigned int nr_free_pagecache_pages(void)
 
 static inline void show_node(struct zone *zone)
 {
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		printk("Node %d ", zone_to_nid(zone));
 }