author | Christoph Lameter <clameter@sgi.com> | 2007-10-16 01:25:37 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 09:42:59 -0700 |
commit | 523b945855a1427000ffc707c610abe5947ae607 (patch) | |
tree | 2d84b5b6822a2a20bfd79146c08ce06ac8c80b9b /include | |
parent | 633c0666b5a5c41c376a5a7e4304d638dc48c1b9 (diff) | |
Memoryless nodes: Fix GFP_THISNODE behavior
GFP_THISNODE checks that the zone selected is within the pgdat (node) of the first zone of a zonelist. That only works if the node has memory. A memoryless node will have its first zone on another pgdat (node).

GFP_THISNODE currently just returns memory from that first pgdat, i.e. it hands back memory belonging to other nodes. GFP_THISNODE should instead fail when there is no local memory on the node.
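For reference, the check being described sits in the page allocator's zone scan and is roughly of the following shape (a paraphrased sketch of the pre-patch logic, not code taken from this diff): it treats "same pgdat as the zonelist's first zone" as meaning "local", which is exactly what breaks when the requesting node is memoryless.

```c
/*
 * Sketch of the old hot-path test (paraphrased, not the literal code
 * this patch removes): with __GFP_THISNODE the scan stops at the first
 * zone whose pgdat differs from that of zonelist->zones[0].  For a
 * memoryless node, zones[0] already belongs to a *different* pgdat,
 * so remote memory is mistaken for local memory.
 */
if (NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
		zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)
	break;
```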
Add a new set of zonelists for each node that contain only the zones belonging to the node itself, so that no fallback is possible.
Then modify gfp_zone() to pick up the right zone, and hence zonelist, based on the presence of __GFP_THISNODE.
Drop the existing GFP_THISNODE checks from the page allocator's hot path.
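With the restriction now encoded in the zonelists themselves, the hot path needs no explicit check: callers reach the right zonelist simply by indexing a node's node_zonelists[] array with gfp_zone(). A minimal sketch of that pattern follows (it mirrors the alloc_pages_node() style of this kernel generation; pick_zonelist() itself is illustrative only, not part of this patch):

```c
/*
 * Illustrative helper (not from this patch): select a node's zonelist
 * from a gfp mask.  Without __GFP_THISNODE, gfp_zone() returns ZONE_*
 * and picks one of the first MAX_NR_ZONES zonelists (with fallback).
 * With __GFP_THISNODE on NUMA it returns MAX_NR_ZONES + ZONE_*,
 * picking one of the new no-fallback zonelists.
 */
static struct zonelist *pick_zonelist(int nid, gfp_t gfp_mask)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask);
}
```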
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/gfp.h    | 17
-rw-r--r-- | include/linux/mmzone.h | 14
2 files changed, 25 insertions, 6 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bc68dd9a6d41..12a90a191c11 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -98,22 +98,29 @@ struct vm_area_struct;
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
+	int base = 0;
+
+#ifdef CONFIG_NUMA
+	if (flags & __GFP_THISNODE)
+		base = MAX_NR_ZONES;
+#endif
+
 #ifdef CONFIG_ZONE_DMA
 	if (flags & __GFP_DMA)
-		return ZONE_DMA;
+		return base + ZONE_DMA;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	if (flags & __GFP_DMA32)
-		return ZONE_DMA32;
+		return base + ZONE_DMA32;
 #endif
 	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
 			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return ZONE_MOVABLE;
+		return base + ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
 	if (flags & __GFP_HIGHMEM)
-		return ZONE_HIGHMEM;
+		return base + ZONE_HIGHMEM;
 #endif
-	return ZONE_NORMAL;
+	return base + ZONE_NORMAL;
 }
 
 /*
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f21e5951038b..f6167f2fd7fb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -324,6 +324,17 @@ struct zone {
 #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
 
 #ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled becausse we need zonelists that restrict the
+ * allocations to a single node for GFP_THISNODE.
+ *
+ * [0 .. MAX_NR_ZONES -1] 		: Zonelists with fallback
+ * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1]  : No fallback (GFP_THISNODE)
+ */
+#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+
+
 /*
  * We cache key information from each zonelist for smaller cache
  * footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +400,7 @@ struct zonelist_cache {
 	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
 };
 #else
+#define MAX_ZONELISTS MAX_NR_ZONES
 struct zonelist_cache;
 #endif
 
@@ -455,7 +467,7 @@ extern struct page *mem_map;
 struct bootmem_data;
 typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
-	struct zonelist node_zonelists[MAX_NR_ZONES];
+	struct zonelist node_zonelists[MAX_ZONELISTS];
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	struct page	*node_mem_map;
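The caller-visible effect: a __GFP_THISNODE allocation aimed at a memoryless node now fails instead of quietly handing back another node's pages. A hedged usage example (the surrounding code is illustrative only, not part of this patch):

```c
/*
 * Illustrative caller: request a page strictly from node nid.  After
 * this change, if nid has no local memory its no-fallback zonelist is
 * empty, so the allocation returns NULL rather than memory borrowed
 * from a neighbouring node.
 */
struct page *page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
if (!page)
	pr_debug("no local memory on node %d\n", nid);
```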