author    Dave Hansen <haveblue@us.ibm.com>    2005-10-29 18:16:52 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-10-29 21:40:44 -0700
commit    c6a57e19e464db118dc4ab9cfe9e9748c6d630a0 (patch)
tree      1be192e4c0635c9aa49c8c76d1606e606ecdc9a0 /mm
parent    4ca644d970bf2542623228a4624af356d20ca267 (diff)
[PATCH] memory hotplug prep: fixup bad_range()
When doing memory hotplug operations, the size of existing zones can obviously change.  This means that zone->zone_{start_pfn,spanned_pages} can change.

There are currently no locks that protect these structure members.  However, they are rarely accessed at runtime.  Outside of swsusp, the only place that I can find is bad_range().

So, split bad_range() up into two pieces: one that needs to be locked and another that doesn't.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
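For illustration only, a minimal sketch of how the split could later be used: only the boundary check reads zone_start_pfn/spanned_pages, so only it would need to run under whatever lock eventually protects those fields.  The span_lock name and the locking calls below are assumptions for the sketch; this patch does not introduce any locking.

/*
 * Hypothetical sketch only: "span_lock" is an assumed lock name, not
 * something this patch (or mm/page_alloc.c at this point) provides.
 */
static int bad_range_with_span_lock(struct zone *zone, struct page *page)
{
	int outside;

	/*
	 * Only the span check reads zone_start_pfn/spanned_pages, so only
	 * it needs to be serialized against hotplug resizing the zone.
	 */
	spin_lock(&zone->span_lock);
	outside = page_outside_zone_boundaries(zone, page);
	spin_unlock(&zone->span_lock);

	if (outside)
		return 1;

	/*
	 * page_is_consistent() does not touch the span fields and can
	 * safely run without the lock.
	 */
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}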
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9a2fa8110afc..a51ef94eec33 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -78,21 +78,37 @@ int min_free_kbytes = 1024;
 unsigned long __initdata nr_kernel_pages;
 unsigned long __initdata nr_all_pages;
 
-/*
- * Temporary debugging check for pages not lying within a given zone.
- */
-static int bad_range(struct zone *zone, struct page *page)
+static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
 	if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
 		return 1;
 	if (page_to_pfn(page) < zone->zone_start_pfn)
 		return 1;
+
+	return 0;
+}
+
+static int page_is_consistent(struct zone *zone, struct page *page)
+{
 #ifdef CONFIG_HOLES_IN_ZONE
 	if (!pfn_valid(page_to_pfn(page)))
-		return 1;
+		return 0;
 #endif
 	if (zone != page_zone(page))
+		return 0;
+
+	return 1;
+}
+/*
+ * Temporary debugging check for pages not lying within a given zone.
+ */
+static int bad_range(struct zone *zone, struct page *page)
+{
+	if (page_outside_zone_boundaries(zone, page))
 		return 1;
+	if (!page_is_consistent(zone, page))
+		return 1;
+
 	return 0;
 }
 