path: root/mm/page_alloc.c
author		Frederic Weisbecker <fweisbec@gmail.com>	2013-05-02 17:37:49 +0200
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-05-02 17:54:19 +0200
commit		c032862fba51a3ca504752d3a25186b324c5ce83 (patch)
tree		955dc2ba4ab3df76ecc2bb780ee84aca04967e8d	/mm/page_alloc.c
parent		fda76e074c7737fc57855dd17c762e50ed526052 (diff)
parent		8700c95adb033843fc163d112b9d21d4fda78018 (diff)
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched upstream updates
in order to fix some dependencies.

Merge a common upstream merge point that has these updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	78
1 file changed, 71 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fcced7823fa..98cbdf6e5532 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
+#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <asm/tlbflush.h>
@@ -1397,6 +1398,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
}
+EXPORT_SYMBOL_GPL(split_page);
static int __isolate_free_page(struct page *page, unsigned int order)
{
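Exporting split_page() lets GPL modules break a high-order allocation into
independently freeable order-0 pages. A hypothetical caller (illustrative only,
not part of this patch) might look like:

	/*
	 * Hypothetical module-side usage enabled by the export above.
	 * gfp must not include __GFP_COMP, since split_page() cannot
	 * operate on compound pages.
	 */
	static struct page *alloc_pages_split(gfp_t gfp, unsigned int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (!page)
			return NULL;

		/* Each of the 1 << order pages can now be freed with __free_page(). */
		split_page(page, order);
		return page;
	}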
@@ -1940,9 +1942,24 @@ zonelist_scan:
continue;
default:
/* did we reclaim enough */
- if (!zone_watermark_ok(zone, order, mark,
+ if (zone_watermark_ok(zone, order, mark,
classzone_idx, alloc_flags))
+ goto try_this_zone;
+
+ /*
+ * Failed to reclaim enough to meet watermark.
+ * Only mark the zone full if checking the min
+ * watermark or if we failed to reclaim just
+ * 1<<order pages or else the page allocator
+ * fastpath will prematurely mark zones full
+ * when the watermark is between the low and
+ * min watermarks.
+ */
+ if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
+ ret == ZONE_RECLAIM_SOME)
goto this_zone_full;
+
+ continue;
}
}
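The comment introduced above describes a three-way outcome after zone_reclaim().
Restated as a standalone helper (an illustrative sketch only, not code from this
patch; the constants are placeholders standing in for the kernel's ALLOC_WMARK_*
and ZONE_RECLAIM_* definitions):

	#define ALLOC_WMARK_MIN		0x00	/* placeholder value */
	#define ALLOC_WMARK_MASK	0x03	/* placeholder value */
	#define ZONE_RECLAIM_SOME	0	/* placeholder: reclaimed < 1 << order pages */

	enum zone_decision { TRY_THIS_ZONE, THIS_ZONE_FULL, KEEP_SCANNING };

	static enum zone_decision after_zone_reclaim(int wmark_ok, int alloc_flags,
						     int reclaim_ret)
	{
		if (wmark_ok)
			return TRY_THIS_ZONE;	/* reclaim freed enough pages */

		/*
		 * Only remember the zone as full when checking the min watermark,
		 * or when reclaim recovered fewer than 1 << order pages; otherwise
		 * the fastpath would skip zones whose free pages sit between the
		 * low and min watermarks, exactly as the comment above explains.
		 */
		if ((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN ||
		    reclaim_ret == ZONE_RECLAIM_SOME)
			return THIS_ZONE_FULL;

		return KEEP_SCANNING;		/* move on without caching "full" */
	}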
@@ -2002,6 +2019,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
return;
/*
+ * Walking all memory to count page types is very expensive and should
+ * be inhibited in non-blockable contexts.
+ */
+ if (!(gfp_mask & __GFP_WAIT))
+ filter |= SHOW_MEM_FILTER_PAGE_COUNT;
+
+ /*
* This documents exceptions given to allocations in certain
* contexts that are allowed to allocate outside current's set
* of allowed nodes.
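The flag is only set here; the expensive work it suppresses happens in the
show_mem() path. A sketch of how a consumer of SHOW_MEM_FILTER_PAGE_COUNT might
look (an assumption for illustration, not code that is part of this hunk):

	void show_mem(unsigned int filter)
	{
		show_free_areas(filter);

		/* Counting page types touches every page frame; skip it in atomic contexts. */
		if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
			return;

		/* ... walk every pfn and print totals of reserved/shared/slab pages ... */
	}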
@@ -3105,6 +3129,8 @@ void show_free_areas(unsigned int filter)
printk("= %lukB\n", K(total));
}
+ hugetlb_show_meminfo();
+
printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
show_swap_cache_info();
@@ -4161,10 +4187,23 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
int i, nid;
+ /*
+ * NOTE: The following SMP-unsafe globals are only used early in boot
+ * when the kernel is running single-threaded.
+ */
+ static unsigned long __meminitdata last_start_pfn, last_end_pfn;
+ static int __meminitdata last_nid;
+
+ if (last_start_pfn <= pfn && pfn < last_end_pfn)
+ return last_nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- if (start_pfn <= pfn && pfn < end_pfn)
+ if (start_pfn <= pfn && pfn < end_pfn) {
+ last_start_pfn = start_pfn;
+ last_end_pfn = end_pfn;
+ last_nid = nid;
return nid;
+ }
/* This is a memory hole */
return -1;
}
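The statics added above form a one-entry cache in front of the pfn-range walk,
which is safe without locking only because __early_pfn_to_nid() runs while the
kernel is still single-threaded. A minimal standalone sketch of the same pattern
(the range table is made up for illustration):

	struct pfn_range { unsigned long start, end; int nid; };

	/* Made-up ranges standing in for the memblock-provided pfn ranges. */
	static const struct pfn_range ranges[] = {
		{ 0x00000, 0x40000, 0 },
		{ 0x40000, 0x80000, 1 },
	};

	static int cached_pfn_to_nid(unsigned long pfn)
	{
		/* Single-threaded callers only (e.g. early boot): no locking needed. */
		static unsigned long last_start, last_end;
		static int last_nid = -1;
		unsigned int i;

		if (last_start <= pfn && pfn < last_end)
			return last_nid;	/* repeat lookups in the same range are O(1) */

		for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
			if (ranges[i].start <= pfn && pfn < ranges[i].end) {
				last_start = ranges[i].start;
				last_end = ranges[i].end;
				last_nid = ranges[i].nid;
				return last_nid;
			}
		}
		return -1;			/* memory hole */
	}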
@@ -4710,7 +4749,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
/*
* Figure out the number of possible node ids.
*/
-static void __init setup_nr_node_ids(void)
+void __init setup_nr_node_ids(void)
{
unsigned int node;
unsigned int highest = 0;
@@ -4719,10 +4758,6 @@ static void __init setup_nr_node_ids(void)
highest = node;
nr_node_ids = highest + 1;
}
-#else
-static inline void setup_nr_node_ids(void)
-{
-}
#endif
/**
@@ -5113,6 +5148,35 @@ early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+unsigned long free_reserved_area(unsigned long start, unsigned long end,
+ int poison, char *s)
+{
+ unsigned long pages, pos;
+
+ pos = start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
+ for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
+ if (poison)
+ memset((void *)pos, poison, PAGE_SIZE);
+ free_reserved_page(virt_to_page(pos));
+ }
+
+ if (pages && s)
+ pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
+ s, pages << (PAGE_SHIFT - 10), start, end);
+
+ return pages;
+}
+
+#ifdef CONFIG_HIGHMEM
+void free_highmem_page(struct page *page)
+{
+ __free_reserved_page(page);
+ totalram_pages++;
+ totalhigh_pages++;
+}
+#endif
+
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
* @new_dma_reserve: The number of pages to mark reserved
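The new free_reserved_area() above is the helper architectures call to hand a
reserved virtual range back to the buddy allocator, typically the kernel's .init
sections. A hedged usage sketch (not taken from this patch; the section symbols
are the usual linker-provided bounds and the poison byte is illustrative):

	extern char __init_begin[], __init_end[];

	void free_initmem(void)
	{
		unsigned long freed;

		freed = free_reserved_area((unsigned long)__init_begin,
					   (unsigned long)__init_end,
					   0xcc, "unused kernel");
		/* free_reserved_area() has already logged "Freeing unused kernel memory: ...". */
		(void)freed;
	}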