-rw-r--r--  include/linux/compaction.h  39
-rw-r--r--  include/linux/mmzone.h       9
-rw-r--r--  mm/page_alloc.c              5
3 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3719325c6091..5ac51552d908 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,6 +22,36 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+ zone->compact_considered = 0;
+ zone->compact_defer_shift++;
+
+ if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+ zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+ unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+ /* Avoid possible overflow */
+ if (++zone->compact_considered > defer_limit)
+ zone->compact_considered = defer_limit;
+
+ return zone->compact_considered < defer_limit;
+}
+
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask)
@@ -29,6 +59,15 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
return COMPACT_CONTINUE;
}
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+ return 1;
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
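
Not part of the patch, only an editor's illustration of the backoff these helpers implement: the standalone C program below reproduces the defer_compaction()/compaction_deferred() logic on a toy structure (struct toy_zone and main() are invented scaffolding, not kernel code) and counts how many allocation attempts skip compaction after each consecutive failure. Built with a stock C compiler it should show the skip window growing 1, 3, 7, 15, 31, 63 and then holding steady once compact_defer_shift saturates at COMPACT_MAX_DEFER_SHIFT.

#include <stdbool.h>
#include <stdio.h>

/* Cap from the patch: the backoff shift never exceeds 6. */
#define COMPACT_MAX_DEFER_SHIFT 6

/* Toy stand-in for the two fields the patch adds to struct zone. */
struct toy_zone {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
};

/* Same logic as the patch: reset the counter, widen the backoff window. */
static void defer_compaction(struct toy_zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;
	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true while the current backoff window is still open. */
static bool compaction_deferred(struct toy_zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

int main(void)
{
	struct toy_zone zone = { 0, 0 };

	for (int failure = 1; failure <= 8; failure++) {
		unsigned int skipped = 0;

		defer_compaction(&zone);	/* pretend compaction just failed */
		while (compaction_deferred(&zone))
			skipped++;		/* attempts that would skip compaction */

		printf("after failure %d: %u attempts skipped (shift=%u)\n",
		       failure, skipped, zone.compact_defer_shift);
	}
	return 0;
}
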
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cf9e458e96b0..fd55f725a09e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -321,6 +321,15 @@ struct zone {
unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
+#ifdef CONFIG_COMPACTION
+ /*
+ * On compaction failure, 1<<compact_defer_shift compactions
+ * are skipped before trying again. The number attempted since
+ * last failure is tracked with compact_considered.
+ */
+ unsigned int compact_considered;
+ unsigned int compact_defer_shift;
+#endif
ZONE_PADDING(_pad1_)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cd88a860f088..95ad42de5a87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
{
struct page *page;
- if (!order)
+ if (!order || compaction_deferred(preferred_zone))
return NULL;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
@@ -1785,6 +1785,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
alloc_flags, preferred_zone,
migratetype);
if (page) {
+ preferred_zone->compact_considered = 0;
+ preferred_zone->compact_defer_shift = 0;
count_vm_event(COMPACTSUCCESS);
return page;
}
@@ -1795,6 +1797,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
* but not enough to satisfy watermarks.
*/
count_vm_event(COMPACTFAIL);
+ defer_compaction(preferred_zone);
cond_resched();
}