 include/linux/mmzone.h |  6 ++++++
 init/Kconfig           | 13 +++++++++++++
 mm/page_alloc.c        | 51 +++++++++++++++++++++++++++++++++++++--------------
 3 files changed, 56 insertions(+), 14 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 57700038e669..7d7e4fe0fda8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -33,9 +33,15 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE 0
 #define MIGRATE_MOVABLE   1
 #define MIGRATE_TYPES     2
+#else
+#define MIGRATE_UNMOVABLE 0
+#define MIGRATE_MOVABLE   0
+#define MIGRATE_TYPES     1
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
diff --git a/init/Kconfig b/init/Kconfig
index 54f31a191b88..bab643f7717f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -607,6 +607,19 @@ config BASE_SMALL
 	default 0 if BASE_FULL
 	default 1 if !BASE_FULL
 
+config PAGE_GROUP_BY_MOBILITY
+	bool "Group pages based on their mobility in the page allocator"
+	def_bool y
+	help
+	  The standard allocator will fragment memory over time which means
+	  that high order allocations will fail even if kswapd is running. If
+	  this option is set, the allocator will try and group page types
+	  based on their ability to migrate or reclaim. This is a best effort
+	  attempt at lowering fragmentation which a few workloads care about.
+	  The loss is a more complex allocator that may perform slower. If
+	  you are interested in working with large pages, say Y and set
+	  /proc/sys/vm/min_free_kbytes to 16374. Otherwise say N
+
 menuconfig MODULES
 	bool "Enable loadable module support"
 	help
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3e726bd2858..fea1e3b56c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,6 +158,7 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 static inline int get_pageblock_migratetype(struct page *page)
 {
 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
@@ -174,6 +175,22 @@ static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 	return ((gfp_flags & __GFP_MOVABLE) != 0);
 }
 
+#else
+static inline int get_pageblock_migratetype(struct page *page)
+{
+	return MIGRATE_UNMOVABLE;
+}
+
+static void set_pageblock_migratetype(struct page *page, int migratetype)
+{
+}
+
+static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+{
+	return MIGRATE_UNMOVABLE;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -653,6 +670,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	return 0;
 }
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -709,6 +727,13 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 
 	return NULL;
 }
+#else
+static struct page *__rmqueue_fallback(struct zone *zone, int order,
+						int start_migratetype)
+{
+	return NULL;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
@@ -953,27 +978,25 @@ again:
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
+
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
-		list_for_each_entry(page, &pcp->list, lru) {
-			if (page_private(page) == migratetype) {
-				list_del(&page->lru);
-				pcp->count--;
+		list_for_each_entry(page, &pcp->list, lru)
+			if (page_private(page) == migratetype)
 				break;
-			}
-		}
 
-		/*
-		 * Check if a page of the appropriate migrate type
-		 * was found. If not, allocate more to the pcp list
-		 */
-		if (&page->lru == &pcp->list) {
+		/* Allocate more to the pcp list if necessary */
+		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0, pcp->batch,
 					&pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
-			VM_BUG_ON(page_private(page) != migratetype);
-			list_del(&page->lru);
-			pcp->count--;
 		}
+#else
+		page = list_entry(pcp->list.next, struct page, lru);
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
+		list_del(&page->lru);
+		pcp->count--;
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
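A note on the __rmqueue_fallback() hunk: the fallbacks[] array it walks is only described by its comment in this diff; the array body itself is outside the hunk. The sketch below (self-contained userspace C, illustrative values only) shows the plausible shape of that table given the two migrate types this patch defines, where each type can only fall back to the other.

/*
 * Hedged sketch: the real fallbacks[] initializer is not visible in
 * the hunk above, so this shape is an assumption. With only
 * MIGRATE_UNMOVABLE and MIGRATE_MOVABLE defined, each type has
 * exactly one other type to fall back to.
 */
#include <stdio.h>

#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE   1
#define MIGRATE_TYPES     2

/* fallbacks[type] lists the types tried once type's free lists are empty */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]   = { MIGRATE_UNMOVABLE },
};

int main(void)
{
	printf("unmovable falls back to type %d\n",
	       fallbacks[MIGRATE_UNMOVABLE][0]);
	printf("movable falls back to type %d\n",
	       fallbacks[MIGRATE_MOVABLE][0]);
	return 0;
}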
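The buffered_rmqueue() rework is the subtle part of the patch: the simplified list_for_each_entry() walk leaves the cursor pointing at the list head on a miss, so "&page->lru == &pcp->list" detects that no page of the wanted migratetype was found, and the now-unconditional list_del()/pcp->count-- covers both the hit and the just-refilled cases. Below is a self-contained userspace sketch of that search-or-wrap idiom; the list primitives are simplified stand-ins for the kernel's list_head API and "private" stands in for page_private(), so treat it as an illustration rather than kernel code.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct page {
	struct list_head lru;
	int private;		/* stands in for page_private(page) */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/*
 * Walk the per-cpu list for a page of the wanted migratetype. A miss
 * is detected exactly as in the patch: the cursor wraps back to the
 * list head. Here that case is surfaced as a NULL return.
 */
static struct page *find_page_of_type(struct list_head *pcp_list,
				      int migratetype)
{
	struct list_head *pos;

	for (pos = pcp_list->next; pos != pcp_list; pos = pos->next) {
		struct page *page = container_of(pos, struct page, lru);

		if (page->private == migratetype)
			return page;
	}
	return NULL;	/* cursor reached the head: no matching page */
}

int main(void)
{
	struct list_head pcp = { &pcp, &pcp };
	struct page movable = { .private = 1 };	/* MIGRATE_MOVABLE */

	list_add_tail(&movable.lru, &pcp);

	printf("movable found: %s\n",
	       find_page_of_type(&pcp, 1) ? "yes" : "no");
	printf("unmovable found: %s\n",
	       find_page_of_type(&pcp, 0) ? "yes" : "no");
	return 0;
}

On the real miss path the kernel refills the per-cpu list via rmqueue_bulk() with pages of the wanted migratetype and takes the new head page; the sketch simply reports the miss.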