Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d4096f4a5c1f..76c9688b6a0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
 	if (WARN_ON_ONCE(!mm_percpu_wq))
 		return;
 
-	/* Workqueues cannot recurse */
-	if (current->flags & PF_WQ_WORKER)
-		return;
-
 	/*
 	 * Do not drain if one is already in progress unless it's specific to
 	 * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -2688,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2704,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }
@@ -6253,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
 	pgcnt = 0;
 	for_each_resv_unavail_range(i, &start, &end) {
 		for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+				continue;
 			mm_zero_struct_page(pfn_to_page(pfn));
 			pgcnt++;
 		}
@@ -7656,11 +7665,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated.  So, if we fall through be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to caller.
	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret =0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
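The free_unref_page_list() hunk above bounds how long interrupts stay disabled by briefly re-enabling them after every SWAP_CLUSTER_MAX pages. The sketch below shows the same bounded-critical-section pattern as a self-contained user-space C program; the pthread mutex stands in for the IRQ-disabled section, and BATCH, struct item and free_one() are made-up names used only for illustration, not part of the kernel patch.

	#include <pthread.h>
	#include <stddef.h>

	#define BATCH 32	/* stand-in for SWAP_CLUSTER_MAX */

	struct item {
		struct item *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Placeholder for the real per-item work done while the lock is held. */
	static void free_one(struct item *it)
	{
		(void)it;
	}

	static void free_item_list(struct item *head)
	{
		int batch_count = 0;

		pthread_mutex_lock(&list_lock);
		while (head) {
			struct item *next = head->next;

			free_one(head);
			head = next;

			/*
			 * Bound how long the lock is held: drop it briefly after
			 * every BATCH items, mirroring the local_irq_restore()/
			 * local_irq_save() pair added in the patch.
			 */
			if (++batch_count == BATCH) {
				pthread_mutex_unlock(&list_lock);
				batch_count = 0;
				pthread_mutex_lock(&list_lock);
			}
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		free_item_list(NULL);	/* empty list: nothing to free */
		return 0;
	}

Dropping and immediately retaking the lock gives waiters (or, in the kernel case, pending interrupts) a window to run, while keeping most of the amortized benefit of processing the list in one pass.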

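The zero_resv_unavail() hunk skips pfns whose pageblock has no valid memmap by first rounding the pfn down to its pageblock boundary. Below is a minimal sketch of that rounding, assuming power-of-two alignments; the ALIGN_DOWN definition is the usual mask form written out for illustration, and PAGEBLOCK_PAGES and the sample pfn are made-up values, not taken from the patch.

	#include <stdio.h>

	/* Power-of-two round-down, written out for illustration. */
	#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
	#define PAGEBLOCK_PAGES		512UL	/* e.g. 2MB pageblocks with 4KB pages */

	int main(void)
	{
		unsigned long pfn = 0x12345;	/* arbitrary example pfn */

		printf("pfn %#lx -> pageblock start %#lx\n",
		       pfn, ALIGN_DOWN(pfn, PAGEBLOCK_PAGES));
		return 0;
	}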

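The alloc_contig_range() hunk clears ret after a tolerated -EBUSY so a stale error code cannot leak to the caller once the later test_pages_isolated() check decides the outcome. A minimal, self-contained sketch of that error-handling pattern follows; migrate_step() and verify_step() are invented stand-ins, not kernel functions.

	#include <errno.h>
	#include <stdio.h>

	/* Simulated "best effort" step: reports transiently busy pages. */
	static int migrate_step(void)
	{
		return -EBUSY;
	}

	/* Simulated authoritative check that runs later and succeeds. */
	static int verify_step(void)
	{
		return 0;
	}

	static int contig_alloc_sketch(void)
	{
		int ret;

		ret = migrate_step();
		if (ret && ret != -EBUSY)
			return ret;	/* hard failure: propagate it */
		ret = 0;		/* -EBUSY tolerated; clear the stale code */

		/*
		 * More work happens here in the real function.  Without the
		 * clear above, falling through with ret == -EBUSY could leak
		 * that code to the caller even when the definitive check
		 * below passes.
		 */
		if (verify_step())
			ret = -EBUSY;	/* only the authoritative check sets an error now */

		return ret;
	}

	int main(void)
	{
		printf("result: %d\n", contig_alloc_sketch());
		return 0;
	}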