From b77eab7079d9e477489d2416cceda05d3c1cf21f Mon Sep 17 00:00:00 2001
From: Pavel Tatashin
Date: Thu, 5 Apr 2018 16:22:52 -0700
Subject: mm/memory_hotplug: optimize probe routine

When memory is hotplugged, pages_correctly_reserved() is called to verify
that the added memory is present; this routine traverses every struct page
and verifies that PageReserved() is set. This is a slow operation,
especially if a large amount of memory is added.

Instead of checking every page, it is enough to simply check that the
section is present, has a mapping (the struct page array is allocated),
and the mapping is online.

In addition, we should not expect the probe routine to set flags in struct
page, as the struct pages have not yet been initialized. The
initialization should be done in __init_single_page(), the same as during
boot.

Link: http://lkml.kernel.org/r/20180215165920.8570-5-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin
Acked-by: Michal Hocko
Cc: Ingo Molnar
Cc: Baoquan He
Cc: Bharata B Rao
Cc: Daniel Jordan
Cc: Dan Williams
Cc: Greg Kroah-Hartman
Cc: "H. Peter Anvin"
Cc: Kirill A. Shutemov
Cc: Mel Gorman
Cc: Steven Sistare
Cc: Thomas Gleixner
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/base/memory.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

(limited to 'drivers')

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe4b24f05f6a..deb3f029b451 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
 }
 
 /*
- * The probe routines leave the pages reserved, just as the bootmem code does.
- * Make sure they're still that way.
+ * The probe routines leave the pages uninitialized, just as the bootmem code
+ * does. Make sure we do not access them, but instead use only information from
+ * within sections.
  */
-static bool pages_correctly_reserved(unsigned long start_pfn)
+static bool pages_correctly_probed(unsigned long start_pfn)
 {
-	int i, j;
-	struct page *page;
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	unsigned long section_nr_end = section_nr + sections_per_block;
 	unsigned long pfn = start_pfn;
 
 	/*
@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
 	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
 	 * and assume memmap is contiguous within each section
 	 */
-	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+	for (; section_nr < section_nr_end; section_nr++) {
 		if (WARN_ON_ONCE(!pfn_valid(pfn)))
 			return false;
 
-		page = pfn_to_page(pfn);
-
-		for (j = 0; j < PAGES_PER_SECTION; j++) {
-			if (PageReserved(page + j))
-				continue;
-
-			printk(KERN_WARNING "section number %ld page number %d "
-				"not reserved, was it already online?\n",
-				pfn_to_section_nr(pfn), j);
+		if (!present_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) not present",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (!valid_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (online_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) is already online",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
 			return false;
 		}
+		pfn += PAGES_PER_SECTION;
 	}
 
 	return true;
@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 
 	switch (action) {
 	case MEM_ONLINE:
-		if (!pages_correctly_reserved(start_pfn))
+		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;
 
 		ret = online_pages(start_pfn, nr_pages, online_type);
-- 
cgit v1.2.1
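
As a reading aid (not part of the patch itself), below is the new check
assembled into one piece, roughly as pages_correctly_probed() reads in
drivers/base/memory.c once the hunks above are applied. The comments are
added here for explanation; every identifier comes from the hunks above,
and the function depends on kernel-internal helpers (pfn_to_section_nr(),
present_section_nr(), valid_section_nr(), online_section_nr(),
pfn_valid()), so treat it as a sketch rather than standalone-compilable
code.

static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/* Walk the memory block one section at a time, not one page. */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		/*
		 * The section must have been added by the probe routine
		 * (present), its memmap (struct page array) must be
		 * allocated (valid), and it must not already be online.
		 */
		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}

The cost of the check therefore scales with the number of sections in the
block (sections_per_block iterations) rather than with the number of pages
(sections_per_block * PAGES_PER_SECTION), and no struct page is touched
before __init_single_page() has initialized it.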