From 514caf23a70fd697fa2ece238b2cd8dcc73fb16f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 26 Jun 2019 14:27:13 +0200
Subject: memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag

Add a flags field to struct dev_pagemap to replace the altmap_valid
boolean, making it a little more extensible. Also add a pgmap_altmap()
helper to find the optional altmap, and use it to clean up the code
that deals with the altmap.

Signed-off-by: Christoph Hellwig
Reviewed-by: Ira Weiny
Reviewed-by: Dan Williams
Tested-by: Dan Williams
Signed-off-by: Jason Gunthorpe
---
 arch/powerpc/mm/mem.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

(limited to 'arch/powerpc/mm/mem.c')

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2540d3b2588c..a2923c5c1982 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -131,17 +131,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page;
+	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
 	int ret;
 
-	/*
-	 * If we have an altmap then we need to skip over any reserved PFNs
-	 * when querying the zone.
-	 */
-	page = pfn_to_page(start_pfn);
-	if (altmap)
-		page += vmem_altmap_offset(altmap);
-
 	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
 
 	/* Remove htab bolted mappings for this section of memory */
--
cgit v1.2.1

From 1cfb725fb1899dc6fdc88f8b5354a65e8ad260c6 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Tue, 14 May 2019 09:05:13 +0000
Subject: powerpc/64: flush_inval_dcache_range() becomes flush_dcache_range()

On most arches that have flush_dcache_range(), including PPC32, the
function does a writeback and invalidation of the cache block. On PPC64,
flush_dcache_range() only does a writeback, while
flush_inval_dcache_range() does the invalidation in addition. It also
appears that, within arch/powerpc/, no PPC64 platform uses
flush_dcache_range().

This patch drops the existing 64-bit version of flush_dcache_range() and
renames flush_inval_dcache_range() to flush_dcache_range().
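For context, the semantics of the surviving PPC64 flush_dcache_range()
can be sketched in C roughly as below. This is a simplified illustration,
not the kernel's actual implementation: dcbf() stands in for the "dcbf"
instruction (writeback plus invalidate of one cache line) and mb() for a
sync barrier:

	static inline void flush_dcache_range(unsigned long start,
					      unsigned long stop)
	{
		/* align the start address down to a cache-line boundary */
		void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
		unsigned long size = stop - (unsigned long)addr +
				     (L1_CACHE_BYTES - 1);
		unsigned long i;

		/* walk the range one cache line at a time */
		for (i = 0; i < size >> L1_CACHE_SHIFT;
		     i++, addr += L1_CACHE_BYTES)
			dcbf(addr);	/* writeback + invalidate the line */
		mb();			/* complete all flushes before returning */
	}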
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/mem.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/mm/mem.c')

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 40bd4153ab09..096c87836e29 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -125,7 +125,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
 			start, start + size, rc);
 		return -EFAULT;
 	}
-	flush_inval_dcache_range(start, start + size);
+	flush_dcache_range(start, start + size);
 
 	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
@@ -151,7 +151,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 
 	/* Remove htab bolted mappings for this section of memory */
 	start = (unsigned long)__va(start);
-	flush_inval_dcache_range(start, start + size);
+	flush_dcache_range(start, start + size);
 	ret = remove_section_mapping(start, start + size);
 	WARN_ON_ONCE(ret);
--
cgit v1.2.1

From 03800e0526ee25ed7c843ca1e57b69ac2a5af642 Mon Sep 17 00:00:00 2001
From: Andrea Arcangeli
Date: Tue, 25 Jun 2019 10:17:27 -0400
Subject: powerpc: fix off by one in max_zone_pfn initialization for ZONE_DMA

Commit 25078dc1f74be16b858e914f52cc8f4d03c2271a first introduced an off
by one error in the ZONE_DMA initialization of PPC_BOOK3E_64=y, and since
commit 9739ab7eda459f0669ec9807e0d9be5020bab88c the off by one applies to
PPC32=y too. This simply corrects the off by one and should resolve
crashes like below:

[ 65.179101] page 0x7fff outside node 0 zone DMA [ 0x0 - 0x7fff ]

Unfortunately, in various MM places "max" means a non-inclusive end of
range: the free_area_init_nodes() max_zone_pfn parameter is one case, and
MAX_ORDER is another (unrelated) one that comes to mind. With
ARCH_ZONE_DMA_BITS = 31 and 64K pages (PAGE_SHIFT = 16), for example, the
old expression ((1UL << 31) - 1) >> 16 evaluates to 0x7fff, leaving the
last DMA-able page outside the zone's non-inclusive end, while the
corrected 1UL << (31 - 16) = 0x8000 covers it.

Reported-by: Zorro Lang
Fixes: 25078dc1f74b ("powerpc: use mm zones more sensibly")
Fixes: 9739ab7eda45 ("powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac")
Signed-off-by: Andrea Arcangeli
Reviewed-by: Christoph Hellwig
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20190625141727.2883-1-aarcange@redhat.com
---
 arch/powerpc/mm/mem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/mm/mem.c')

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 6d5f0fc76666..b6beb86e5300 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,7 @@ void __init paging_init(void)
 
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
-				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
+				      1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
--
cgit v1.2.1

From 80ec922dbd87fd38d15719c86a94457204648aeb Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Thu, 18 Jul 2019 15:56:51 -0700
Subject: mm/memory_hotplug: allow arch_remove_memory() without CONFIG_MEMORY_HOTREMOVE

We want to improve error handling while adding memory by allowing the use
of arch_remove_memory() and __remove_pages() even if
CONFIG_MEMORY_HOTREMOVE is not set, to e.g. implement something like:

	arch_add_memory()
	rc = do_something();
	if (rc) {
		arch_remove_memory();
	}

We won't get rid of CONFIG_MEMORY_HOTREMOVE for now, as doing so would
require resolving quite a few dependencies around memory offlining.
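The pattern being enabled can be fleshed out as in the sketch below;
do_something() and the wrapper around it are illustrative placeholders,
not code from this patch, and the signatures follow the powerpc
prototypes visible in the diffs in this log:

	int example_add_memory(int nid, u64 start, u64 size,
			       struct mhp_restrictions *restrictions,
			       struct vmem_altmap *altmap)
	{
		int rc = arch_add_memory(nid, start, size, restrictions);

		if (rc)
			return rc;

		rc = do_something(nid, start, size);	/* hypothetical follow-up step */
		if (rc)
			/* rollback now builds even without CONFIG_MEMORY_HOTREMOVE */
			arch_remove_memory(nid, start, size, altmap);
		return rc;
	}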
Link: http://lkml.kernel.org/r/20190527111152.16324-7-david@redhat.com
Signed-off-by: David Hildenbrand
Reviewed-by: Pavel Tatashin
Cc: Tony Luck
Cc: Fenghua Yu
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman
Cc: Heiko Carstens
Cc: Yoshinori Sato
Cc: Rich Felker
Cc: Dave Hansen
Cc: Andy Lutomirski
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Greg Kroah-Hartman
Cc: "Rafael J. Wysocki"
Cc: Michal Hocko
Cc: David Hildenbrand
Cc: Oscar Salvador
Cc: "Kirill A. Shutemov"
Cc: Alex Deucher
Cc: "David S. Miller"
Cc: Mark Brown
Cc: Chris Wilson
Cc: Christophe Leroy
Cc: Nicholas Piggin
Cc: Vasily Gorbik
Cc: Rob Herring
Cc: Masahiro Yamada
Cc: "mike.travis@hpe.com"
Cc: Andrew Banman
Cc: Arun KS
Cc: Qian Cai
Cc: Mathieu Malaterre
Cc: Baoquan He
Cc: Logan Gunthorpe
Cc: Anshuman Khandual
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: Chintan Pandya
Cc: Dan Williams
Cc: Ingo Molnar
Cc: Jonathan Cameron
Cc: Joonsoo Kim
Cc: Jun Yao
Cc: Mark Rutland
Cc: Mike Rapoport
Cc: Oscar Salvador
Cc: Robin Murphy
Cc: Wei Yang
Cc: Will Deacon
Cc: Yu Zhao
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/mm/mem.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/powerpc/mm/mem.c')

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 26a8da3723bb..9259337d7374 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -125,7 +125,6 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
 	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 
-#ifdef CONFIG_MEMORY_HOTREMOVE
 void __ref arch_remove_memory(int nid, u64 start, u64 size,
 			      struct vmem_altmap *altmap)
 {
@@ -151,7 +150,6 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 		pr_warn("Hash collision while resizing HPT\n");
 }
 #endif
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init mem_topology_setup(void)
--
cgit v1.2.1
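Taken together, the hunks in this log leave powerpc's arch_remove_memory()
looking roughly as below. This is a reconstruction from the diffs above,
not a verbatim copy of the file; it assumes vmem_altmap_offset() tolerates
a NULL altmap (which the unconditional call in the first patch implies),
and the CONFIG_PPC_BOOK3S_64 HPT-resize handling is elided:

	void __ref arch_remove_memory(int nid, u64 start, u64 size,
				      struct vmem_altmap *altmap)
	{
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;
		/* skip any altmap-reserved PFNs when querying the zone */
		struct page *page = pfn_to_page(start_pfn) +
				    vmem_altmap_offset(altmap);
		int ret;

		__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

		/* Remove htab bolted mappings for this section of memory */
		start = (unsigned long)__va(start);
		flush_dcache_range(start, start + size);	/* writeback + invalidate */
		ret = remove_section_mapping(start, start + size);
		WARN_ON_ONCE(ret);

		/* CONFIG_PPC_BOOK3S_64 HPT resizing elided (see last hunk) */
	}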