From 4bbc84ffd137fe43d68aa633d317b0a96de8a828 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Mon, 19 Dec 2016 19:17:08 -0800 Subject: sparc: use symbolic names for tsb indexing Use symbolic names MM_TSB_BASE and MM_TSB_HUGE instead of numeric values 0 and 1 in __tsb_context_switch. Code cleanup only, no functional change. Suggested-by: Sam Ravnborg Signed-off-by: Mike Kravetz Acked-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/include/asm/mmu_context_64.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index b84be675e507..d0317993e947 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa, static inline void tsb_context_switch(struct mm_struct *mm) { __tsb_context_switch(__pa(mm->pgd), - &mm->context.tsb_block[0], + &mm->context.tsb_block[MM_TSB_BASE], #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - (mm->context.tsb_block[1].tsb ? - &mm->context.tsb_block[1] : + (mm->context.tsb_block[MM_TSB_HUGE].tsb ? + &mm->context.tsb_block[MM_TSB_HUGE] : NULL) #else NULL #endif - , __pa(&mm->context.tsb_descr[0])); + , __pa(&mm->context.tsb_descr[MM_TSB_BASE])); } void tsb_grow(struct mm_struct *mm, -- cgit v1.2.1 From 5d0e7705774dd412a465896d08d59a81a345c1e4 Mon Sep 17 00:00:00 2001 From: Tom Hromatka Date: Tue, 10 Jan 2017 10:57:56 -0700 Subject: sparc: Fixed typo in sstate.c. Replaced panicing with panicking Signed-off-by: Tom Hromatka Signed-off-by: David S. Miller --- arch/sparc/kernel/sstate.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c index c59af546f522..3caed4023589 100644 --- a/arch/sparc/kernel/sstate.c +++ b/arch/sparc/kernel/sstate.c @@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) = "Linux powering off"; static const char rebooting_msg[32] __attribute__((aligned(32))) = "Linux rebooting"; -static const char panicing_msg[32] __attribute__((aligned(32))) = - "Linux panicing"; +static const char panicking_msg[32] __attribute__((aligned(32))) = + "Linux panicking"; static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) { @@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = { static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) { - do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg); + do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg); return NOTIFY_DONE; } -- cgit v1.2.1 From 1a8b6d76dc5b489cd0123fa8447b6e20569f357b Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Wed, 18 Jan 2017 08:50:05 +0800 Subject: net:add one common config ARCH_WANT_RELAX_ORDER to support relax ordering Relaxed ordering (RO) is a feature of the 82599 NIC; enabling it can improve performance on some CPU architectures, such as SPARC. Currently the 82599 driver enables RO for only one specific CPU architecture (SPARC), which does not help other architectures that also need the feature. This patch adds one common config option, CONFIG_ARCH_WANT_RELAX_ORDER, to enable the RO feature, with sparc being the first architecture to select it in its Kconfig. Signed-off-by: Mao Wenan Reviewed-by: Alexander Duyck Reviewed-by: Alexander Duyck Signed-off-by: David S. 
Miller --- arch/sparc/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sparc') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index cf4034c66362..68ac5c7cd982 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -44,6 +44,7 @@ config SPARC select CPU_NO_EFFICIENT_FFS select HAVE_ARCH_HARDENED_USERCOPY select PROVE_LOCKING_SMALL if PROVE_LOCKING + select ARCH_WANT_RELAX_ORDER config SPARC32 def_bool !64BIT -- cgit v1.2.1 From 5299709d0a87342dadc1fc9850484fadeb488bf8 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 20 Jan 2017 13:04:01 -0800 Subject: treewide: Constify most dma_map_ops structures Most dma_map_ops structures are never modified. Constify these structures such that these can be write-protected. This patch has been generated as follows: git grep -l 'struct dma_map_ops' | xargs -d\\n sed -i \ -e 's/struct dma_map_ops/const struct dma_map_ops/g' \ -e 's/const struct dma_map_ops {/struct dma_map_ops {/g' \ -e 's/^const struct dma_map_ops;$/struct dma_map_ops;/' \ -e 's/const const struct dma_map_ops /const struct dma_map_ops /g'; sed -i -e 's/const \(struct dma_map_ops intel_dma_ops\)/\1/' \ $(git grep -l 'struct dma_map_ops intel_dma_ops'); sed -i -e 's/const \(struct dma_map_ops dma_iommu_ops\)/\1/' \ $(git grep -l 'struct dma_map_ops' | grep ^arch/powerpc); sed -i -e '/^struct vmd_dev {$/,/^};$/ s/const \(struct dma_map_ops[[:blank:]]dma_ops;\)/\1/' \ -e '/^static void vmd_setup_dma_ops/,/^}$/ s/const \(struct dma_map_ops \*dest\)/\1/' \ -e 's/const \(struct dma_map_ops \*dest = \&vmd->dma_ops\)/\1/' \ drivers/pci/host/*.c sed -i -e '/^void __init pci_iommu_alloc(void)$/,/^}$/ s/dma_ops->/intel_dma_ops./' arch/ia64/kernel/pci-dma.c sed -i -e 's/static const struct dma_map_ops sn_dma_ops/static struct dma_map_ops sn_dma_ops/' arch/ia64/sn/pci/pci_dma.c sed -i -e 's/(const struct dma_map_ops \*)//' drivers/misc/mic/bus/vop_bus.c Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Cc: Benjamin Herrenschmidt Cc: Boris Ostrovsky Cc: David Woodhouse Cc: Juergen Gross Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Russell King Cc: x86@kernel.org Signed-off-by: Doug Ledford --- arch/sparc/include/asm/dma-mapping.h | 8 ++++---- arch/sparc/kernel/iommu.c | 4 ++-- arch/sparc/kernel/ioport.c | 8 ++++---- arch/sparc/kernel/pci_sun4v.c | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 1180ae254154..3d2babc0c4c6 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -18,13 +18,13 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, */ } -extern struct dma_map_ops *dma_ops; -extern struct dma_map_ops *leon_dma_ops; -extern struct dma_map_ops pci32_dma_ops; +extern const struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *leon_dma_ops; +extern const struct dma_map_ops pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 9df997995f6b..c63ba99ca551 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -741,7 +741,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4u_dma_ops = { +static const struct dma_map_ops sun4u_dma_ops = { .alloc = dma_4u_alloc_coherent, .free = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -752,7 +752,7 @@ static struct dma_map_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -struct dma_map_ops *dma_ops = &sun4u_dma_ops; +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); int dma_supported(struct device *dev, u64 device_mask) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 6ffaec44931a..cf20033a1458 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } -static struct dma_map_ops sbus_dma_ops = { +static const struct dma_map_ops sbus_dma_ops = { .alloc = sbus_alloc_coherent, .free = sbus_free_coherent, .map_page = sbus_map_page, @@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } } -struct dma_map_ops pci32_dma_ops = { +const struct dma_map_ops pci32_dma_ops = { .alloc = pci32_alloc_coherent, .free = pci32_free_coherent, .map_page = pci32_map_page, @@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = { EXPORT_SYMBOL(pci32_dma_ops); /* leon re-uses pci32_dma_ops */ -struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; +const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; EXPORT_SYMBOL(leon_dma_ops); -struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index f4daccd12bf5..68bec7c97cb8 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, local_irq_restore(flags); } -static struct dma_map_ops sun4v_dma_ops = { +static const struct dma_map_ops sun4v_dma_ops = { .alloc = dma_4v_alloc_coherent, .free = 
dma_4v_free_coherent, .map_page = dma_4v_map_page, -- cgit v1.2.1 From 815dd18788fe0d41899f51b91d0560279cf16b0d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 20 Jan 2017 13:04:04 -0800 Subject: treewide: Consolidate get_dma_ops() implementations Introduce a new architecture-specific get_arch_dma_ops() function that takes a struct bus_type * argument. Add get_dma_ops() in . Signed-off-by: Bart Van Assche Cc: Benjamin Herrenschmidt Cc: Boris Ostrovsky Cc: David Woodhouse Cc: Juergen Gross Cc: H. Peter Anvin Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Russell King Cc: x86@kernel.org Signed-off-by: Doug Ledford --- arch/sparc/include/asm/dma-mapping.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 3d2babc0c4c6..69cc627779f2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -24,14 +24,14 @@ extern const struct dma_map_ops pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) return leon_dma_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) - if (dev->bus == &pci_bus_type) + if (bus == &pci_bus_type) return &pci32_dma_ops; #endif return dma_ops; -- cgit v1.2.1 From 5437344c2bb81fdb8a5074f15899788eed537655 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Tue, 10 Jan 2017 10:12:55 -0500 Subject: sparc: migrate exception table users onto extable.h This file was using module.h for search_exception_table. We've now separated that content out into its own file "extable.h" so now move over to that. Unlike most other instances, we can't delete the module.h include here since the file needs that for the within_module_init definition. Reported-by: kbuild test robot Cc: "David S. Miller" Cc: sparclinux@vger.kernel.org Signed-off-by: Paul Gortmaker --- arch/sparc/mm/extable.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c index 768a11e6bd4f..db214e9931d9 100644 --- a/arch/sparc/mm/extable.c +++ b/arch/sparc/mm/extable.c @@ -3,6 +3,7 @@ */ #include +#include #include void sort_extable(struct exception_table_entry *start, -- cgit v1.2.1 From 7a7dc961a28b965a0d0303c2e989df17b411708b Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 17 Jan 2017 10:59:02 -0500 Subject: sparc64: Zero pages on allocation for mondo and error queues. Error queues use a non-zero first word to detect if the queues are full. Using pages that have not been zeroed may result in false positive overflow events. These queues are set up once during boot so zeroing all mondo and error queue pages is safe. Note that the false positive overflow does not always occur because the page allocation for these queues is so early in the boot cycle that higher number CPUs get fresh pages. It is only when traps are serviced with lower number CPUs who were given already used pages that this issue is exposed. Signed-off-by: Liam R. Howlett Signed-off-by: David S. 
Miller --- arch/sparc/kernel/irq_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 34a7930b76ef..baed4cdeda75 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) unsigned long order = get_order(size); unsigned long p; - p = __get_free_pages(GFP_KERNEL, order); + p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!p) { prom_printf("SUN4V: Error, cannot allocate queue.\n"); prom_halt(); -- cgit v1.2.1 From 047487241ff59374fded8c477f21453681f5995c Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 17 Jan 2017 10:59:03 -0500 Subject: sparc64: Handle PIO & MEM non-resumable errors. User processes trying to access an invalid memory address via PIO will receive a SIGBUS signal instead of causing a panic. Memory errors will receive a SIGKILL since a SIGBUS may result in a coredump which may attempt to repeat the faulting access. Signed-off-by: Liam R. Howlett Signed-off-by: David S. Miller --- arch/sparc/kernel/traps_64.c | 73 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 496fa926e1e0..d44fb806bbd7 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs) atomic_inc(&sun4v_resum_oflow_cnt); } +/* Given a set of registers, get the virtual addressi that was being accessed + * by the faulting instructions at tpc. + */ +static unsigned long sun4v_get_vaddr(struct pt_regs *regs) +{ + unsigned int insn; + + if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) { + return compute_effective_address(regs, insn, + (insn >> 25) & 0x1f); + } + return 0; +} + +/* Attempt to handle non-resumable errors generated from userspace. + * Returns true if the signal was handled, false otherwise. + */ +bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, + struct sun4v_error_entry *ent) { + + unsigned int attrs = ent->err_attrs; + + if (attrs & SUN4V_ERR_ATTRS_MEMORY) { + unsigned long addr = ent->err_raddr; + siginfo_t info; + + if (addr == ~(u64)0) { + /* This seems highly unlikely to ever occur */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n"); + } else { + unsigned long page_cnt = DIV_ROUND_UP(ent->err_size, + PAGE_SIZE); + + /* Break the unfortunate news. */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n", + addr); + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n", + page_cnt); + + while (page_cnt-- > 0) { + if (pfn_valid(addr >> PAGE_SHIFT)) + get_page(pfn_to_page(addr >> PAGE_SHIFT)); + addr += PAGE_SIZE; + } + } + info.si_signo = SIGKILL; + info.si_errno = 0; + info.si_trapno = 0; + force_sig_info(info.si_signo, &info, current); + + return true; + } + if (attrs & SUN4V_ERR_ATTRS_PIO) { + siginfo_t info; + + info.si_signo = SIGBUS; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *)sun4v_get_vaddr(regs); + force_sig_info(info.si_signo, &info, current); + + return true; + } + + /* Default to doing nothing */ + return false; +} + /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event, clear the first word of the entry, and die. 
*/ @@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) put_cpu(); + if (!(regs->tstate & TSTATE_PRIV) && + sun4v_nonresum_error_user_handled(regs, &local_copy)) { + /* DON'T PANIC: This userspace error was handled. */ + return; + } + #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == cpu) { -- cgit v1.2.1 From b672592f022152155fde7db99aafbcf04a2c3ba5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:51 +0100 Subject: sched/cputime: Remove generic asm headers cputime_t is now only used by two architectures: * powerpc (when CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y) * s390 And since the core doesn't use it anymore, we don't need any arch support from the others. So we can remove their stub implementations. A final cleanup would be to provide an efficient pure arch implementation of cputime_to_nsec() for s390 and powerpc and finally remove include/linux/cputime.h . Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-36-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/sparc/include/asm/Kbuild | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 0569bfac4afb..e9e837bc3158 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -- cgit v1.2.1 From 6d23f8a5d432337aa2590ea8fd5eee8b0bc28eee Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 22 Feb 2017 15:46:13 -0800 Subject: arch, mm: remove arch specific show_mem We have a generic implementation for quite some time already. If there is any arch specific information to be printed then we should add a callback called from the generic code rather than duplicate the whole show_mem. The current code has resulted in the code duplication and the output divergence which is both confusing and adds maintainance costs. Let's just get rid of this mess. Link: http://lkml.kernel.org/r/20170117091543.25850-4-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Guan Xuetao [UniCore32] Acked-by: Helge Deller [for parisc] Acked-by: Chris Metcalf [for tile] Acked-by: Mel Gorman Acked-by: Johannes Weiner Cc: Tony Luck Cc: Fenghua Yu Cc: "James E.J. Bottomley" Cc: "David S. 
Miller" Cc: Hillf Danton Cc: Vlastimil Babka Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/mm/init_32.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index eb8287155279..c6afe98de4d9 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -55,17 +55,6 @@ extern unsigned int sparc_ramdisk_size; unsigned long highstart_pfn, highend_pfn; -void show_mem(unsigned int filter) -{ - printk("Mem-info:\n"); - show_free_areas(filter); - printk("Free swap: %6ldkB\n", - get_nr_swap_pages() << (PAGE_SHIFT-10)); - printk("%ld pages of RAM\n", totalram_pages); - printk("%ld free pages\n", nr_free_pages()); -} - - unsigned long last_valid_pfn; unsigned long calc_highpages(void) -- cgit v1.2.1 From 9af744d743170b5f5ef70031dea8d772d166ab28 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 22 Feb 2017 15:46:16 -0800 Subject: lib/show_mem.c: teach show_mem to work with the given nodemask show_mem() allows to filter out node specific data which is irrelevant to the allocation request via SHOW_MEM_FILTER_NODES. The filtering is done in skip_free_areas_node which skips all nodes which are not in the mems_allowed of the current process. This works most of the time as expected because the nodemask shouldn't be outside of the allocating task but there are some exceptions. E.g. memory hotplug might want to request allocations from outside of the allowed nodes (see new_node_page). Get rid of this hardcoded behavior and push the allocation mask down the show_mem path and use it instead of cpuset_current_mems_allowed. NULL nodemask is interpreted as cpuset_current_mems_allowed. [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20170117091543.25850-5-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Mel Gorman Cc: Hillf Danton Cc: Johannes Weiner Cc: Vlastimil Babka Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/kernel/setup_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index c4e65cb3280f..6f06058c5ae7 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -82,7 +82,7 @@ static void prom_sync_me(void) "nop\n\t" : : "r" (&trapbase)); prom_printf("PROM SYNC COMMAND...\n"); - show_free_areas(0); + show_free_areas(0, NULL); if (!is_idle_task(current)) { local_irq_enable(); sys_sync(); -- cgit v1.2.1 From cbc41b43e7c2ba6503b6b96dfe38e6b1316f46cf Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sat, 14 Jan 2017 18:01:54 +0530 Subject: sparc32: mm: srmmu: add __ro_after_init to sparc32_cachetlb_ops structures The objects viking_ops, viking_sun4d_smp_ops and smp_cachetlb_ops of type sparc32_cachetlb_ops are not modified anywhere after getting modified in the init functions. Inside init their reference is also stored in a pointer of type const struct sparc32_cachetlb_ops *. So these structures are never modified after init, therefore add __ro_after to the declaration of these structures. Signed-off-by: Bhumika Goyal Reviewed-by: Kees Cook Signed-off-by: David S. 
Miller --- arch/sparc/mm/srmmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c7f2a5295b3a..def82f6d626f 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1444,7 +1444,7 @@ static void poke_viking(void) srmmu_set_mmureg(mreg); } -static struct sparc32_cachetlb_ops viking_ops = { +static struct sparc32_cachetlb_ops viking_ops __ro_after_init = { .cache_all = viking_flush_cache_all, .cache_mm = viking_flush_cache_mm, .cache_page = viking_flush_cache_page, @@ -1475,7 +1475,7 @@ static struct sparc32_cachetlb_ops viking_ops = { * flushes going at once will require SMP locking anyways so there's * no real value in trying any harder than this. */ -static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = { +static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = { .cache_all = viking_flush_cache_all, .cache_mm = viking_flush_cache_mm, .cache_page = viking_flush_cache_page, @@ -1759,7 +1759,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) local_ops->sig_insns(mm, insn_addr); } -static struct sparc32_cachetlb_ops smp_cachetlb_ops = { +static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = { .cache_all = smp_flush_cache_all, .cache_mm = smp_flush_cache_mm, .cache_page = smp_flush_cache_page, -- cgit v1.2.1 From fc5e8c283999168942baa5ef7f3805444f80be2b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 4 Feb 2017 16:32:22 +0000 Subject: sparc: topology_64.h: Fix condition for including cpudata.h We currently define macros referring to cpu_data if CONFIG_SMP is defined, but only include the declaration if CONFIG_NUMA is defined. Fixes: 541cc39433a8 ("sparc: fix a building error reported by kbuild") Signed-off-by: Ben Hutchings Acked-by: Gonglei Signed-off-by: David S. Miller --- arch/sparc/include/asm/topology_64.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index 225543000122..ad5293f89680 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h @@ -4,7 +4,6 @@ #ifdef CONFIG_NUMA #include -#include static inline int cpu_to_node(int cpu) { @@ -42,6 +41,9 @@ int __node_distance(int, int); #endif /* !(CONFIG_NUMA) */ #ifdef CONFIG_SMP + +#include + #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) #define topology_core_id(cpu) (cpu_data(cpu).core_id) #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) -- cgit v1.2.1 From 269d8523585c1b82b53aff3cf00d88ccbaf58c35 Mon Sep 17 00:00:00 2001 From: Eric Saint Etienne Date: Mon, 6 Feb 2017 14:32:41 +0000 Subject: sparc64: fix for user probes in high memory When returning from the user probe code into userspace process, PC & NPC are truncated to 32 bits. Due to shared libraries getting loaded very high in the virtual address space of the process, placing a user probe inside a shared library makes the kernel return into the process at the wrong address, causing it to seg'fault most of the time. This patch prevents truncating PC and NPC. Signed-off-by: Eric Saint Etienne Reviewed-by: David Aldridge Signed-off-by: David S. 
Miller --- arch/sparc/include/asm/uprobes.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/uprobes.h b/arch/sparc/include/asm/uprobes.h index f87aae5a908e..36196c17aff8 100644 --- a/arch/sparc/include/asm/uprobes.h +++ b/arch/sparc/include/asm/uprobes.h @@ -42,8 +42,8 @@ struct arch_uprobe { }; struct arch_uprobe_task { - u32 saved_tpc; - u32 saved_tnpc; + u64 saved_tpc; + u64 saved_tnpc; }; struct task_struct; -- cgit v1.2.1 From cffb3e76818fee4763a2ce5f2b1eca2d7885e2cf Mon Sep 17 00:00:00 2001 From: Vijay Kumar Date: Wed, 1 Feb 2017 11:34:37 -0800 Subject: sparc64: Set cpu state to offline when stopped CPU needs to be marked offline before stopping it. When not marked offline, the xcall receives HV_EWOULDBLOCK and so assumes that not all CPUs received the message, and retries. After 10000 retries, it finally fails with fatal mondo timeout. Signed-off-by: Vijay Kumar Signed-off-by: David S. Miller --- arch/sparc/kernel/smp_64.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 0ce347f8e4cc..712bf1b7f630 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1443,6 +1443,7 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) static void stop_this_cpu(void *dummy) { + set_cpu_online(smp_processor_id(), false); prom_stopself(); } @@ -1454,6 +1455,8 @@ void smp_send_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; + + set_cpu_online(cpu, false); #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) { unsigned long hv_err; -- cgit v1.2.1 From 7dd4fcf5b70694dc961eb6b954673e4fc9730dbd Mon Sep 17 00:00:00 2001 From: Vijay Kumar Date: Wed, 1 Feb 2017 11:34:38 -0800 Subject: sparc64: Migrate hvcons irq to panicked cpu On panic, all other CPUs are stopped except the one which had hit panic. To keep console alive, we need to migrate hvcons irq to panicked CPU. Signed-off-by: Vijay Kumar Signed-off-by: David S. Miller --- arch/sparc/include/asm/setup.h | 5 ++++- arch/sparc/kernel/smp_64.c | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 29d64b1758ed..478bf6bb4598 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes; extern atomic_t dcpage_flushes_xcall; extern int sysctl_tsb_ratio; -#endif +#ifdef CONFIG_SERIAL_SUNHV +void sunhv_migrate_hvcons_irq(int cpu); +#endif +#endif void sun_do_break(void); extern int stop_a_enabled; extern int scons_pwroff; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 712bf1b7f630..90a02cb64e20 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1452,8 +1452,12 @@ void smp_send_stop(void) int cpu; if (tlb_type == hypervisor) { + int this_cpu = smp_processor_id(); +#ifdef CONFIG_SERIAL_SUNHV + sunhv_migrate_hvcons_irq(this_cpu); +#endif for_each_online_cpu(cpu) { - if (cpu == smp_processor_id()) + if (cpu == this_cpu) continue; set_cpu_online(cpu, false); -- cgit v1.2.1 From c7d9f77d33a779ad582d8b2284ba007931ebd894 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Wed, 1 Feb 2017 16:16:36 -0800 Subject: sparc64: Multi-page size support Add support for using multiple hugepage sizes simultaneously on mainline. Currently, support for 256M has been added which can be used along with 8M pages. 
Page tables are set like this (e.g. for 256M page): VA + (8M * x) -> PA + (8M * x) (sz bit = 256M) where x in [0, 31] and TSB is set similarly: VA + (4M * x) -> PA + (4M * x) (sz bit = 256M) where x in [0, 63] - Testing Tested on Sonoma (which supports 256M pages) by running stream benchmark instances in parallel: one instance uses 8M pages and another uses 256M pages, consuming 48G each. Boot params used: default_hugepagesz=256M hugepagesz=256M hugepages=300 hugepagesz=8M hugepages=10000 Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/include/asm/page_64.h | 3 +- arch/sparc/include/asm/pgtable_64.h | 23 +++-- arch/sparc/include/asm/tlbflush_64.h | 5 +- arch/sparc/kernel/tsb.S | 21 +---- arch/sparc/mm/hugetlbpage.c | 160 +++++++++++++++++++++++++++++++---- arch/sparc/mm/init_64.c | 42 ++++++++- arch/sparc/mm/tlb.c | 17 ++-- arch/sparc/mm/tsb.c | 44 ++++++++-- 8 files changed, 253 insertions(+), 62 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index c1263fc390db..d76f38d4171b 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,7 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 - +#define HPAGE_256MB_SHIFT 28 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -26,6 +26,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) +#define HUGE_MAX_HSTATE 2 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 314b66851348..7932a4a37817 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -375,7 +375,10 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) #define pgprot_noncached pgprot_noncached #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline unsigned long __pte_huge_mask(void) +extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable); +#define arch_make_huge_pte arch_make_huge_pte +static inline unsigned long __pte_default_huge_mask(void) { unsigned long mask; @@ -395,12 +398,14 @@ static inline unsigned long __pte_huge_mask(void) static inline pte_t pte_mkhuge(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask()); + return __pte(pte_val(pte) | __pte_default_huge_mask()); } -static inline bool is_hugetlb_pte(pte_t pte) +static inline bool is_default_hugetlb_pte(pte_t pte) { - return !!(pte_val(pte) & __pte_huge_mask()); + unsigned long mask = __pte_default_huge_mask(); + + return (pte_val(pte) & mask) == mask; } static inline bool is_hugetlb_pmd(pmd_t pmd) @@ -875,10 +880,12 @@ static inline unsigned long pud_pfn(pud_t pud) /* Actual page table PTE updates. */ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, - pte_t *ptep, pte_t orig, int fullmm); + pte_t *ptep, pte_t orig, int fullmm, + unsigned int hugepage_shift); static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, - pte_t *ptep, pte_t orig, int fullmm) + pte_t *ptep, pte_t orig, int fullmm, + unsigned int hugepage_shift) { /* It is more efficient to let flush_tlb_kernel_range() * handle init_mm tlb flushes. 
@@ -887,7 +894,7 @@ static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, * and SUN4V pte layout, so this inline test is fine. */ if (likely(mm != &init_mm) && pte_accessible(mm, orig)) - tlb_batch_add(mm, vaddr, ptep, orig, fullmm); + tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift); } #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR @@ -906,7 +913,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t orig = *ptep; *ptep = pte; - maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm); + maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT); } #define set_pte_at(mm,addr,ptep,pte) \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index a8e192e90700..54be88a6774c 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -8,7 +8,7 @@ #define TLB_BATCH_NR 192 struct tlb_batch { - bool huge; + unsigned int hugepage_shift; struct mm_struct *mm; unsigned long tlb_nr; unsigned long active; @@ -17,7 +17,8 @@ struct tlb_batch { void flush_tsb_kernel_range(unsigned long start, unsigned long end); void flush_tsb_user(struct tlb_batch *tb); -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge); +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, + unsigned int hugepage_shift); /* TLB flush operations. */ diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index d568c8207af7..10689cfd0ad4 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,26 +117,11 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. */ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -661: sethi %uhi(_PAGE_SZALL_4U), %g7 + sethi %uhi(_PAGE_PMD_HUGE), %g7 sllx %g7, 32, %g7 - .section .sun4v_2insn_patch, "ax" - .word 661b - mov _PAGE_SZALL_4V, %g7 - nop - .previous - - and %g5, %g7, %g2 - -661: sethi %uhi(_PAGE_SZHUGE_4U), %g7 - sllx %g7, 32, %g7 - .section .sun4v_2insn_patch, "ax" - .word 661b - mov _PAGE_SZHUGE_4V, %g7 - nop - .previous - cmp %g2, %g7 - bne,pt %xcc, 60f + andcc %g5, %g7, %g0 + be,pt %xcc, 60f nop /* It is a huge page, use huge page TSB entry address we diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 988acc8b1b80..618a568cc7f2 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -28,6 +28,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, unsigned long pgoff, unsigned long flags) { + struct hstate *h = hstate_file(filp); unsigned long task_size = TASK_SIZE; struct vm_unmapped_area_info info; @@ -38,7 +39,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, info.length = len; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = min(task_size, VA_EXCLUDE_START); - info.align_mask = PAGE_MASK & ~HPAGE_MASK; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); @@ -58,6 +59,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long pgoff, const unsigned long flags) { + struct hstate *h = hstate_file(filp); struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -69,7 +71,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; - info.align_mask = PAGE_MASK & ~HPAGE_MASK; + info.align_mask = PAGE_MASK & 
~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); @@ -94,6 +96,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; @@ -101,7 +104,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; - if (len & ~HPAGE_MASK) + if (len & ~huge_page_mask(h)) return -EINVAL; if (len > task_size) return -ENOMEM; @@ -113,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } if (addr) { - addr = ALIGN(addr, HPAGE_SIZE); + addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (task_size - len >= addr && (!vma || addr + len <= vma->vm_start)) @@ -127,6 +130,112 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, pgoff, flags); } +static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) +{ + return entry; +} + +static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) +{ + unsigned long hugepage_size = _PAGE_SZ4MB_4V; + + pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; + + switch (shift) { + case HPAGE_256MB_SHIFT: + hugepage_size = _PAGE_SZ256MB_4V; + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_SHIFT: + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + default: + WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); + } + + pte_val(entry) = pte_val(entry) | hugepage_size; + return entry; +} + +static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift) +{ + if (tlb_type == hypervisor) + return sun4v_hugepage_shift_to_tte(entry, shift); + else + return sun4u_hugepage_shift_to_tte(entry, shift); +} + +pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writeable) +{ + unsigned int shift = huge_page_shift(hstate_vma(vma)); + + return hugepage_shift_to_tte(entry, shift); +} + +static unsigned int sun4v_huge_tte_to_shift(pte_t entry) +{ + unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V; + unsigned int shift; + + switch (tte_szbits) { + case _PAGE_SZ256MB_4V: + shift = HPAGE_256MB_SHIFT; + break; + case _PAGE_SZ4MB_4V: + shift = REAL_HPAGE_SHIFT; + break; + default: + shift = PAGE_SHIFT; + break; + } + return shift; +} + +static unsigned int sun4u_huge_tte_to_shift(pte_t entry) +{ + unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U; + unsigned int shift; + + switch (tte_szbits) { + case _PAGE_SZ256MB_4U: + shift = HPAGE_256MB_SHIFT; + break; + case _PAGE_SZ4MB_4U: + shift = REAL_HPAGE_SHIFT; + break; + default: + shift = PAGE_SHIFT; + break; + } + return shift; +} + +static unsigned int huge_tte_to_shift(pte_t entry) +{ + unsigned long shift; + + if (tlb_type == hypervisor) + shift = sun4v_huge_tte_to_shift(entry); + else + shift = sun4u_huge_tte_to_shift(entry); + + if (shift == PAGE_SHIFT) + WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n", + pte_val(entry)); + + return shift; +} + +static unsigned long huge_tte_to_size(pte_t pte) +{ + unsigned long size = 1UL << huge_tte_to_shift(pte); + + if (size == REAL_HPAGE_SIZE) + size = HPAGE_SIZE; + return size; +} + pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { @@ -160,35 +269,54 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { 
+ unsigned int i, nptes, hugepage_shift; + unsigned long size; pte_t orig; + size = huge_tte_to_size(entry); + nptes = size >> PMD_SHIFT; + if (!pte_present(*ptep) && pte_present(entry)) - mm->context.hugetlb_pte_count++; + mm->context.hugetlb_pte_count += nptes; - addr &= HPAGE_MASK; + addr &= ~(size - 1); orig = *ptep; - *ptep = entry; + hugepage_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig); - /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ - maybe_tlb_batch_add(mm, addr, ptep, orig, 0); - maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0); + for (i = 0; i < nptes; i++) + ptep[i] = __pte(pte_val(entry) + (i << PMD_SHIFT)); + + maybe_tlb_batch_add(mm, addr, ptep, orig, 0, hugepage_shift); + /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ + if (size == HPAGE_SIZE) + maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0, + hugepage_shift); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + unsigned int i, nptes, hugepage_shift; + unsigned long size; pte_t entry; entry = *ptep; + size = huge_tte_to_size(entry); + nptes = size >> PMD_SHIFT; + hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry); + if (pte_present(entry)) - mm->context.hugetlb_pte_count--; + mm->context.hugetlb_pte_count -= nptes; - addr &= HPAGE_MASK; - *ptep = __pte(0UL); + addr &= ~(size - 1); + for (i = 0; i < nptes; i++) + ptep[i] = __pte(0UL); - /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ - maybe_tlb_batch_add(mm, addr, ptep, entry, 0); - maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); + /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ + if (size == HPAGE_SIZE) + maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, + hugepage_shift); return entry; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 5d2f91511c60..7ed3975d04cc 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -324,6 +324,46 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde tsb_insert(tsb, tag, tte); } +#ifdef CONFIG_HUGETLB_PAGE +static int __init setup_hugepagesz(char *string) +{ + unsigned long long hugepage_size; + unsigned int hugepage_shift; + unsigned short hv_pgsz_idx; + unsigned int hv_pgsz_mask; + int rc = 0; + + hugepage_size = memparse(string, &string); + hugepage_shift = ilog2(hugepage_size); + + switch (hugepage_shift) { + case HPAGE_256MB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_256MB; + hv_pgsz_idx = HV_PGSZ_IDX_256MB; + break; + case HPAGE_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_4MB; + hv_pgsz_idx = HV_PGSZ_IDX_4MB; + break; + default: + hv_pgsz_mask = 0; + } + + if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { + pr_warn("hugepagesz=%llu not supported by MMU.\n", + hugepage_size); + goto out; + } + + hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT); + rc = 1; + +out: + return rc; +} +__setup("hugepagesz=", setup_hugepagesz); +#endif /* CONFIG_HUGETLB_PAGE */ + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; @@ -347,7 +387,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && - is_hugetlb_pte(pte)) { + is_hugetlb_pmd(__pmd(pte_val(pte)))) { /* We are fabricating 8MB pages using 4MB real hw pages. 
*/ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index c56a195c9071..afda3bbf7854 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void) } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, - bool exec, bool huge) + bool exec, unsigned int hugepage_shift) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; @@ -84,19 +84,19 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - flush_tsb_user_page(mm, vaddr, huge); + flush_tsb_user_page(mm, vaddr, hugepage_shift); global_flush_tlb_page(mm, vaddr); goto out; } if (nr == 0) { tb->mm = mm; - tb->huge = huge; + tb->hugepage_shift = hugepage_shift; } - if (tb->huge != huge) { + if (tb->hugepage_shift != hugepage_shift) { flush_tlb_pending(); - tb->huge = huge; + tb->hugepage_shift = hugepage_shift; nr = 0; } @@ -110,10 +110,9 @@ out: } void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, - pte_t *ptep, pte_t orig, int fullmm) + pte_t *ptep, pte_t orig, int fullmm, + unsigned int hugepage_shift) { - bool huge = is_hugetlb_pte(orig); - if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); @@ -139,7 +138,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, no_cache_flush: if (!fullmm) - tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge); + tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index e20fbbafb0b0..4ccca32bd1e1 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -86,6 +86,33 @@ static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries, + unsigned int hugepage_shift) +{ + unsigned int hpage_entries; + unsigned int i; + + hpage_entries = 1 << (hugepage_shift - hash_shift); + for (i = 0; i < hpage_entries; i++) + __flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift, + nentries); +} + +static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries, + unsigned int hugepage_shift) +{ + unsigned long i; + + for (i = 0; i < tb->tlb_nr; i++) + __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, + nentries, hugepage_shift); +} +#endif + void flush_tsb_user(struct tlb_batch *tb) { struct mm_struct *mm = tb->mm; @@ -93,7 +120,7 @@ void flush_tsb_user(struct tlb_batch *tb) spin_lock_irqsave(&mm->context.lock, flags); - if (!tb->huge) { + if (tb->hugepage_shift == PAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) @@ -101,24 +128,26 @@ void flush_tsb_user(struct tlb_batch *tb) __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { + else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = 
mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries); + __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries, + tb->hugepage_shift); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); } -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge) +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, + unsigned int hugepage_shift) { unsigned long nentries, base, flags; spin_lock_irqsave(&mm->context.lock, flags); - if (!huge) { + if (hugepage_shift == PAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) @@ -126,12 +155,13 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge) __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { + else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries); + __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, + nentries, hugepage_shift); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); -- cgit v1.2.1 From dcd1912d21a02534d1f0a9005d5ba3283f164780 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Mon, 6 Feb 2017 12:33:26 -0800 Subject: sparc64: Add 64K page size support This patch depends on: [v6] sparc64: Multi-page size support - Testing Tested on Sonoma by running stream benchmark instance which allocated 48G worth of 64K pages. boot params: default_hugepagesz=64K hugepagesz=64K hugepages=1310720 Signed-off-by: Nitin Gupta Signed-off-by: David S. 
Miller --- arch/sparc/include/asm/page_64.h | 3 ++- arch/sparc/mm/hugetlbpage.c | 54 ++++++++++++++++++++++++++++++++-------- arch/sparc/mm/init_64.c | 4 +++ arch/sparc/mm/tsb.c | 5 ++-- 4 files changed, 52 insertions(+), 14 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index d76f38d4171b..f294dd42fc7d 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -18,6 +18,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 #define HPAGE_256MB_SHIFT 28 +#define HPAGE_64K_SHIFT 16 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -26,7 +27,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 2 +#define HUGE_MAX_HSTATE 3 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 618a568cc7f2..605bfceb7d54 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -149,6 +149,9 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) case HPAGE_SHIFT: pte_val(entry) |= _PAGE_PMD_HUGE; break; + case HPAGE_64K_SHIFT: + hugepage_size = _PAGE_SZ64K_4V; + break; default: WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); } @@ -185,6 +188,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) case _PAGE_SZ4MB_4V: shift = REAL_HPAGE_SHIFT; break; + case _PAGE_SZ64K_4V: + shift = HPAGE_64K_SHIFT; + break; default: shift = PAGE_SHIFT; break; @@ -204,6 +210,9 @@ static unsigned int sun4u_huge_tte_to_shift(pte_t entry) case _PAGE_SZ4MB_4U: shift = REAL_HPAGE_SHIFT; break; + case _PAGE_SZ64K_4U: + shift = HPAGE_64K_SHIFT; + break; default: shift = PAGE_SHIFT; break; @@ -241,12 +250,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, { pgd_t *pgd; pud_t *pud; + pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) - pte = (pte_t *)pmd_alloc(mm, pud, addr); + if (pud) { + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + + if (sz == PMD_SHIFT) + pte = (pte_t *)pmd; + else + pte = pte_alloc_map(mm, pmd, addr); + } return pte; } @@ -255,42 +273,52 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; + pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) - pte = (pte_t *)pmd_offset(pud, addr); + if (!pud_none(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) { + if (is_hugetlb_pmd(*pmd)) + pte = (pte_t *)pmd; + else + pte = pte_offset_map(pmd, addr); + } + } } + return pte; } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int i, nptes, hugepage_shift; + unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t orig; size = huge_tte_to_size(entry); - nptes = size >> PMD_SHIFT; + shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT; + nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) mm->context.hugetlb_pte_count += nptes; addr &= ~(size - 1); orig = *ptep; - hugepage_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig); + orig_shift = pte_none(orig) ? 
PAGE_SIZE : huge_tte_to_shift(orig); for (i = 0; i < nptes; i++) - ptep[i] = __pte(pte_val(entry) + (i << PMD_SHIFT)); + ptep[i] = __pte(pte_val(entry) + (i << shift)); - maybe_tlb_batch_add(mm, addr, ptep, orig, 0, hugepage_shift); + maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0, - hugepage_shift); + orig_shift); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, @@ -302,7 +330,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, entry = *ptep; size = huge_tte_to_size(entry); - nptes = size >> PMD_SHIFT; + if (size >= HPAGE_SIZE) + nptes = size >> PMD_SHIFT; + else + nptes = size >> PAGE_SHIFT; + hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry); if (pte_present(entry)) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 7ed3975d04cc..16c1e46e8603 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -345,6 +345,10 @@ static int __init setup_hugepagesz(char *string) hv_pgsz_mask = HV_PGSZ_MASK_4MB; hv_pgsz_idx = HV_PGSZ_IDX_4MB; break; + case HPAGE_64K_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_64K; + hv_pgsz_idx = HV_PGSZ_IDX_64K; + break; default: hv_pgsz_mask = 0; } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 4ccca32bd1e1..e39fc57ad850 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -147,12 +147,13 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, spin_lock_irqsave(&mm->context.lock, flags); - if (hugepage_shift == PAGE_SHIFT) { + if (hugepage_shift < HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries, + hugepage_shift); } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { -- cgit v1.2.1 From 1537b26dab1c1dfd92a116a933143f52b1112a22 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Thu, 16 Feb 2017 15:05:58 -0500 Subject: sparc64: use latency groups to improve add_node_ranges speed add_node_ranges() takes 2.6s - 3.6s per 1T of boot time. On machine with 6T memory it takes 15.4s, on 32T it would take 82s-115s of boot time. This function sets NUMA ids for memory blocks, and scans the whole memory a page at a time to do so. But, we could use values in latency groups mask and match to determine the boundaries without checking every single page. With the fix the add_node_ranges() time is reduced from 15.4s down to 0.2s on machine with 6T memory. Signed-off-by: Pavel Tatashin Reviewed-by: Babu Moger Reviewed-by: Bob Picco Signed-off-by: David S. 
Miller --- arch/sparc/mm/init_64.c | 208 ++++++++++++++++++++++++++---------------------- 1 file changed, 113 insertions(+), 95 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 16c1e46e8603..77446eaf1395 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -829,13 +829,23 @@ static void __init find_ramdisk(unsigned long phys_base) struct node_mem_mask { unsigned long mask; - unsigned long val; + unsigned long match; }; static struct node_mem_mask node_masks[MAX_NUMNODES]; static int num_node_masks; #ifdef CONFIG_NEED_MULTIPLE_NODES +struct mdesc_mlgroup { + u64 node; + u64 latency; + u64 match; + u64 mask; +}; + +static struct mdesc_mlgroup *mlgroups; +static int num_mlgroups; + int numa_cpu_lookup_table[NR_CPUS]; cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; @@ -846,78 +856,129 @@ struct mdesc_mblock { }; static struct mdesc_mblock *mblocks; static int num_mblocks; -static int find_numa_node_for_addr(unsigned long pa, - struct node_mem_mask *pnode_mask); -static unsigned long __init ra_to_pa(unsigned long addr) +static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr) { + struct mdesc_mblock *m = NULL; int i; for (i = 0; i < num_mblocks; i++) { - struct mdesc_mblock *m = &mblocks[i]; + m = &mblocks[i]; if (addr >= m->base && addr < (m->base + m->size)) { - addr += m->offset; break; } } - return addr; + + return m; } -static int __init find_node(unsigned long addr) +static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid) { - static bool search_mdesc = true; - static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; - static int last_index; - int i; + int prev_nid, new_nid; - addr = ra_to_pa(addr); - for (i = 0; i < num_node_masks; i++) { - struct node_mem_mask *p = &node_masks[i]; + prev_nid = -1; + for ( ; start < end; start += PAGE_SIZE) { + for (new_nid = 0; new_nid < num_node_masks; new_nid++) { + struct node_mem_mask *p = &node_masks[new_nid]; - if ((addr & p->mask) == p->val) - return i; - } - /* The following condition has been observed on LDOM guests because - * node_masks only contains the best latency mask and value. - * LDOM guest's mdesc can contain a single latency group to - * cover multiple address range. Print warning message only if the - * address cannot be found in node_masks nor mdesc. - */ - if ((search_mdesc) && - ((addr & last_mem_mask.mask) != last_mem_mask.val)) { - /* find the available node in the mdesc */ - last_index = find_numa_node_for_addr(addr, &last_mem_mask); - numadbg("find_node: latency group for address 0x%lx is %d\n", - addr, last_index); - if ((last_index < 0) || (last_index >= num_node_masks)) { - /* WARN_ONCE() and use default group 0 */ - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0."); - search_mdesc = false; - last_index = 0; + if ((start & p->mask) == p->match) { + if (prev_nid == -1) + prev_nid = new_nid; + break; + } } + + if (new_nid == num_node_masks) { + prev_nid = 0; + WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.", + start); + break; + } + + if (prev_nid != new_nid) + break; } + *nid = prev_nid; - return last_index; + return start > end ? 
end : start; } static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { - *nid = find_node(start); - start += PAGE_SIZE; - while (start < end) { - int n = find_node(start); + u64 ret_end, pa_start, m_mask, m_match, m_end; + struct mdesc_mblock *mblock; + int _nid, i; + + if (tlb_type != hypervisor) + return memblock_nid_range_sun4u(start, end, nid); + + mblock = addr_to_mblock(start); + if (!mblock) { + WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]", + start); + + _nid = 0; + ret_end = end; + goto done; + } + + pa_start = start + mblock->offset; + m_match = 0; + m_mask = 0; + + for (_nid = 0; _nid < num_node_masks; _nid++) { + struct node_mem_mask *const m = &node_masks[_nid]; - if (n != *nid) + if ((pa_start & m->mask) == m->match) { + m_match = m->match; + m_mask = m->mask; break; - start += PAGE_SIZE; + } } - if (start > end) - start = end; + if (num_node_masks == _nid) { + /* We could not find NUMA group, so default to 0, but lets + * search for latency group, so we could calculate the correct + * end address that we return + */ + _nid = 0; - return start; + for (i = 0; i < num_mlgroups; i++) { + struct mdesc_mlgroup *const m = &mlgroups[i]; + + if ((pa_start & m->mask) == m->match) { + m_match = m->match; + m_mask = m->mask; + break; + } + } + + if (i == num_mlgroups) { + WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]", + start); + + ret_end = end; + goto done; + } + } + + /* + * Each latency group has match and mask, and each memory block has an + * offset. An address belongs to a latency group if its address matches + * the following formula: ((addr + offset) & mask) == match + * It is, however, slow to check every single page if it matches a + * particular latency group. As optimization we calculate end value by + * using bit arithmetics. + */ + m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset; + m_end += pa_start & ~((1ul << fls64(m_mask)) - 1); + ret_end = m_end > end ? 
end : m_end; + +done: + *nid = _nid; + return ret_end; } #endif @@ -958,7 +1019,8 @@ static void init_node_masks_nonnuma(void) numadbg("Initializing tables for non-numa.\n"); - node_masks[0].mask = node_masks[0].val = 0; + node_masks[0].mask = 0; + node_masks[0].match = 0; num_node_masks = 1; #ifdef CONFIG_NEED_MULTIPLE_NODES @@ -976,15 +1038,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpumask_lookup_table); EXPORT_SYMBOL(node_data); -struct mdesc_mlgroup { - u64 node; - u64 latency; - u64 match; - u64 mask; -}; -static struct mdesc_mlgroup *mlgroups; -static int num_mlgroups; - static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, u32 cfg_handle) { @@ -1226,41 +1279,6 @@ int __node_distance(int from, int to) return numa_latency[from][to]; } -static int find_numa_node_for_addr(unsigned long pa, - struct node_mem_mask *pnode_mask) -{ - struct mdesc_handle *md = mdesc_grab(); - u64 node, arc; - int i = 0; - - node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); - if (node == MDESC_NODE_NULL) - goto out; - - mdesc_for_each_node_by_name(md, node, "group") { - mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { - u64 target = mdesc_arc_target(md, arc); - struct mdesc_mlgroup *m = find_mlgroup(target); - - if (!m) - continue; - if ((pa & m->mask) == m->match) { - if (pnode_mask) { - pnode_mask->mask = m->mask; - pnode_mask->val = m->match; - } - mdesc_release(md); - return i; - } - } - i++; - } - -out: - mdesc_release(md); - return -1; -} - static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; @@ -1268,7 +1286,7 @@ static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) for (i = 0; i < MAX_NUMNODES; i++) { struct node_mem_mask *n = &node_masks[i]; - if ((grp->mask == n->mask) && (grp->match == n->val)) + if ((grp->mask == n->mask) && (grp->match == n->match)) break; } return i; } @@ -1323,10 +1341,10 @@ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, n = &node_masks[num_node_masks++]; n->mask = candidate->mask; - n->val = candidate->match; + n->match = candidate->match; - numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n", - index, n->mask, n->val, candidate->latency); + numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n", + index, n->mask, n->match, candidate->latency); return 0; } @@ -1423,7 +1441,7 @@ static int __init numa_parse_jbus(void) numa_cpu_lookup_table[cpu] = index; cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu)); node_masks[index].mask = ~((1UL << 36UL) - 1UL); - node_masks[index].val = cpu << 36UL; + node_masks[index].match = cpu << 36UL; index++; } -- cgit v1.2.1 From cd429ce2d095041d249ec85feaed608bbf72154f Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Thu, 16 Feb 2017 15:13:54 -0500 Subject: sparc64: memblock resizes are not handled properly In add_node_ranges(), when a memblock resize happens, the iterator keeps using the previously freed array. This bug causes hangs on machines with over 128 memory blocks during boot, for example, on machines where memory interleaving is small. The problem is seen on T4-4 because it can have 2T of memory, and memory is interleaved at 8G. So we have 2T/8G = 256 regions to set node IDs. The starting size of the regions array is 128. Thus, we have to double at least one time (actually we have to double twice, because some memory is already reserved and thus we need more than 256 regions). We start using an incorrect pointer to the array after the first doubling.
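The idea behind the fix can be shown with a minimal, self-contained C sketch. The types and the set_node_for_range() helper below are hypothetical stand-ins, not the real memblock API; the stand-in only simulates the resize that memblock_set_node() can trigger.

#include <stdlib.h>

struct region { unsigned long base, size; };

struct region_array {
        struct region *regions;         /* may be reallocated (doubled) when full */
        unsigned long cnt, max;
};

/* Hypothetical stand-in for memblock_set_node(): in the real code, setting
 * node info can add regions and double the backing array, which may move
 * (and free) the old allocation.  Here we only simulate the resize;
 * NULL check omitted for brevity.
 */
static void set_node_for_range(struct region_array *a, unsigned long base,
                               unsigned long size)
{
        (void)base; (void)size;         /* a real implementation records the node here */
        if (a->cnt == a->max) {
                a->max *= 2;
                a->regions = realloc(a->regions, a->max * sizeof(*a->regions));
        }
}

static void assign_node_ids(struct region_array *a)
{
        struct region *reg;
        unsigned long prev_max;

restart:
        prev_max = a->max;
        for (reg = a->regions; reg < a->regions + a->cnt; reg++) {
                set_node_for_range(a, reg->base, reg->size);
                /* If the array was resized, 'reg' points into the old
                 * allocation; restart the walk instead of touching it again.
                 */
                if (a->max != prev_max)
                        goto restart;
        }
}

The patch below applies the same restart check inside add_node_ranges(), keyed on memblock.memory.max.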
Signed-off-by: Pavel Tatashin Signed-off-by: Babu Moger Reviewed-by: Babu Moger Signed-off-by: David S. Miller --- arch/sparc/mm/init_64.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 77446eaf1395..ccd455328989 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1126,6 +1126,10 @@ int of_node_to_nid(struct device_node *dp) static void __init add_node_ranges(void) { struct memblock_region *reg; + unsigned long prev_max; + +memblock_resized: + prev_max = memblock.memory.max; for_each_memblock(memory, reg) { unsigned long size = reg->size; @@ -1145,6 +1149,8 @@ static void __init add_node_ranges(void) memblock_set_node(start, this_end - start, &memblock.memory, nid); + if (memblock.memory.max != prev_max) + goto memblock_resized; start = this_end; } } -- cgit v1.2.1 From ac65e2828d03ddf84e9fe1fb6d110d8de933dc22 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Fri, 24 Feb 2017 03:03:16 -0800 Subject: sparc64: Fix build error in flush_tsb_user_page Patch "sparc64: Add 64K page size support" unconditionally used __flush_huge_tsb_one_entry() which is available only when hugetlb support is enabled. Another issue was incorrect TSB flushing for 64K pages in flush_tsb_user(). Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/mm/hugetlbpage.c | 5 +++-- arch/sparc/mm/tsb.c | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 6 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 605bfceb7d54..e98a3f2e8f0f 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -309,7 +309,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, addr &= ~(size - 1); orig = *ptep; - orig_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig); + orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig); for (i = 0; i < nptes; i++) ptep[i] = __pte(pte_val(entry) + (i << shift)); @@ -335,7 +335,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, else nptes = size >> PAGE_SHIFT; - hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry); + hugepage_shift = pte_none(entry) ? 
PAGE_SHIFT : + huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index e39fc57ad850..23479c3d39f0 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -120,12 +120,18 @@ void flush_tsb_user(struct tlb_batch *tb) spin_lock_irqsave(&mm->context.lock, flags); - if (tb->hugepage_shift == PAGE_SHIFT) { + if (tb->hugepage_shift < HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); + if (tb->hugepage_shift == PAGE_SHIFT) + __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); +#if defined(CONFIG_HUGETLB_PAGE) + else + __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries, + tb->hugepage_shift); +#endif } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { @@ -152,8 +158,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries, - hugepage_shift); + if (hugepage_shift == PAGE_SHIFT) + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, + nentries); +#if defined(CONFIG_HUGETLB_PAGE) + else + __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, + nentries, hugepage_shift); +#endif } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { -- cgit v1.2.1 From 7d134b2ce639448199052fd573a324f7e7cd5ed8 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Mon, 27 Feb 2017 14:26:56 -0800 Subject: kprobes: move kprobe declarations to asm-generic/kprobes.h Often all that is needed is these small helpers, instead of compiler.h or a full kprobes.h. This is important for asm helpers, in fact even some asm/kprobes.h make use of these helpers... instead just keep a generic asm file with helpers useful for asm code with the least amount of clutter possible. Likewise we now also need to address what to do about this file both when architectures have CONFIG_HAVE_KPROBES and when they do not, and then when architectures have CONFIG_HAVE_KPROBES but have disabled CONFIG_KPROBES. Right now most asm/kprobes.h do not have guards against CONFIG_KPROBES, this means most architecture code cannot include asm/kprobes.h safely. Correct this and add guards for architectures missing them. Additionally provide architectures that do not have kprobes support with the default asm-generic solution. This lets us force asm/kprobes.h on the header include/linux/kprobes.h always, but most importantly we can now safely include just asm/kprobes.h in architecture code without bringing in the full kitchen sink of header files. Two architectures already provided a guard against CONFIG_KPROBES in their kprobes.h: sh, arch. The rest of the architectures needed guards added. We avoid including any not-needed headers in asm/kprobes.h unless kprobes have been enabled. In a subsequent atomic change we can try to remove compiler.h from include/linux/kprobes.h. During this sweep I've also identified a few architectures defining a common macro needed for both kprobes and ftrace: the definition of the breakpoint instruction. Some refer to this as BREAKPOINT_INSTRUCTION.
This must be kept outside of the #ifdef CONFIG_KPROBES guard. [mcgrof@kernel.org: fix arm64 build] Link: http://lkml.kernel.org/r/CAB=NE6X1WMByuARS4mZ1g9+W=LuVBnMDnh_5zyN0CLADaVh=Jw@mail.gmail.com [sfr@canb.auug.org.au: fixup for kprobes declarations moving] Link: http://lkml.kernel.org/r/20170214165933.13ebd4f4@canb.auug.org.au Link: http://lkml.kernel.org/r/20170203233139.32682-1-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Signed-off-by: Stephen Rothwell Acked-by: Masami Hiramatsu Cc: Arnd Bergmann Cc: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. Peter Anvin Cc: Andy Lutomirski Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/kprobes.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h index a145d798e112..49f8402035d7 100644 --- a/arch/sparc/include/asm/kprobes.h +++ b/arch/sparc/include/asm/kprobes.h @@ -1,13 +1,17 @@ #ifndef _SPARC64_KPROBES_H #define _SPARC64_KPROBES_H +#include + +#define BREAKPOINT_INSTRUCTION 0x91d02070 /* ta 0x70 */ +#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ + +#ifdef CONFIG_KPROBES #include #include typedef u32 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0x91d02070 /* ta 0x70 */ -#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ #define MAX_INSN_SIZE 2 #define kretprobe_blacklist_size 0 @@ -48,4 +52,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, int kprobe_fault_handler(struct pt_regs *regs, int trapnr); asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ #endif /* _SPARC64_KPROBES_H */ -- cgit v1.2.1 From 08a7e621ff81dec64ddf1eab16353c0c217fdd89 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Feb 2017 14:28:41 -0800 Subject: scripts/spelling.txt: add "swith" pattern and fix typo instances Fix typos and add the following to the scripts/spelling.txt: swith||switch swithable||switchable swithed||switched swithing||switching While we are here, fix the "update" to "updates" in the touched hunk in drivers/net/wireless/marvell/mwifiex/wmm.c. Link: http://lkml.kernel.org/r/1481573103-11329-2-git-send-email-yamada.masahiro@socionext.com Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/switch_to_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h index 16f10374feb3..475dd4158ae4 100644 --- a/arch/sparc/include/asm/switch_to_32.h +++ b/arch/sparc/include/asm/switch_to_32.h @@ -9,7 +9,7 @@ extern struct thread_info *current_set[NR_CPUS]; * Flush windows so that the VM switch which follows * would not pull the stack from under us. * - * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) + * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) * XXX WTF is the above comment? Found in late teen 2.4.x. 
*/ #ifdef CONFIG_SMP -- cgit v1.2.1 From 8ab102d60a0c19df602f3758848d55f0703bf9bb Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Feb 2017 14:28:55 -0800 Subject: scripts/spelling.txt: add "partiton" pattern and fix typo instances Fix typos and add the following to the scripts/spelling.txt: partiton||partition Link: http://lkml.kernel.org/r/1481573103-11329-7-git-send-email-yamada.masahiro@socionext.com Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/kernel/visemul.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index c4ac58e483a4..8f35eea2103a 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -30,7 +30,7 @@ /* 001001011 - two 32-bit merges */ #define FPMERGE_OPF 0x04b -/* 000110001 - 8-by-16-bit partitoned product */ +/* 000110001 - 8-by-16-bit partitioned product */ #define FMUL8x16_OPF 0x031 /* 000110011 - 8-by-16-bit upper alpha partitioned product */ -- cgit v1.2.1 From f1f1007644ffc8051a4c11427d58b1967ae7b75a Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 27 Feb 2017 14:30:07 -0800 Subject: mm: add new mmgrab() helper Apart from adding the helper function itself, the rest of the kernel is converted mechanically using: git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/' git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/' This is needed for a later patch that hooks into the helper, but might be a worthwhile cleanup on its own. (Michal Hocko provided most of the kerneldoc comment.) Link: http://lkml.kernel.org/r/20161218123229.22952-1-vegard.nossum@oracle.com Signed-off-by: Vegard Nossum Acked-by: Michal Hocko Acked-by: Peter Zijlstra (Intel) Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/kernel/leon_smp.c | 2 +- arch/sparc/kernel/smp_64.c | 2 +- arch/sparc/kernel/sun4d_smp.c | 2 +- arch/sparc/kernel/sun4m_smp.c | 2 +- arch/sparc/kernel/traps_32.c | 2 +- arch/sparc/kernel/traps_64.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/sparc') diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 71e16f2241c2..b99d33797e1d 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 90a02cb64e20..8e3e13924594 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -122,7 +122,7 @@ void smp_callin(void) current_thread_info()->new_child = 0; /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 9d98e5002a09..7b55c50eabe5 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg) show_leds(cpuid); /* Attach to the address space of init_task. 
*/ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; local_ops->cache_all(); diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 278c40abce82..633c4cf6fdb0 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 4f21df7d4f13..ecddac5a4c96 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -448,7 +448,7 @@ void trap_init(void) thread_info_offsets_are_bolixed_pete(); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* NOTE: Other cpus have this done as they are started diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index dfc97a47c9a0..e022d7b00390 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2837,6 +2837,6 @@ void __init trap_init(void) /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; } -- cgit v1.2.1
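The sparc hunks above only show the call-site conversion; the mmgrab() helper itself lands in the core mm headers, outside this diffstat. Based on the sed rule in the commit message, it presumably amounts to a thin inline wrapper around the mm_count increment, roughly like the sketch below (not the exact upstream definition, which also carries the kerneldoc comment mentioned above).

/* Sketch of the helper implied by the conversion above: it pins the
 * mm_struct itself via mm_count (keeping the struct allocated), as opposed
 * to mm_users, which keeps the address space contents alive.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

With such a helper in place, call sites read mmgrab(&init_mm) instead of open-coding atomic_inc(&init_mm.mm_count), which is exactly the substitution the hunks above perform.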