Diffstat (limited to 'arch/mips/include')
 arch/mips/include/asm/Kbuild                       |  1 +
 arch/mips/include/asm/fpu.h                        |  2 +-
 arch/mips/include/asm/mach-bcm63xx/dma-coherence.h | 10 ----------
 arch/mips/include/asm/mach-sibyte/war.h            |  3 +--
 arch/mips/include/asm/mm-arch-hooks.h              | 15 ---------------
 arch/mips/include/asm/pgtable.h                    | 31 +++++++++++++++++++++++++++++++
 arch/mips/include/asm/smp.h                        |  2 --
 arch/mips/include/asm/stackframe.h                 | 25 +++++++++++++++++++++++++
 arch/mips/include/asm/switch_to.h                  | 48 +++++++++++++++++++++++-------------------------
 arch/mips/include/uapi/asm/sigcontext.h            |  4 ++--
 10 files changed, 84 insertions(+), 57 deletions(-)
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7fe5c61a3cb8..1f8546081d20 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
 generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mutex.h
 generic-y += parport.h
 generic-y += percpu.h
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 084780b355aa..1b0625189835 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
 		goto fr_common;
 
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
       || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush	bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 0a227d426b9c..520f8fc2c806 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -13,8 +13,7 @@
 #define R4600_V2_HIT_CACHEOP_WAR	0
 #define R5432_CP0_INTERRUPT_WAR		0
 
-#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
-    defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
+#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
 
 #ifndef __ASSEMBLY__
 extern int sb1250_m3_workaround_needed(void);
diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b5609fe8e475..000000000000
--- a/arch/mips/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
-#define _ASM_MIPS_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+			"	.set	push\n"
+			"	.set	noreorder\n"
+			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+			"	bnez	%[tmp], 2f\n"
+			"	 or	%[tmp], %[tmp], %[global]\n"
+			"	" SC_INSN "	%[tmp], %[buddy]\n"
+			"	beqz	%[tmp], 1b\n"
+			"	 nop\n"
+			"2:\n"
+			"	.set	pop"
+			: [buddy] "+m" (buddy->pte),
+			  [tmp] "=&r" (tmp)
+			: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
 		.set	noreorder
 		bltz	k0, 8f
 		 move	k1, sp
+#ifdef CONFIG_EVA
+		/*
+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
+		 *
+		 * The RPS isn't automatically flushed when exceptions are
+		 * taken, which can result in kernel mode speculative accesses
+		 * to user addresses if the RPS mispredicts. That's harmless
+		 * when user and kernel share the same address space, but with
+		 * EVA the same user segments may be unmapped to kernel mode,
+		 * even containing sensitive MMIO regions or invalid memory.
+		 *
+		 * This can happen when the kernel sets the return address to
+		 * ret_from_* and jr's to the exception handler, which looks
+		 * more like a tail call than a function call. If nested calls
+		 * don't evict the last user address in the RPS, it will
+		 * mispredict the return and fetch from a user controlled
+		 * address into the icache.
+		 *
+		 * More recent EVA-capable cores with MAAR to restrict
+		 * speculative accesses aren't affected.
+		 */
+		MFC0	k0, CP0_ENTRYHI
+		MTC0	k0, CP0_ENTRYHI
+#endif
 		.set	reorder
 		/* Called from user mode, new stack. */
 		get_saved_sp
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 7163cd7fdd69..9733cd0266e4 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -83,45 +83,43 @@ do {	if (cpu_has_rw_llb) {						\
 	}								\
 } while (0)
 
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last)					\
 do {									\
-	u32 __c0_stat;							\
 	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
-	if (cpu_has_dsp)						\
+	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
-	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
-		if (cop2_lazy_restore)					\
-			KSTK_STATUS(prev) &= ~ST0_CU2;			\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_save(prev);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
+		__restore_dsp(next);					\
+	}								\
+	if (cop2_present) {						\
+		set_c0_status(ST0_CU2);					\
+		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
+			if (cop2_lazy_restore)				\
+				KSTK_STATUS(prev) &= ~ST0_CU2;		\
+			cop2_save(prev);				\
+		}							\
+		if (KSTK_STATUS(next) & ST0_CU2 &&			\
+		    !cop2_lazy_restore) {				\
+			cop2_restore(next);				\
+		}							\
+		clear_c0_status(ST0_CU2);				\
 	}								\
 	__clear_software_ll_bit();					\
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
 		__fpsave = FP_SAVE_SCALAR;				\
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
 		__fpsave = FP_SAVE_VECTOR;				\
-	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
-} while (0)
-
-#define finish_arch_switch(prev)					\
-do {									\
-	u32 __c0_stat;							\
-	if (cop2_present && !cop2_lazy_restore &&			\
-	    (KSTK_STATUS(current) & ST0_CU2)) {				\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_restore(current);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
-	}								\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 	if (cpu_has_userlocal)						\
-		write_c0_userlocal(current_thread_info()->tp_value);	\
+		write_c0_userlocal(task_thread_info(next)->tp_value);	\
 	__restore_watch();						\
 	disable_msa();							\
+	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f59c6e..9081d88ae44f 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -16,7 +16,7 @@
 
 /*
  * Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
  */
 struct sigcontext {
 	unsigned int		sc_regmask;	/* Unused */
@@ -46,7 +46,7 @@ struct sigcontext {
 #include <linux/posix_types.h>
 /*
  * Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
  *
  * Warning: this structure illdefined with sc_badvaddr being just an unsigned
  * int so it was changed to unsigned long in 2.6.0-test1. This may break
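
A note on the set_pte() hunk above: the LL/SC loop turns "set the buddy PTE to _PAGE_GLOBAL only if it is still none" into a single atomic step, so two CPUs faulting in the two halves of a buddy pair can no longer lose an update between the pte_none() test and the store. The same idiom can be written as a compare-and-swap. Below is a minimal userspace sketch using C11 atomics, not kernel code; buddy_set_global and the PAGE_GLOBAL value are illustrative placeholders:

    #include <stdatomic.h>
    #include <stdint.h>

    #define PAGE_GLOBAL 0x1UL    /* placeholder, not the real MIPS _PAGE_GLOBAL */

    /*
     * Atomically set an empty buddy PTE to PAGE_GLOBAL. Mirrors the
     * lld/bnez/scd loop above: if the entry is already non-none we
     * leave it alone (it must already be global); otherwise we install
     * the bit. A strong CAS cannot fail spuriously, so where the
     * assembly retries on a failed sc, no explicit loop is needed here.
     */
    static void buddy_set_global(_Atomic uintptr_t *buddy)
    {
        uintptr_t expected = 0;
        atomic_compare_exchange_strong(buddy, &expected, PAGE_GLOBAL);
    }

The #ifdef CONFIG_64BIT block in the hunk exists for the same reason the sketch uses uintptr_t: the 64-bit kernel must use the doubleword lld/scd pair to cover the full PTE, while 32-bit kernels use ll/sc.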
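
The cop2 rework in switch_to.h also explains the disappearing u32 __c0_stat temporary: instead of open-coding a read-modify-write of the Status register around cop2_save()/cop2_restore(), the new code uses the set_c0_status()/clear_c0_status() helpers (generated by __BUILD_SET_C0(status) in asm/mipsregs.h). A simplified sketch of what those helpers boil down to, assuming read_c0_status()/write_c0_status() as the underlying accessors:

    /* The real accessors are macros in asm/mipsregs.h. */
    extern unsigned int read_c0_status(void);
    extern void write_c0_status(unsigned int val);

    /* Grant coprocessor 2 access: OR bits into c0_status, return the old value. */
    static inline unsigned int set_c0_status_sketch(unsigned int bits)
    {
        unsigned int old = read_c0_status();
        write_c0_status(old | bits);    /* e.g. bits == ST0_CU2 */
        return old;
    }

    /* Revoke access again once the cop2 context has been saved/restored. */
    static inline unsigned int clear_c0_status_sketch(unsigned int bits)
    {
        unsigned int old = read_c0_status();
        write_c0_status(old & ~bits);
        return old;
    }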
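
The comment added ahead of switch_to() is the key to the whole reordering. In heavily simplified form, the control flow it describes looks like this; the names below are illustrative, not the real MIPS implementation:

    struct task;
    extern void restore_per_thread_state(struct task *next);
    extern struct task *resume(struct task *prev, struct task *next);

    static struct task *context_switch_sketch(struct task *prev, struct task *next)
    {
        /* Runs for every incoming task, freshly created or not. */
        restore_per_thread_state(next);
        /*
         * resume() switches kernel stacks. For a brand-new task the saved
         * return address points at ret_from_fork/ret_from_kernel_thread,
         * so execution resumes there and any code placed after resume()
         * would be skipped on the task's first switch-in. That is why the
         * body of finish_arch_switch() was folded in before the resume()
         * call, and why write_c0_userlocal() now names next explicitly
         * instead of relying on current.
         */
        return resume(prev, next);
    }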