Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 13
-rw-r--r--  arch/mips/include/asm/cpu.h          |  1
-rw-r--r--  arch/mips/include/asm/mipsregs.h     |  4
-rw-r--r--  arch/mips/include/asm/mmu.h          |  6
-rw-r--r--  arch/mips/include/asm/mmu_context.h  | 54
5 files changed, 73 insertions, 5 deletions
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 701e525641b8..6998a9796499 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -591,6 +591,19 @@
 #endif /* CONFIG_MIPS_MT_SMP */
 
 /*
+ * We only enable MMID support for configurations which natively support 64 bit
+ * atomics because getting good performance from the allocator relies upon
+ * efficient atomic64_*() functions.
+ */
+#ifndef cpu_has_mmid
+# ifdef CONFIG_GENERIC_ATOMIC64
+#  define cpu_has_mmid	0
+# else
+#  define cpu_has_mmid	__isa_ge_and_opt(6, MIPS_CPU_MMID)
+# endif
+#endif
+
+/*
  * Guest capabilities
  */
 #ifndef cpu_guest_has_conf1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 532b49b1dbb3..6ad7d3cabd91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -422,6 +422,7 @@ enum cpu_type_enum {
 				MBIT_ULL(55)	/* CPU shares FTLB entries with another */
 #define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
 				MBIT_ULL(56)	/* CPU has perf counters implemented per TC (MIPSMT ASE) */
+#define MIPS_CPU_MMID		MBIT_ULL(57)	/* CPU supports MemoryMapIDs */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 900a47581dd1..1e6966e8527e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -667,6 +667,7 @@
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_CA2		(_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI		(_ULCAST_(1) << 17)
 #define MIPS_CONF5_CRCP		(_ULCAST_(1) << 18)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
 #define MIPS_CONF5_EVA		(_ULCAST_(1) << 28)
@@ -1610,6 +1611,9 @@ do {									\
 #define read_c0_xcontextconfig()	__read_ulong_c0_register($4, 3)
 #define write_c0_xcontextconfig(val)	__write_ulong_c0_register($4, 3, val)
 
+#define read_c0_memorymapid()		__read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val)	__write_32bit_c0_register($4, 5, val)
+
 #define read_c0_pagemask()		__read_32bit_c0_register($5, 0)
 #define write_c0_pagemask(val)		__write_32bit_c0_register($5, 0, val)
 
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 88a108ce62c1..5df0238f639b 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,11 @@
 #include <linux/wait.h>
 
 typedef struct {
-	u64 asid[NR_CPUS];
+	union {
+		u64 asid[NR_CPUS];
+		atomic64_t mmid;
+	};
+
 	void *vdso;
 
 	/* lock to be held whilst modifying fp_bd_emupage_allocmap */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index a0f29df8ced8..cddead91acd4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 
+#include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/dsemul.h>
+#include <asm/ginvt.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@ extern unsigned long pgd_current[];
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
 
 /*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED	0
+
+/*
  * All unused by hardware upper bits will be considered
  * as a software asid extension.
  */
@@ -90,13 +105,19 @@ static inline u64 asid_first_version(unsigned int cpu)
 
 static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
 {
+	if (cpu_has_mmid)
+		return atomic64_read(&mm->context.mmid);
+
 	return mm->context.asid[cpu];
 }
 
 static inline void set_cpu_context(unsigned int cpu,
 				   struct mm_struct *mm, u64 ctx)
 {
-	mm->context.asid[cpu] = ctx;
+	if (cpu_has_mmid)
+		atomic64_set(&mm->context.mmid, ctx);
+	else
+		mm->context.asid[cpu] = ctx;
 }
 
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
@@ -120,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		set_cpu_context(i, mm, 0);
+	if (cpu_has_mmid) {
+		set_cpu_context(0, mm, 0);
+	} else {
+		for_each_possible_cpu(i)
+			set_cpu_context(i, mm, 0);
+	}
 
 	mm->context.bd_emupage_allocmap = NULL;
 	spin_lock_init(&mm->context.bd_emupage_lock);
@@ -168,12 +193,33 @@ drop_mmu_context(struct mm_struct *mm)
 {
 	unsigned long flags;
 	unsigned int cpu;
+	u32 old_mmid;
+	u64 ctx;
 
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	if (!cpu_context(cpu, mm)) {
+	ctx = cpu_context(cpu, mm);
+
+	if (!ctx) {
 		/* no-op */
+	} else if (cpu_has_mmid) {
+		/*
+		 * Globally invalidating TLB entries associated with the MMID
+		 * is pretty cheap using the GINVT instruction, so we'll do
+		 * that rather than incur the overhead of allocating a new
+		 * MMID. The latter would be especially difficult since MMIDs
+		 * are global & other CPUs may be actively using ctx.
+		 */
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+		mtc0_tlbw_hazard();
+		ginvt_mmid();
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
 	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		/*
 		 * mm is currently active, so we can't really drop it.
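
Note: the hunks above define the Config5.MI bit (MIPS_CONF5_MI), the MIPS_CPU_MMID option flag and the cpu_has_mmid feature test, but the wiring between them lives in the CPU probe code rather than in these headers. A minimal sketch of that probe-time check, assuming the usual struct cpuinfo_mips and read_c0_config5() interfaces and using a hypothetical helper name, might look roughly like:

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>

/*
 * Illustrative sketch only: advertise MemoryMapID support when the
 * Config5.MI bit is set, so that cpu_has_mmid can evaluate to true
 * on ISA release 6 CPUs that implement MMIDs.
 */
static inline void probe_mmid(struct cpuinfo_mips *c)
{
	unsigned int config5 = read_c0_config5();

	if (config5 & MIPS_CONF5_MI)
		c->options |= MIPS_CPU_MMID;
}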
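
Note: the MMID_KERNEL_WIRED comment explains why one MMID is reserved for kernel wired entries, but no user of it appears in this diff. The sketch below (hypothetical helper name; assumes the standard MIPS TLB accessors from mipsregs.h and hazards.h plus the read/write_c0_memorymapid() macros added above) shows how a wired-entry helper might temporarily switch to the reserved MMID so that a later ginvt targeting a task's MMID cannot invalidate the entry:

#include <linux/irqflags.h>

#include <asm/cpu-features.h>
#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>

/*
 * Hypothetical sketch: install a kernel wired TLB entry tagged with the
 * reserved MMID_KERNEL_WIRED rather than the current task's MMID, so a
 * later ginvt aimed at that task cannot invalidate the wired entry.
 */
static void add_kernel_wired_entry(unsigned long entrylo0,
				   unsigned long entrylo1,
				   unsigned long entryhi,
				   unsigned long pagemask)
{
	unsigned long flags, old_entryhi;
	u32 old_mmid = 0;
	int wired;

	local_irq_save(flags);

	/* Save the current address-space tags */
	old_entryhi = read_c0_entryhi();
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}

	/* Claim the next wired index & write the new entry into it */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	/* Restore the previous tags before returning */
	write_c0_entryhi(old_entryhi);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();

	local_irq_restore(flags);
}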