Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/context.c | 207
1 file changed, 108 insertions, 99 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 4e07eec1270d..7a27d7363be2 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -2,6 +2,9 @@
  * linux/arch/arm/mm/context.c
  *
  * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,14 +17,40 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
 #include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
 
+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ */
+#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
+#define NUM_USER_ASIDS          (ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)       ((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)        ((idx + 1) & ~ASID_MASK)
+
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
 
 #ifdef CONFIG_ARM_LPAE
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
         unsigned long ttbl = __pa(swapper_pg_dir);
         unsigned long ttbh = 0;
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void)
         isb();
 }
 #else
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
         u32 ttb;
         /* Copy TTBR1 into TTBR0 */
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void)
 arch_initcall(contextidr_notifier_init);
 #endif
 
-/*
- * We fork()ed a process, and we need a new context for the child
- * to run in.
- */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static void flush_context(unsigned int cpu)
 {
-        mm->context.id = 0;
-        raw_spin_lock_init(&mm->context.id_lock);
-}
+        int i;
+        u64 asid;
+
+        /* Update the list of reserved ASIDs and the ASID bitmap. */
+        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+        for_each_possible_cpu(i) {
+                if (i == cpu) {
+                        asid = 0;
+                } else {
+                        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+                        __set_bit(ASID_TO_IDX(asid), asid_map);
+                }
+                per_cpu(reserved_asids, i) = asid;
+        }
 
-static void flush_context(void)
-{
-        cpu_set_reserved_ttbr0();
-        local_flush_tlb_all();
-        if (icache_is_vivt_asid_tagged()) {
+        /* Queue a TLB invalidate and flush the I-cache if necessary. */
+        if (!tlb_ops_need_broadcast())
+                cpumask_set_cpu(cpu, &tlb_flush_pending);
+        else
+                cpumask_setall(&tlb_flush_pending);
+
+        if (icache_is_vivt_asid_tagged())
                 __flush_icache_all();
-                dsb();
-        }
 }
 
-#ifdef CONFIG_SMP
+static int is_reserved_asid(u64 asid)
+{
+        int cpu;
+        for_each_possible_cpu(cpu)
+                if (per_cpu(reserved_asids, cpu) == asid)
+                        return 1;
+        return 0;
+}
 
-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+static void new_context(struct mm_struct *mm, unsigned int cpu)
 {
-        unsigned long flags;
+        u64 asid = mm->context.id;
+        u64 generation = atomic64_read(&asid_generation);
 
-        /*
-         * Locking needed for multi-threaded applications where the
-         * same mm->context.id could be set from different CPUs during
-         * the broadcast. This function is also called via IPI so the
-         * mm->context.id_lock has to be IRQ-safe.
-         */
-        raw_spin_lock_irqsave(&mm->context.id_lock, flags);
-        if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+        if (asid != 0 && is_reserved_asid(asid)) {
                 /*
-                 * Old version of ASID found. Set the new one and
-                 * reset mm_cpumask(mm).
+                 * Our current ASID was active during a rollover, we can
+                 * continue to use it and this was just a false alarm.
                  */
-                mm->context.id = asid;
+                asid = generation | (asid & ~ASID_MASK);
+        } else {
+                /*
+                 * Allocate a free ASID. If we can't find one, take a
+                 * note of the currently active ASIDs and mark the TLBs
+                 * as requiring flushes.
+                 */
+                asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                if (asid == NUM_USER_ASIDS) {
+                        generation = atomic64_add_return(ASID_FIRST_VERSION,
+                                                         &asid_generation);
+                        flush_context(cpu);
+                        asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                }
+                __set_bit(asid, asid_map);
+                asid = generation | IDX_TO_ASID(asid);
                 cpumask_clear(mm_cpumask(mm));
         }
-        raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
-        /*
-         * Set the mm_cpumask(mm) bit for the current CPU.
-         */
-        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+        mm->context.id = asid;
 }
 
-/*
- * Reset the ASID on the current CPU. This function call is broadcast
- * from the CPU handling the ASID rollover and holding cpu_asid_lock.
- */
-static void reset_context(void *info)
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
-        unsigned int asid;
+        unsigned long flags;
         unsigned int cpu = smp_processor_id();
-        struct mm_struct *mm = current->active_mm;
 
-        smp_rmb();
-        asid = cpu_last_asid + cpu + 1;
+        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+                __check_kvm_seq(mm);
 
-        flush_context();
-        set_mm_context(mm, asid);
-
-        /* set the new ASID */
-        cpu_switch_mm(mm->pgd, mm);
-}
+        /*
+         * Required during context switch to avoid speculative page table
+         * walking with the wrong TTBR.
+         */
+        cpu_set_reserved_ttbr0();
 
-#else
+        if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+            && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+                goto switch_mm_fastpath;
 
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-        mm->context.id = asid;
-        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
+        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+        /* Check that our ASID belongs to the current generation. */
+        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+                new_context(mm, cpu);
 
-#endif
+        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
-void __new_context(struct mm_struct *mm)
-{
-        unsigned int asid;
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+                local_flush_tlb_all();
+        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-        raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
-        /*
-         * Check the ASID again, in case the change was broadcast from
-         * another CPU before we acquired the lock.
-         */
-        if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
-                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                raw_spin_unlock(&cpu_asid_lock);
-                return;
-        }
-#endif
-        /*
-         * At this point, it is guaranteed that the current mm (with
-         * an old ASID) isn't active on any other CPU since the ASIDs
-         * are changed simultaneously via IPI.
-         */
-        asid = ++cpu_last_asid;
-        if (asid == 0)
-                asid = cpu_last_asid = ASID_FIRST_VERSION;
-
-        /*
-         * If we've used up all our ASIDs, we need
-         * to start a new version and flush the TLB.
-         */
-        if (unlikely((asid & ~ASID_MASK) == 0)) {
-                asid = cpu_last_asid + smp_processor_id() + 1;
-                flush_context();
-#ifdef CONFIG_SMP
-                smp_wmb();
-                smp_call_function(reset_context, NULL, 1);
-#endif
-                cpu_last_asid += NR_CPUS;
-        }
-
-        set_mm_context(mm, asid);
-        raw_spin_unlock(&cpu_asid_lock);
+switch_mm_fastpath:
+        cpu_switch_mm(mm->pgd, mm);
 }
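The new allocator packs a 64-bit context ID as generation bits above ASID_BITS and the hardware ASID below them; an mm needs a fresh ASID exactly when its generation bits no longer match asid_generation. The standalone user-space sketch below (not part of the patch) models that arithmetic. ASID_BITS = 8 and the ASID_MASK definition are assumptions inferred from the 31..8 / 7..0 split in the new Context ID comment; the remaining macros mirror the ones added in the diff.

/*
 * Illustration only: models the generation/ASID arithmetic of the new
 * allocator. ASID_BITS = 8 and ASID_MASK are assumed values taken from
 * the bit-field diagram in the patch; the other macros copy the diff.
 */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS               8
#define ASID_MASK               (~0ULL << ASID_BITS)    /* generation bits */
#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
#define NUM_USER_ASIDS          (ASID_FIRST_VERSION - 1)

#define ASID_TO_IDX(asid)       (((asid) & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)        (((idx) + 1) & ~ASID_MASK)

int main(void)
{
        /* Pretend the allocator is on its third generation. */
        uint64_t generation = 3 * ASID_FIRST_VERSION;

        /* Bitmap index 41 becomes hardware ASID 42 in that generation. */
        uint64_t current_id = generation | IDX_TO_ASID(41);
        uint64_t stale_id   = (2 * ASID_FIRST_VERSION) | IDX_TO_ASID(41);

        printf("hardware ASID: %llu, bitmap index: %llu\n",
               (unsigned long long)(current_id & ~ASID_MASK),
               (unsigned long long)ASID_TO_IDX(current_id));

        /* Non-zero generation bits after XOR mean "allocated before a rollover". */
        printf("current generation stale? %d\n",
               ((current_id ^ generation) >> ASID_BITS) != 0);
        printf("previous generation stale? %d\n",
               ((stale_id ^ generation) >> ASID_BITS) != 0);
        return 0;
}

This is the same test check_and_switch_context() applies with (mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS before deciding whether new_context() must allocate from the bitmap.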