Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--  arch/arm/include/asm/mmu_context.h  |  82
1 file changed, 3 insertions(+), 79 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..a64f61cb23d1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -24,84 +24,8 @@ void __check_kvm_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * | process ID              |   ASID    |
- * +-------------------------+-----------+
- * | context ID              |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS              8
-#define ASID_MASK              ((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION     (1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-        unsigned long flags;
-
-        __new_context(mm);
-
-        local_irq_save(flags);
-        cpu_switch_mm(mm->pgd, mm);
-        local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-                                            struct task_struct *tsk)
-{
-        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-                __check_kvm_seq(mm);
-
-        /*
-         * Required during context switch to avoid speculative page table
-         * walking with the wrong TTBR.
-         */
-        cpu_set_reserved_ttbr0();
-
-        if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-                /*
-                 * The ASID is from the current generation, just switch to the
-                 * new pgd. This condition is only true for calls from
-                 * context_switch() and interrupts are already disabled.
-                 */
-                cpu_switch_mm(mm->pgd, mm);
-        else if (irqs_disabled())
-                /*
-                 * Defer the new ASID allocation until after the context
-                 * switch critical region since __new_context() cannot be
-                 * called with interrupts disabled (it sends IPIs).
-                 */
-                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-        else
-                /*
-                 * That is a direct call to switch_mm() or activate_mm() with
-                 * interrupts enabled and a new context.
-                 */
-                switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)       (__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-        finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-        if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-                switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)       ({ mm->context.id = 0; })
 
 #else   /* !CONFIG_CPU_HAS_ASID */
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif  /* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)            do { } while(0)
+#define activate_mm(prev,next)         switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)          do { } while (0)
-#define activate_mm(prev,next)         switch_mm(prev, next, NULL)
 
 #endif
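
A minimal user-space sketch (not kernel code) of the generation test that the
removed check_and_switch_context() performed with
(mm->context.id ^ cpu_last_asid) >> ASID_BITS. ASID_BITS and the context ID
layout come from the deleted comment block above; the helper name
asid_is_stale() and the sample values are invented for illustration.

/* Illustration only: mirrors the ASID generation check from the old header. */
#include <stdio.h>

#define ASID_BITS 8

/* Most recently allocated context ID: generation ("process ID") in the
 * high bits, ASID in the low ASID_BITS bits. Sample: generation 2, ASID 0x2a. */
static unsigned long cpu_last_asid = (2UL << ASID_BITS) | 0x2a;

/* Returns zero when ctx_id carries the current generation above bit 7, so
 * its ASID is still valid and the caller may switch page tables directly;
 * non-zero when the ASID was handed out in an older generation and must be
 * reallocated before the switch. */
static unsigned long asid_is_stale(unsigned long ctx_id)
{
        return (ctx_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
        unsigned long same_gen = (2UL << ASID_BITS) | 0x07; /* generation 2 */
        unsigned long old_gen  = (1UL << ASID_BITS) | 0x07; /* generation 1 */

        printf("same generation stale? %lu\n", asid_is_stale(same_gen)); /* 0 */
        printf("old generation stale?  %lu\n", asid_is_stale(old_gen));  /* non-zero */
        return 0;
}

Because the shift discards the low ASID bits, the test compares only the
generation fields, so a rollover merely has to advance the generation (by
ASID_FIRST_VERSION, i.e. 1 << ASID_BITS) to invalidate every previously
issued ASID at once.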