Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 36 ++++++++++++++++--------------------
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index cf4c1cb17dcd..f4a07f788f78 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -73,41 +73,38 @@ static inline int init_new_context(struct task_struct *tsk,
 static inline void set_user_asce(struct mm_struct *mm)
 {
 	S390_lowcore.user_asce = mm->context.asce;
-	if (current->thread.mm_segment.ar4)
-		__ctl_load(S390_lowcore.user_asce, 7, 7);
-	set_cpu_flag(CIF_ASCE_PRIMARY);
+	__ctl_load(S390_lowcore.user_asce, 1, 1);
+	clear_cpu_flag(CIF_ASCE_PRIMARY);
 }
 
 static inline void clear_user_asce(void)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
-
-	__ctl_load(S390_lowcore.user_asce, 1, 1);
-	__ctl_load(S390_lowcore.user_asce, 7, 7);
-}
-
-static inline void load_kernel_asce(void)
-{
-	unsigned long asce;
-
-	__ctl_store(asce, 1, 1);
-	if (asce != S390_lowcore.kernel_asce)
-		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	set_cpu_flag(CIF_ASCE_PRIMARY);
 }
 
+mm_segment_t enable_sacf_uaccess(void);
+void disable_sacf_uaccess(mm_segment_t old_fs);
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	int cpu = smp_processor_id();
 
-	S390_lowcore.user_asce = next->context.asce;
 	if (prev == next)
 		return;
+	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	/* Clear old ASCE by loading the kernel ASCE. */
-	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	/* Clear previous user-ASCE from CR1 and CR7 */
+	if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
+		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+		set_cpu_flag(CIF_ASCE_PRIMARY);
+	}
+	if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
+		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
+		clear_cpu_flag(CIF_ASCE_SECONDARY);
+	}
 	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
@@ -117,7 +114,6 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	load_kernel_asce();
 	if (mm) {
 		preempt_disable();
 		while (atomic_read(&mm->context.flush_count))
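The key change in switch_mm() is that CR1 and CR7 are now reloaded lazily, gated by the CIF_ASCE_PRIMARY and CIF_ASCE_SECONDARY CPU flags, instead of unconditionally on every switch. The userspace C sketch below models that flag-gated reload in isolation; cr1, cr7, kernel_asce and vdso_asce are invented stand-ins for the real control registers and lowcore fields, and switch_mm_sketch() is a hypothetical analogue, not the kernel function.

	/*
	 * Userspace analogue of the flag-gated lazy ASCE reload in the
	 * new switch_mm(). The "control registers" are plain variables
	 * here; all names are invented for illustration only.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long cr1, cr7;	/* stand-ins for s390 CR1/CR7 */
	static const unsigned long kernel_asce = 0x1000; /* pretend kernel ASCE */
	static const unsigned long vdso_asce   = 0x2000; /* pretend vdso ASCE */
	static bool cif_asce_primary;	/* set: CR1 already holds the kernel ASCE */
	static bool cif_asce_secondary;	/* set: CR7 still holds a stale user ASCE */

	static void switch_mm_sketch(void)
	{
		/* Reload CR1 only if it may still hold the previous user ASCE. */
		if (!cif_asce_primary) {
			cr1 = kernel_asce;
			cif_asce_primary = true;
		}
		/* Restore CR7 to the vdso ASCE only if it was actually changed. */
		if (cif_asce_secondary) {
			cr7 = vdso_asce;
			cif_asce_secondary = false;
		}
	}

	int main(void)
	{
		cif_asce_primary = false;	/* pretend a user ASCE is live in CR1 */
		cif_asce_secondary = true;	/* and that CR7 was switched as well */
		switch_mm_sketch();
		printf("cr1=%#lx cr7=%#lx\n", cr1, cr7);
		return 0;
	}

In the common case both flags already have their resting values and both branches fall through, which is the point of the pattern: the control-register loads done by __ctl_load() are avoided on context switches that do not need them.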