author		David Mosberger-Tang <David.Mosberger@acm.org>	2005-07-25 22:23:00 -0700
committer	Tony Luck <tony.luck@intel.com>			2005-08-12 15:05:21 -0700
commit		badea125d7cbd93f1678a95cf009b3bdfe6065cd
tree		c9cd47cfc5f7474fdf60735548734e647a4f7a9d /include/asm-ia64/mmu_context.h
parent		7d69fa6266770eeb6317eddd46b64456e8a515bf
[IA64] Fix race in mm-context wrap-around logic.
The patch below should fix a race which could cause stale TLB
entries. Specifically, when two CPUs raced for entrance to
wrap_mmu_context(), the losing CPU would find that, by the time it
acquired ctx.lock, mm->context already had a valid value; but it then
failed to re-check the delayed TLB flushing logic and hence could end
up using a context number while there were still stale entries for it
in its TLB. The fix is to check for delayed TLB flushes only after
mm->context is valid (non-zero). The patch also makes GCC v4.x
happier by defining a non-volatile variant of mm_context_t, called
nv_mm_context_t, for use in locals and return types where the
volatile qualifier is not needed.
Signed-off-by: David Mosberger-Tang <David.Mosberger@acm.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
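
For readers who want the shape of the fix outside the kernel, here is
a minimal user-space sketch of the ordering it establishes. All names
are hypothetical: a pthread mutex stands in for ia64_ctx.lock, a
per-thread flag stands in for the per-CPU ia64_need_tlb_flush, and
wrap_context() here flags only the current thread, unlike the real
wrap_mmu_context(). It models the ordering, not the real TLB machinery.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long ctx_next = 1;
	static unsigned long ctx_limit = 4;
	static __thread int need_tlb_flush;	/* models ia64_need_tlb_flush */

	struct mm { unsigned long context; };

	static void wrap_context(void)
	{
		/* Caller holds ctx_lock. The real wrap rescans live contexts
		 * and flags *every* CPU for a flush; this model flags itself. */
		ctx_next = 1;
		need_tlb_flush = 1;
	}

	static void delayed_flush(void)
	{
		if (need_tlb_flush) {
			pthread_mutex_lock(&ctx_lock);
			if (need_tlb_flush) {	/* re-check under the lock */
				/* local_flush_tlb_all() would run here */
				need_tlb_flush = 0;
			}
			pthread_mutex_unlock(&ctx_lock);
		}
	}

	static unsigned long get_context(struct mm *mm)
	{
		unsigned long context = mm->context;

		if (!context) {
			pthread_mutex_lock(&ctx_lock);
			/* re-check: another thread may have assigned a context
			 * (and possibly wrapped) while we waited for the lock */
			context = mm->context;
			if (!context) {
				if (ctx_next >= ctx_limit)
					wrap_context();
				mm->context = context = ctx_next++;
			}
			pthread_mutex_unlock(&ctx_lock);
		}
		/* the fix: flush only now, once "context" is known valid, so
		 * the loser of the race above still honors a flush requested
		 * by the winner's wrap */
		delayed_flush();
		return context;
	}

	int main(void)
	{
		struct mm mm = { 0 };
		printf("got context %lu\n", get_context(&mm));
		return 0;
	}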
Diffstat (limited to 'include/asm-ia64/mmu_context.h')
-rw-r--r--	include/asm-ia64/mmu_context.h	54
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb04..0680d163be97 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
-
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	nv_mm_context_t context = mm->context;
+
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
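
One note on the new type: nv_mm_context_t itself is defined outside
this diffstat (which is limited to mmu_context.h). As a sketch of the
plausible pairing, assuming both typedefs live together in
asm-ia64/mmu.h:

	/* Assumed definitions, not part of the hunk above: mm_context_t
	 * keeps the volatile qualifier so lock-free readers (e.g., the
	 * initial load in get_mmu_context()) see updates promptly, while
	 * nv_mm_context_t gives locals and return types a non-volatile
	 * variant that GCC v4.x accepts without complaint. */
	typedef volatile unsigned long mm_context_t;
	typedef unsigned long nv_mm_context_t;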