| author | Peter Keilty <peter.keilty@hp.com> | 2005-10-31 16:44:47 -0500 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2005-10-31 14:36:05 -0800 |
| commit | dcc17d1baef3721d1574e5b2f4f2d4607514bcff (patch) | |
| tree | 78b19a9b54f57aa010f50201e7639786b0e5f770 /arch/ia64/mm | |
| parent | f2c84c0e84bfa637a7161eac10157cf3b05b4a73 (diff) | |
| download | blackbird-op-linux-dcc17d1baef3721d1574e5b2f4f2d4607514bcff.tar.gz | blackbird-op-linux-dcc17d1baef3721d1574e5b2f4f2d4607514bcff.zip |
[IA64] Use bitmaps for efficient context allocation/free
Corrects the very inefficient method of finding free context_ids in
get_mmu_context(). Instead of walking the task_list of all processes,
two bitmaps are used to efficiently store and look up state: in-use and
needs-flushing. The entire rid address space is now used before calling
wrap_mmu_context() and flushing the tlb globally.
Special thanks to Ken and Rohit for their review and for the
modifications to use a bit flushmap.
Signed-off-by: Peter Keilty <peter.keilty@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
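
The idea can be modeled in plain userspace C. The sketch below is an illustration only, not the kernel code: the sizes (`NCTX`, `FIRST_CTX`) and helper names (`bit_test`, `bit_set`, `get_context`, `put_context`, `wrap_context`) are assumptions chosen for demonstration; the real patch uses the kernel's bitmap API and bootmem-allocated maps.

```c
/*
 * Toy userspace model of the two-bitmap scheme (illustration only,
 * not the kernel code): "inuse" marks allocated context ids,
 * "flushmap" marks ids freed since the last global TLB flush.
 * Sizes and helper names are assumptions for demonstration.
 */
#include <stdio.h>

#define NCTX      1024                 /* toy rid space */
#define BPL       (8 * sizeof(unsigned long))
#define NWORDS    (NCTX / BPL)
#define FIRST_CTX 300                  /* skip low ids, as the patch does */

static unsigned long inuse[NWORDS];    /* bit set => id allocated */
static unsigned long flushmap[NWORDS]; /* bit set => id freed, flush pending */

static int bit_test(unsigned long *map, unsigned int n)
{
	return (map[n / BPL] >> (n % BPL)) & 1;
}

static void bit_set(unsigned long *map, unsigned int n)
{
	map[n / BPL] |= 1UL << (n % BPL);
}

/* Reclaim freed ids in one pass: XOR clears each inuse bit whose
 * flushmap bit is set (the kernel would flush the TLB globally here). */
static void wrap_context(void)
{
	for (unsigned int i = 0; i < NWORDS; i++) {
		inuse[i] ^= flushmap[i];
		flushmap[i] = 0;
	}
}

/* Allocate the next free id; wrap and retry once if exhausted. */
static int get_context(void)
{
	for (int pass = 0; pass < 2; pass++) {
		for (unsigned int n = FIRST_CTX; n < NCTX; n++)
			if (!bit_test(inuse, n)) {
				bit_set(inuse, n);
				return (int)n;
			}
		wrap_context();
	}
	return -1;                     /* rid space truly exhausted */
}

/* Freeing is deferred: just mark the id as needing a flush. */
static void put_context(unsigned int n)
{
	bit_set(flushmap, n);
}

int main(void)
{
	int a = get_context(), b = get_context();
	put_context((unsigned int)a);
	printf("allocated %d and %d, freed %d\n", a, b, a);
	return 0;
}
```

Deferring the reclaim to wrap time is what makes the free path cheap: freeing an id is a single bit set, and the cost of the global TLB flush is amortized over the entire rid space.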
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r-- | arch/ia64/mm/tlb.c | 56 |
1 file changed, 27 insertions(+), 29 deletions(-)
```diff
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index c79a9b96d02b..39628fca274c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -8,6 +8,8 @@
  *		Modified RID allocation for SMP
  *	Goutham Rao <goutham.rao@intel.com>
  *		IPI based ptc implementation and A-step IPI implementation.
+ * Rohit Seth <rohit.seth@intel.com>
+ * Ken Chen <kenneth.w.chen@intel.com>
  */
 #include <linux/config.h>
 #include <linux/module.h>
@@ -16,12 +18,14 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/bootmem.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
+#include <asm/dma.h>
 
 static struct {
 	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -31,49 +35,43 @@ static struct {
 struct ia64_ctx ia64_ctx = {
 	.lock =		SPIN_LOCK_UNLOCKED,
 	.next =		1,
-	.limit =	(1 << 15) - 1,	/* start out with the safe (architected) limit */
 	.max_ctx =	~0U
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 
 /*
+ * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
+ * Called after cpu_init() has setup ia64_ctx.max_ctx based on
+ * maximum RID that is supported by boot CPU.
+ */
+void __init
+mmu_context_init (void)
+{
+	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+}
+
+/*
  * Acquire the ia64_ctx.lock before calling this function!
  */
 void
 wrap_mmu_context (struct mm_struct *mm)
 {
-	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-	struct task_struct *tsk;
 	int i;
+	unsigned long flush_bit;
 
-	if (ia64_ctx.next > max_ctx)
-		ia64_ctx.next = 300;	/* skip daemons */
-	ia64_ctx.limit = max_ctx + 1;
-
-	/*
-	 * Scan all the task's mm->context and set proper safe range
-	 */
-
-	read_lock(&tasklist_lock);
-  repeat:
-	for_each_process(tsk) {
-		if (!tsk->mm)
-			continue;
-		tsk_context = tsk->mm->context;
-		if (tsk_context == ia64_ctx.next) {
-			if (++ia64_ctx.next >= ia64_ctx.limit) {
-				/* empty range: reset the range limit and start over */
-				if (ia64_ctx.next > max_ctx)
-					ia64_ctx.next = 300;
-				ia64_ctx.limit = max_ctx + 1;
-				goto repeat;
-			}
-		}
-		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
-			ia64_ctx.limit = tsk_context;
+	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
+		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
+		ia64_ctx.bitmap[i] ^= flush_bit;
 	}
-	read_unlock(&tasklist_lock);
+
+	/* use offset at 300 to skip daemons */
+	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, 300);
+	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, ia64_ctx.next);
+
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu();	/* prevent preemption/migration */
```
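
The bootmem sizing in mmu_context_init() follows directly from max_ctx being the highest context id: max_ctx+1 ids need one bit each, i.e. (max_ctx+1)/8 bytes per map. A standalone check of that arithmetic, using a hypothetical 18-bit rid space (the actual width depends on the boot CPU):

```c
#include <stdio.h>

int main(void)
{
	unsigned long max_ctx = (1UL << 18) - 1;  /* assumed: 18-bit rid space */

	/* One bit per context id, eight ids per byte -- the same math as
	 * alloc_bootmem((ia64_ctx.max_ctx+1)>>3) in the patch. */
	printf("bitmap size: %lu bytes\n", (max_ctx + 1) >> 3);  /* 32768 */
	return 0;
}
```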