author | Hugh Dickins <hugh@veritas.com> | 2005-10-29 18:16:01 -0700
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-29 21:40:37 -0700
commit | 15a23ffa2fc91cebdac44d4aee994f59d5c28dc0 | (patch)
tree | 5006935b29246c1ae07a7abc6a384f6b547293ce | /include/asm-ia64
parent | 7be7a546994f1222b2312fd348da14e16b6b7b42 | (diff)
[PATCH] mm: tlb_gather_mmu get_cpu_var
tlb_gather_mmu dates from before kernel preemption was allowed, and uses
smp_processor_id or __get_cpu_var to find its per-cpu mmu_gather. That works
because it is currently only called after taking page_table_lock, which is not
dropped until after the matching tlb_finish_mmu. But don't rely on that; it
will soon change. Instead, disable preemption internally with a proper
get_cpu_var in tlb_gather_mmu, and re-enable it with put_cpu_var in
tlb_finish_mmu.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ia64')
-rw-r--r-- | include/asm-ia64/tlb.h | 6 |
1 file changed, 4 insertions, 2 deletions
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 3a9a6d1be75c..1b82299d7c1e 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -129,7 +129,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 static inline struct mmu_gather *
 tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	/*
@@ -154,7 +154,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 
 /*
  * Called at the end of the shootdown operation to free up any resources that were
- * collected.  The page table lock is still held at this point.
+ * collected.
  */
 static inline void
 tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
@@ -174,6 +174,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int
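For context, a minimal sketch of the calling pattern this change relies on. The caller name below is hypothetical (it is not part of the patch), and the get_cpu_var/put_cpu_var expansion shown in the comment is roughly the generic <linux/percpu.h> behaviour of that era: get_cpu_var disables preemption before the per-cpu lookup, and put_cpu_var re-enables it, so the per-cpu mmu_gather stays on the same CPU for the whole gather/finish window.

```c
/*
 * Sketch only, not part of the patch.  In <linux/percpu.h> of this era,
 * get_cpu_var(var) is roughly (*({ preempt_disable(); &__get_cpu_var(var); }))
 * and put_cpu_var(var) is preempt_enable(), so the mmu_gather returned by
 * tlb_gather_mmu() cannot migrate to another CPU before tlb_finish_mmu().
 */
#include <asm/tlb.h>

/* Hypothetical caller, for illustration only. */
static void example_unmap(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb;

	tlb = tlb_gather_mmu(mm, 0);		/* preemption disabled here */
	/* ... gather and free the pages being unmapped ... */
	tlb_finish_mmu(tlb, start, end);	/* preemption re-enabled here */
}
```

The point of moving the preempt disable/enable inside the gather/finish pair is that callers no longer need page_table_lock (or any other external guarantee) to keep the per-cpu mmu_gather valid.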