From 01edcd891c3e9f4bb992ff2ceb69836bf76f8ddf Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 23 Nov 2005 13:37:39 -0800
Subject: [PATCH] mm: powerpc ptlock comments

Update comments (only) on page_table_lock and mmap_sem in arch/powerpc.
Removed the comment on page_table_lock from hash_huge_page: since it's
no longer taking page_table_lock itself, it's irrelevant whether others
are; but how it is safe (even against huge file truncation?) I can't say.

Signed-off-by: Hugh Dickins
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 arch/powerpc/mm/mem.c         | 2 +-
 arch/powerpc/mm/tlb_32.c      | 6 ++++++
 arch/powerpc/mm/tlb_64.c      | 4 ++--
 4 files changed, 10 insertions(+), 6 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 426c269e552e..9250f14be8ef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -754,9 +754,7 @@ repeat:
 	}
 
 	/*
-	 * No need to use ldarx/stdcx here because all who
-	 * might be updating the pte will hold the
-	 * page_table_lock
+	 * No need to use ldarx/stdcx here
 	 */
 	*ptep = __pte(new_pte & ~_PAGE_BUSY);
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4bd7b0a70996..ed6ed2e30dac 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * We use it to preload an HPTE into the hash table corresponding to
  * the updated linux PTE.
  *
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t pte)
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6c3dc3c44c86..ad580f3742e5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 		return;
 	}
 
+	/*
+	 * It is safe to go down the mm's list of vmas when called
+	 * from dup_mmap, holding mmap_sem. It would also be safe from
+	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+	 * but it seems dup_mmap is the only SMP case which gets here.
+	 */
 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 53e31b834ace..859d29a0cac5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 
 void pte_free_finish(void)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (*batchp == NULL)
-- 
cgit v1.2.1
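
Note (illustration, not part of the patch): the mem.c hunk changes the
documented precondition of update_mmu_cache() from mm->page_table_lock
to "the pte lock", the per-page-table spinlock introduced by the ptlock
split in the same patch series. A minimal sketch of the calling pattern
that wording implies, using the generic pte_offset_map_lock() /
pte_unmap_unlock() helpers of that era; the function example_set_pte is
an invented name, not kernel code:

	static void example_set_pte(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    pte_t entry)
	{
		spinlock_t *ptl;
		/* map the pte and take the pte lock covering this page table */
		pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

		set_pte_at(mm, addr, pte, entry);
		/* preload the HPTE while the pte lock is still held */
		update_mmu_cache(vma, addr, entry);
		pte_unmap_unlock(pte, ptl);
	}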
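
Note (illustration, not part of the patch): the tlb_64.c hunks justify
the __get_cpu_var(pte_freelist_cur) access by tlb_gather_mmu() having
disabled preemption, rather than by page_table_lock. A condensed sketch
of why that holds, based on the generic mmu_gather of that era (fields
and fast-mode handling elided):

	static inline struct mmu_gather *
	tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
	{
		/*
		 * get_cpu_var() does preempt_disable() before handing back
		 * this CPU's mmu_gather, so everything up to the matching
		 * put_cpu_var() in tlb_finish_mmu() runs with preemption
		 * off and may use __get_cpu_var() safely.
		 */
		struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

		tlb->mm = mm;
		tlb->fullmm = full_mm_flush;
		return tlb;
	}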