author     Hugh Dickins <hugh@veritas.com>                   2005-04-19 13:29:18 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org.(none)>  2005-04-19 13:29:18 -0700
commit     021740dc30d184e3b0fa7679936e65a56090c425
tree       c31bd23fe74038b4bab5148e17e07745b75b453d
parent     146425a316fb937fbdcac018b34a23c67d12214b
[PATCH] freepgt: hugetlb area is clean
Once we're strict about clearing away page tables, hugetlb_prefault can assume
there are no page tables left within its range. Since the other arches
continue if !pte_none here, let i386 do the same.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
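
To make the i386 side of the change concrete, here is a rough sketch of how the hugetlb_prefault() loop reads once the patch is applied. It is paraphrased from the diff below, not copied from the tree: the enclosing per-huge-page loop, the huge_pte_alloc() call and the error path are assumptions about the surrounding 2.6-era code that the hunk does not show.

	/* Sketch only -- assembled from the hunk below plus assumed context. */
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		pte_t *pte = huge_pte_alloc(mm, addr);	/* assumed allocator call */

		if (!pte)
			goto out;	/* error path appears as context in the hunk */

		/*
		 * free_pgtables() is now strict, so a populated entry can only
		 * be an already-instantiated huge PTE: skip it, as the other
		 * architectures do, instead of tearing down a "leftover" PMD.
		 */
		if (!pte_none(*pte))
			continue;

		/* ... fall through: look up the page at this index and map it ... */
	}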
Diffstat (limited to 'arch')
 -rw-r--r--  arch/i386/mm/hugetlbpage.c  | 11
 -rw-r--r--  arch/ppc64/mm/hugetlbpage.c | 37
 2 files changed, 2 insertions, 46 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index a8c45143088b..171fc925e1e4 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -249,15 +249,8 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
-		if (!pte_none(*pte)) {
-			pmd_t *pmd = (pmd_t *) pte;
-
-			page = pmd_page(*pmd);
-			pmd_clear(pmd);
-			mm->nr_ptes--;
-			dec_page_state(nr_page_table_pages);
-			page_cache_release(page);
-		}
+		if (!pte_none(*pte))
+			continue;
 		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
 			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 8665bb57e42b..390296efe3e0 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -203,8 +203,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	unsigned long start = seg << SID_SHIFT;
 	unsigned long end = (seg+1) << SID_SHIFT;
 	struct vm_area_struct *vma;
-	unsigned long addr;
-	struct mmu_gather *tlb;
 
 	BUG_ON(seg >= 16);
 
@@ -213,41 +211,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	if (vma && (vma->vm_start < end))
 		return -EBUSY;
 
-	/* Clean up any leftover PTE pages in the region */
-	spin_lock(&mm->page_table_lock);
-	tlb = tlb_gather_mmu(mm, 0);
-	for (addr = start; addr < end; addr += PMD_SIZE) {
-		pgd_t *pgd = pgd_offset(mm, addr);
-		pmd_t *pmd;
-		struct page *page;
-		pte_t *pte;
-		int i;
-
-		if (pgd_none(*pgd))
-			continue;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd || pmd_none(*pmd))
-			continue;
-		if (pmd_bad(*pmd)) {
-			pmd_ERROR(*pmd);
-			pmd_clear(pmd);
-			continue;
-		}
-		pte = (pte_t *)pmd_page_kernel(*pmd);
-		/* No VMAs, so there should be no PTEs, check just in case. */
-		for (i = 0; i < PTRS_PER_PTE; i++) {
-			BUG_ON(!pte_none(*pte));
-			pte++;
-		}
-		page = pmd_page(*pmd);
-		pmd_clear(pmd);
-		mm->nr_ptes--;
-		dec_page_state(nr_page_table_pages);
-		pte_free_tlb(tlb, page);
-	}
-	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
-
 	return 0;
 }
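
For the ppc64 side, the removal leaves prepare_low_seg_for_htlb() as a plain VMA check. The sketch below is assembled from the context lines of the hunk above; the find_vma() lookup is an assumption, since the hunk does not show how vma is set.

static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(seg >= 16);

	/* Assumed lookup: the hunk only shows the test on vma. */
	vma = find_vma(mm, start);

	/*
	 * A mapped VMA in the segment still blocks converting it for huge
	 * pages; but with free_pgtables() now strict, there is no need to
	 * sweep the range for leftover PTE pages any more.
	 */
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}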