Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/mm/hugetlbpage.c  | 29 | +++++++++++++++++++++++------
-rw-r--r-- | arch/ppc64/mm/hugetlbpage.c | 10 | ----------
2 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 626258ae9742..df08ae7634b6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,13 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
 	return NULL;
 }
 
-/*
- * Do nothing, until we've worked out what to do! To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
 {
+	/*
+	 * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) also.
+	 *
+	 * The offset of these addresses from the base of the hugetlb
+	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+	 * the standard free_pgd_range will free the right page tables.
+	 *
+	 * If floor and ceiling are also in the hugetlb region, they
+	 * must likewise be scaled down; but if outside, left unchanged.
+	 */
+
+	addr = htlbpage_to_page(addr);
+	end  = htlbpage_to_page(end);
+	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index c62ddaff0720..8665bb57e42b 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -430,16 +430,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	flush_tlb_pending();
 }
 
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-			   unsigned long start, unsigned long end)
-{
-	/* Because the huge pgtables are only 2 level, they can take
-	 * at most around 4M, much less than one hugepage which the
-	 * process is presumably entitled to use. So we don't bother
-	 * freeing up the pagetables on unmap, and wait until
-	 * destroy_context() to clean up the lot. */
-}
-
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = current->mm;
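
The scaling trick the new ia64 code relies on is easy to demonstrate in isolation. The sketch below is not kernel code: it is a minimal userspace model in which PAGE_SHIFT, HPAGE_SHIFT, REGION_BASE, and the scale_to_page() helper are hypothetical stand-ins for ia64's real constants and its htlbpage_to_page() macro. It shows the idea from the patch comment: shifting the region offset right by (HPAGE_SHIFT - PAGE_SHIFT) maps hugepage N onto the address where base page N would sit, so the standard free_pgd_range() walks and frees exactly the right page tables.

#include <stdio.h>

/* Hypothetical stand-ins for the ia64 constants: a hugetlb-only
 * region based at REGION_BASE, 16KB base pages, 4MB huge pages. */
#define PAGE_SHIFT   14ULL                  /* 16KB base pages */
#define HPAGE_SHIFT  22ULL                  /* 4MB huge pages */
#define REGION_BASE  0x8000000000000000ULL  /* base of the hugetlb region */

/* Model of the htlbpage_to_page() step: keep the region bits, scale
 * the offset from the region base down by HPAGE_SIZE/PAGE_SIZE,
 * i.e. shift right by (HPAGE_SHIFT - PAGE_SHIFT). */
static unsigned long long scale_to_page(unsigned long long addr)
{
	unsigned long long offset = addr - REGION_BASE;

	return REGION_BASE | (offset >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
	/* The address of the 4th hugepage in the region... */
	unsigned long long addr = REGION_BASE + (3ULL << HPAGE_SHIFT);

	/* ...scales to the address of the 4th base page, which is the
	 * position whose page tables free_pgd_range() must free. */
	printf("huge %#llx -> page %#llx\n", addr, scale_to_page(addr));
	return 0;
}

This also makes the conditional handling of floor and ceiling in the patch clearer: addr and end are guaranteed by the caller to lie inside the hugetlb region and are scaled unconditionally, while floor and ceiling may come from neighbouring non-hugetlb mappings, so they are scaled only when is_hugepage_only_range() says they are in-region, and left unchanged otherwise.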