Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	114
1 file changed, 83 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d525f2eba313..611ad084b7e7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -93,6 +93,8 @@ int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
+int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
@@ -145,7 +147,8 @@ struct mmu_psize_def mmu_psize_defaults_gp[] = {
 
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode, int psize)
+		      unsigned long pstart, unsigned long mode,
+		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
@@ -158,8 +161,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
-		unsigned long vsid = get_kernel_vsid(vaddr);
-		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+		unsigned long va = hpt_va(vaddr, vsid, ssize);
 
 		tmp_mode = mode;
 
@@ -167,14 +170,14 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (!in_kernel_text(vaddr))
 			tmp_mode = mode | HPTE_R_N;
 
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
 
 		BUG_ON(!ppc_md.hpte_insert);
 		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize);
+				tmp_mode, HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
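Every hunk above follows the same pattern: an open-coded 256M-only computation such as (vsid << 28) | (vaddr & 0x0fffffff) is replaced by a helper that takes the segment size. For reference, a sketch of what the hpt_va() and hpt_hash() helpers look like once they take ssize; the authoritative definitions live in this series' include/asm-powerpc/mmu-hash64.h change, which is outside this diffstat, so treat the exact shifts and masks as illustrative:

/* Sketch, assuming the mmu-hash64.h side of this series: a 256M
 * segment keeps the historical 28-bit segment offset, while a 1T
 * segment uses a 40-bit offset, so the VSID moves up to bit 40 and
 * is mixed into the hash differently.
 */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0xfffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
				     int ssize)
{
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
	} else {
		vsid = va >> 40;
		hash = vsid ^ (vsid << 25) ^
			((va & 0xffffffffffUL) >> shift);
	}
	return hash & 0x7fffffffffUL;
}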
@@ -186,6 +189,37 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	return ret < 0 ? ret : 0;
 }
 
+static int __init htab_dt_scan_seg_sizes(unsigned long node,
+					 const char *uname, int depth,
+					 void *data)
+{
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	u32 *prop;
+	unsigned long size = 0;
+
+	/* We are scanning "cpu" nodes only */
+	if (type == NULL || strcmp(type, "cpu") != 0)
+		return 0;
+
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+					  &size);
+	if (prop == NULL)
+		return 0;
+	for (; size >= 4; size -= 4, ++prop) {
+		if (prop[0] == 40) {
+			DBG("1T segment support detected\n");
+			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static void __init htab_init_seg_sizes(void)
+{
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+}
+
 static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -265,7 +299,6 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
-
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -398,7 +431,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-				 mmu_linear_psize));
+				 mmu_linear_psize, mmu_kernel_ssize));
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
@@ -449,9 +482,18 @@ void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
+	/* Initialize segment sizes */
+	htab_init_seg_sizes();
+
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		mmu_kernel_ssize = MMU_SEGSIZE_1T;
+		mmu_highuser_ssize = MMU_SEGSIZE_1T;
+		printk(KERN_INFO "Using 1TB segments\n");
+	}
+
 	/*
 	 * Calculate the required size of the htab.  We want the number of
 	 * PTEGs to equal one half the number of real pages.
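htab_dt_scan_seg_sizes() keys off the "ibm,processor-segment-sizes" device-tree property, a list of 32-bit cells each holding a supported segment shift: 28 (2^28 bytes, i.e. 256M) and 40 (2^40 bytes, i.e. 1T). The segment-size constants the code above selects between also come from the mmu-hash64.h side of the series; roughly (sketch, values assumed from that header):

/* Sketch of the companion mmu-hash64.h definitions used above. */
#define SID_SHIFT		28	/* 256M segment = 2^28 bytes */
#define SID_SHIFT_1T		40	/* 1T segment   = 2^40 bytes */

#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1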
@@ -523,18 +565,20 @@ void __init htab_initialize(void)
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
 							__pa(base), mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
 							mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize));
+				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -553,7 +597,7 @@ void __init htab_initialize(void)
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
 					 __pa(tce_alloc_start), mode_rw,
-					 mmu_linear_psize));
+					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	htab_finish_init();
@@ -621,7 +665,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
-	int psize;
+	int psize, ssize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -640,20 +684,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			DBG_LOW(" user region with no mm !\n");
 			return 1;
 		}
-		vsid = get_vsid(mm->context.id, ea);
 #ifdef CONFIG_PPC_MM_SLICES
 		psize = get_slice_psize(mm, ea);
 #else
 		psize = mm->context.user_psize;
 #endif
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
-		vsid = get_kernel_vsid(ea);
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		if (ea < VMALLOC_END)
 			psize = mmu_vmalloc_psize;
 		else
 			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
 		break;
 	default:
 		/* Not a valid range
@@ -758,10 +804,10 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -783,6 +829,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
+	int ssize;
 
 	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -815,7 +862,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get VSID */
-	vsid = get_vsid(mm->context.id, ea);
+	ssize = user_segment_size(ea);
+	vsid = get_vsid(mm->context.id, ea, ssize);
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
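Both hash_page() and hash_preload() now derive the segment size per effective address through user_segment_size(), another helper added on the header side of this series. A sketch, assuming the SID_SHIFT_1T constant above: addresses below 1T must stay in 256M segments so existing mappings keep hashing to the same hash-table slots, while addresses at or above 1T get whatever mmu_highuser_ssize was chosen in htab_initialize():

/* Sketch: pick a segment size for a user effective address.
 * Below 1T, always use 256M segments; above, use the 1T segment
 * size if the CPU feature check enabled it at boot.
 */
static inline int user_segment_size(unsigned long addr)
{
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}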
@@ -828,28 +876,29 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
-		__hash_page_64K(ea, access, vsid, ptep, trap, local);
+		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
+		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 	local_irq_restore(flags);
 }
 
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+		     int local)
 {
 	unsigned long hash, index, shift, hidx, slot;
 
 	DBG_LOW("flush_hash_page(va=%016x)\n", va);
 	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hidx = __rpte_to_hidx(pte, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += hidx & _PTEIDX_GROUP_IX;
 		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
-		ppc_md.hpte_invalidate(slot, va, psize, local);
+		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
 	} pte_iterate_hashed_end();
 }
 
@@ -864,7 +913,7 @@ void flush_hash_range(unsigned long number, int local)
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vaddr[i], batch->pte[i],
-					batch->psize, local);
+					batch->psize, batch->ssize, local);
 	}
 }
 
@@ -890,17 +939,19 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hpteg;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
 		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
 	int ret;
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
-				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+				 mode, HPTE_V_BOLTED,
+				 mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
@@ -910,10 +961,11 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hidx, slot;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -923,7 +975,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	slot += hidx & _PTEIDX_GROUP_IX;
-	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
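flush_hash_range() reads the segment size out of the TLB-flush batch (batch->ssize), which implies the batch structure on the include/asm-powerpc/tlbflush.h side of this series grows an ssize field next to psize, set when a batch is started so that a single batch never mixes segment sizes. A sketch of that structure; the surrounding fields are assumed from the pre-patch layout:

struct ppc64_tlb_batch {
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;	/* new: segment size for this batch */
};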