| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-30 08:38:30 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-30 08:38:30 -0700 |
| commit | d67c6f869c0a7f275689855161c93d714197e052 (patch) | |
| tree | 17024af84087d216c62144d21a41beb74eca80dc /include/asm-s390/hugetlb.h | |
| parent | ec31b2124158f60c515ed84bd5e40db1a883c7b6 (diff) | |
| parent | 1175cdc670f2d4197b033f823b32435031a6daa8 (diff) | |
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] Update default configuration.
[S390] use generic sys_ptrace
[S390] Remove self ptrace IEEE_IP hack.
[S390] Convert to SPARSEMEM & SPARSEMEM_VMEMMAP
[S390] System z large page support.
[S390] Convert machine feature detection code to C.
[S390] vmemmap: use clear_table to initialise page tables.
[S390] Move stfl to system.h and delete duplicated version.
[S390] uaccess_mvcos: #ifdef config dependent code.
[S390] cpu topology: Fix possible deadlock.
[S390] Add topology_core_siblings to topology.h
[S390] cio: Make isc handling more robust.
[S390] remove -traditional
[S390] Automatically detect added cpus.
[S390] smp: Fix locking order.
[S390] Add missing ifndef/define to include/asm-s390/sysinfo.h.
[S390] Move show_regs to traps.c.
[S390] cio: Use strict_strtoul() for attributes.
Diffstat (limited to 'include/asm-s390/hugetlb.h')
-rw-r--r-- | include/asm-s390/hugetlb.h | 183
1 files changed, 183 insertions, 0 deletions
diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h
new file mode 100644
index 000000000000..600a776f8f75
--- /dev/null
+++ b/include/asm-s390/hugetlb.h
@@ -0,0 +1,183 @@
+/*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_HUGETLB_H
+#define _ASM_S390_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+#define is_hugepage_only_range(mm, addr, len)   0
+#define hugetlb_free_pgd_range                  free_pgd_range
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t pte);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+        if (len & ~HPAGE_MASK)
+                return -EINVAL;
+        if (addr & ~HPAGE_MASK)
+                return -EINVAL;
+        return 0;
+}
+
+#define hugetlb_prefault_arch_hook(mm)          do { } while (0)
+
+int arch_prepare_hugepage(struct page *page);
+void arch_release_hugepage(struct page *page);
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+        /*
+         * PROT_NONE needs to be remapped from the pte type to the ste type.
+         * The HW invalid bit is also different for pte and ste. The pte
+         * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
+         * bit, so we don't have to clear it.
+         */
+        if (pte_val(pte) & _PAGE_INVALID) {
+                if (pte_val(pte) & _PAGE_SWT)
+                        pte_val(pte) |= _HPAGE_TYPE_NONE;
+                pte_val(pte) |= _SEGMENT_ENTRY_INV;
+        }
+        /*
+         * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
+         * table entry.
+         */
+        pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+        /*
+         * Also set the change-override bit because we don't need dirty bit
+         * tracking for hugetlbfs pages.
+         */
+        pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+        return pte;
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+        pte_val(pte) |= _PAGE_RO;
+        return pte;
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+        return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
+               !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+        pte_t pte = *ptep;
+        unsigned long mask;
+
+        if (!MACHINE_HAS_HPAGE) {
+                ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
+                if (ptep) {
+                        mask = pte_val(pte) &
+                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
+                        pte = pte_mkhuge(*ptep);
+                        pte_val(pte) |= mask;
+                }
+        }
+        return pte;
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                            unsigned long addr, pte_t *ptep)
+{
+        pte_t pte = huge_ptep_get(ptep);
+
+        pmd_clear((pmd_t *) ptep);
+        return pte;
+}
+
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+                                               _SEGMENT_ENTRY_INV;
+        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+        asm volatile(
+                "       csp %1,%3"
+                : "=m" (*pmdp)
+                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+        unsigned long sto = (unsigned long) pmdp -
+                                pmd_index(address) * sizeof(pmd_t);
+
+        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+                asm volatile(
+                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
+                        : "=m" (*pmdp)
+                        : "m" (*pmdp), "a" (sto),
+                          "a" ((address & HPAGE_MASK))
+                );
+        }
+        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void huge_ptep_invalidate(struct mm_struct *mm,
+                                        unsigned long address, pte_t *ptep)
+{
+        pmd_t *pmdp = (pmd_t *) ptep;
+
+        if (!MACHINE_HAS_IDTE) {
+                __pmd_csp(pmdp);
+                if (mm->context.noexec) {
+                        pmdp = get_shadow_table(pmdp);
+                        __pmd_csp(pmdp);
+                }
+                return;
+        }
+
+        __pmd_idte(address, pmdp);
+        if (mm->context.noexec) {
+                pmdp = get_shadow_table(pmdp);
+                __pmd_idte(address, pmdp);
+        }
+        return;
+}
+
+#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
+({                                                                          \
+        int __changed = !pte_same(huge_ptep_get(__ptep), __entry);          \
+        if (__changed) {                                                    \
+                huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);       \
+                set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
+        }                                                                   \
+        __changed;                                                          \
+})
+
+#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                   \
+({                                                                      \
+        pte_t __pte = huge_ptep_get(__ptep);                            \
+        if (pte_write(__pte)) {                                         \
+                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+                    (__mm) != current->active_mm)                       \
+                        huge_ptep_invalidate(__mm, __addr, __ptep);     \
+                set_huge_pte_at(__mm, __addr, __ptep,                   \
+                                huge_pte_wrprotect(__pte));             \
+        }                                                               \
+})
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long address, pte_t *ptep)
+{
+        huge_ptep_invalidate(vma->vm_mm, address, ptep);
+}
+
+#endif /* _ASM_S390_HUGETLB_H */
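For reference, the alignment rule enforced by prepare_hugepage_range() in the new header can be tried outside the kernel. The following is a minimal userspace sketch, assuming a 1 MB segment size (HPAGE_SHIFT = 20) for System z large pages; the constant values and sample addresses are illustrative stand-ins, not taken from the patch.

/*
 * Userspace sketch of the check done by prepare_hugepage_range() above.
 * HPAGE_SHIFT = 20 (1 MB segments) is an assumption for illustration;
 * the kernel headers define the real values.
 */
#include <errno.h>
#include <stdio.h>

#define HPAGE_SHIFT     20UL
#define HPAGE_SIZE      (1UL << HPAGE_SHIFT)
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))

static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)          /* length must be a multiple of 1 MB */
                return -EINVAL;
        if (addr & ~HPAGE_MASK)         /* start must be 1 MB aligned */
                return -EINVAL;
        return 0;
}

int main(void)
{
        /* aligned start and length -> 0 (accepted) */
        printf("%d\n", prepare_hugepage_range(0x40000000UL, 2 * HPAGE_SIZE));
        /* 4 KB-aligned but not 1 MB-aligned start -> -EINVAL (rejected) */
        printf("%d\n", prepare_hugepage_range(0x40001000UL, 2 * HPAGE_SIZE));
        return 0;
}

Under these assumptions the first call prints 0 and the second prints -22 (-EINVAL): hugetlbfs mappings must start and end on segment boundaries before any huge pte is installed.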