author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2012-10-01 12:58:34 +0200
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2012-10-09 14:16:56 +0200
commit | 378b1e7a80a59325ca1036e892462db728126f84 (patch)
tree | 6d25959ac7401eee48f76441ca42c2759aef15b4 /arch/s390
parent | 521b3d790c16fad9d83c72d610c1e416ad3f7ae3 (diff)
s390/mm: fix pmd_huge() usage for kernel mapping
pmd_huge() always returns 0 on !HUGETLBFS; however, we use that helper
function when walking the kernel page tables to decide whether we have
a 1MB page frame or not.
Since we create 1MB frames for the kernel 1:1 mapping independently of
HUGETLBFS, this can lead to incorrect storage accesses: the code may
assume it has a pointer to a page table when it actually has a pointer
to a 1MB frame.
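
As a rough sketch of the failure mode (simplified; walk_broken() below is a
hypothetical helper used only for illustration, the real call sites are
change_page_attr() and vmem_remove_range() in the diff):

/*
 * Illustrative sketch only, not kernel source: how a walk of the kernel
 * 1:1 mapping goes wrong when pmd_huge() is compiled to a constant 0
 * (!HUGETLBFS) while the mapping still contains 1MB segment entries.
 */
static void walk_broken(unsigned long addr)	/* hypothetical helper */
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pud_t *pudp = pud_offset(pgdp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);
	pte_t *ptep;

	if (pmd_none(*pmdp))
		return;
	if (pmd_huge(*pmdp))		/* always false without HUGETLBFS */
		return;
	/*
	 * Reached even for a 1MB frame: the segment entry origin is then
	 * treated as a page table address, so ptep points into the frame's
	 * data instead of into a real page table.
	 */
	ptep = pte_offset_kernel(pmdp, addr);
}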
Fix this by adding a pmd_large() primitive, as other architectures already
have, and remove all references to HUGETLBFS/HUGETLBPAGE from the
code that walks kernel page tables.
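
A minimal sketch of the corrected pattern (lookup_kernel_pte() is a
hypothetical wrapper for illustration; pageattr.c and vmem.c in the diff
below apply the same check inline):

/*
 * Illustrative sketch only: with pmd_large() the walker detects 1MB
 * segment entries regardless of HUGETLBFS and stops at segment level
 * instead of descending into a bogus "page table".
 */
static pte_t *lookup_kernel_pte(unsigned long addr)	/* hypothetical helper */
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pud_t *pudp = pud_offset(pgdp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	if (pmd_none(*pmdp) || pmd_large(*pmdp))
		return NULL;	/* unmapped, or mapped by a 1MB frame */
	return pte_offset_kernel(pmdp, addr);
}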
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/pgtable.h |  9
-rw-r--r-- | arch/s390/mm/pageattr.c         |  2
-rw-r--r-- | arch/s390/mm/vmem.c             | 29
3 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 979fe3dc0788..75b91bb772bd 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -507,6 +507,15 @@ static inline int pmd_none(pmd_t pmd)
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
 }
 
+static inline int pmd_large(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+#else
+	return 0;
+#endif
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b36537a5f43e..0f33bab3e984 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -21,7 +21,7 @@ static void change_page_attr(unsigned long addr, int numpages,
 		pgdp = pgd_offset(&init_mm, addr);
 		pudp = pud_offset(pgdp, addr);
 		pmdp = pmd_offset(pudp, addr);
-		if (pmd_huge(*pmdp)) {
+		if (pmd_large(*pmdp)) {
 			WARN_ON_ONCE(1);
 			continue;
 		}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c22abf900c9e..5b70393911bd 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -79,7 +79,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -87,7 +88,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -108,12 +109,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pm_dir = pmd_offset(pu_dir, address);
 
 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
-		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
-		    (address + HPAGE_SIZE <= start + size) &&
-		    (address >= HPAGE_SIZE)) {
+		if (MACHINE_HAS_EDAT1 && address && !(address & ~PMD_MASK) &&
+		    (address + PMD_SIZE <= end)) {
 			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
 			pmd_val(*pm_dir) = pte_val(pte);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
 #endif
@@ -126,10 +126,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
 	ret = 0;
out:
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 	return ret;
 }
 
@@ -139,7 +140,8 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -147,7 +149,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t pte;
 
 	pte_val(pte) = _PAGE_TYPE_EMPTY;
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		pu_dir = pud_offset(pg_dir, address);
 		if (pud_none(*pu_dir))
@@ -155,17 +157,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir))
 			continue;
-
-		if (pmd_huge(*pm_dir)) {
+		if (pmd_large(*pm_dir)) {
 			pmd_clear(pm_dir);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
-
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 }
 
 /*