From b73c806341cfc7492ede6a2ce713cb579547d0ab Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 25 Nov 2009 22:00:08 +0000 Subject: sh: Abstract the number of page table levels Keep the dimensions of the page tables in a separate header file in preparation for allowing a three level page table structure. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) (limited to 'arch/sh/include/asm/pgtable.h') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index ba3046e4f06f..9a0f66c1134c 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -12,7 +12,7 @@ #ifndef __ASM_SH_PGTABLE_H #define __ASM_SH_PGTABLE_H -#include +#include #include #ifndef __ASSEMBLY__ @@ -51,28 +51,12 @@ static inline unsigned long long neff_sign_extend(unsigned long val) #define NPHYS_SIGN (1LL << (NPHYS - 1)) #define NPHYS_MASK (-1LL << NPHYS) -/* - * traditional two-level paging structure - */ -/* PTE bits */ -#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64) -# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ -#else -# define PTE_MAGNITUDE 2 /* 32-bit PTEs */ -#endif -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) - -/* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* Entries per level */ #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE)) -#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 #define PHYS_ADDR_MASK29 0x1fffffff -- cgit v1.2.3 From 5d9b4b19f118abfb75e352841f7bf74580d7e427 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 13 Dec 2009 14:38:50 +0000 Subject: sh: Definitions for 3-level page table layout If using 64-bit PTEs and 4K pages then each page table has 512 entries (as opposed to 1024 entries with 32-bit PTEs). Unlike MIPS, SH follows the convention that all structures in the page table (pgd_t, pmd_t, pgprot_t, etc) must be the same size. Therefore, 64-bit PTEs require 64-bit PGD entries, etc. Using 2-levels of page tables and 64-bit PTEs it is only possible to map 1GB of virtual address space. In order to map all 4GB of virtual address space we need to adopt a 3-level page table layout. This actually works out better for CONFIG_SUPERH32 because we only waste 2 PGD entries on the P1 and P2 areas (which are untranslated) instead of 256. 
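A quick way to see the numbers above is to work them through directly. The sketch below is illustrative only and is not part of the patch: it assumes 4K pages (PAGE_SHIFT = 12), 8-byte PTEs, and the PGDIR_SHIFT = 30 / PTRS_PER_PGD = 4 values this patch introduces further down.

/*
 * Illustrative only -- not from the patch. With 4K pages and 64-bit
 * (8-byte) PTEs, one page holds 512 table entries, so a two-level
 * layout tops out at 1GB, while four 1GB PGD entries restore the
 * full 4GB of virtual address space.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 1ULL << 12;	/* 4K pages */
	const unsigned long long pte_size  = 8;			/* 64-bit PTEs */
	const unsigned long long per_table = page_size / pte_size;

	/* Two levels: 512 PGD entries x 512 PTEs x 4K = 1GB only. */
	assert(per_table * per_table * page_size == 1ULL << 30);

	/* Three levels, PGDIR_SHIFT = 30: 4 PGD entries of 1GB each = 4GB. */
	assert(4 * (1ULL << 30) == 1ULL << 32);

	printf("entries per table: %llu\n", per_table);
	return 0;
}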
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgalloc.h | 4 +++ arch/sh/include/asm/pgalloc_pmd.h | 41 +++++++++++++++++++++++++++++ arch/sh/include/asm/pgtable.h | 4 +++ arch/sh/include/asm/pgtable_pmd.h | 55 +++++++++++++++++++++++++++++++++++++++ arch/sh/mm/Kconfig | 24 ++++++++++++++--- arch/sh/mm/fault_32.c | 3 +++ arch/sh/mm/init.c | 6 +++++ 7 files changed, 134 insertions(+), 3 deletions(-) create mode 100644 arch/sh/include/asm/pgalloc_pmd.h create mode 100644 arch/sh/include/asm/pgtable_pmd.h (limited to 'arch/sh/include/asm/pgtable.h') diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index fe9f037ac5fd..4ea27855c3b5 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -6,7 +6,11 @@ #define QUICK_PT 1 /* Other page table pages that are zero on free */ +#ifdef CONFIG_PGTABLE_LEVELS_3 +#include +#else #include +#endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) diff --git a/arch/sh/include/asm/pgalloc_pmd.h b/arch/sh/include/asm/pgalloc_pmd.h new file mode 100644 index 000000000000..20f75cc4eb09 --- /dev/null +++ b/arch/sh/include/asm/pgalloc_pmd.h @@ -0,0 +1,41 @@ +#ifndef __ASM_SH_PGALLOC_PMD_H +#define __ASM_SH_PGALLOC_PMD_H + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + int i; + + pgd = kzalloc(sizeof(*pgd) * PTRS_PER_PGD, GFP_KERNEL | __GFP_REPEAT); + + for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) + pgd[i] = swapper_pg_dir[i]; + + return pgd; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + kfree(pgd); +} + +static inline void __check_pgt_cache(void) +{ +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud((unsigned long)pmd)); +} + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + quicklist_free(QUICK_PT, NULL, pmd); +} + +#endif /* __ASM_SH_PGALLOC_PMD_H */ diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 9a0f66c1134c..9effcc3b0d10 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -12,7 +12,11 @@ #ifndef __ASM_SH_PGTABLE_H #define __ASM_SH_PGTABLE_H +#ifdef CONFIG_PGTABLE_LEVELS_3 +#include +#else #include +#endif #include #ifndef __ASSEMBLY__ diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h new file mode 100644 index 000000000000..78dc36e1c2dd --- /dev/null +++ b/arch/sh/include/asm/pgtable_pmd.h @@ -0,0 +1,55 @@ +#ifndef __ASM_SH_PGTABLE_PMD_H +#define __ASM_SH_PGTABLE_PMD_H + +#include + +/* + * Some cores need a 3-level page table layout, for example when using + * 64-bit PTEs and 4K pages. 
+ */ + +#define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ + +/* PGD bits */ +#define PGDIR_SHIFT 30 + +#define PTRS_PER_PGD 4 +#define USER_PTRS_PER_PGD 2 + +/* PMD bits */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) + +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) + +typedef struct { unsigned long long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return pud_val(pud); +} + +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} + +#define pud_none(x) (!pud_val(x)) +#define pud_present(x) (pud_val(x)) +#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0) +#define pud_bad(x) (pud_val(x) & ~PAGE_MASK) + +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) + +#endif /* __ASM_SH_PGTABLE_PMD_H */ diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 0e7ba8e891cf..b3f6c1a30b22 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -189,6 +189,24 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG +choice + prompt "Page table layout" + default PGTABLE_LEVELS_3 if X2TLB + default PGTABLE_LEVELS_2 + +config PGTABLE_LEVELS_2 + bool "2 Levels" + help + This is the default page table layout for all SuperH CPUs. + +config PGTABLE_LEVELS_3 + bool "3 Levels" + depends on X2TLB + help + This enables a 3 level page table structure. + +endchoice + choice prompt "Kernel page size" default PAGE_SIZE_8KB if X2TLB @@ -196,13 +214,13 @@ choice config PAGE_SIZE_4KB bool "4kB" - depends on !MMU || !X2TLB + depends on !MMU || !X2TLB || PGTABLE_LEVELS_3 help This is the default page size used by all SuperH CPUs. config PAGE_SIZE_8KB bool "8kB" - depends on !MMU || X2TLB + depends on !MMU || X2TLB && !PGTABLE_LEVELS_3 help This enables 8kB pages as supported by SH-X2 and later MMUs. @@ -214,7 +232,7 @@ config PAGE_SIZE_16KB config PAGE_SIZE_64KB bool "64kB" - depends on !MMU || CPU_SH4 || CPU_SH5 + depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5 help This enables support for 64kB pages, possible on all SH-4 CPUs and later. 
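Before moving on to the fault path, it may help to see how the constants defined in pgtable_pmd.h above carve up a 32-bit virtual address. The snippet below is a standalone illustration rather than part of the patch; it assumes PAGE_SHIFT = 12, so that PMD_SHIFT works out to 21.

/*
 * Illustrative only -- not from the patch. Under 4K pages the 3-level
 * layout splits a 32-bit address as 2 + 9 + 9 + 12 bits:
 *   PMD_SHIFT    = 12 + (12 - 3) = 21   -> each PMD entry maps 2MB
 *   PGDIR_SHIFT  = 30                   -> each PGD entry maps 1GB
 *   PTRS_PER_PMD = 4096 / 8 = 512, and 512 * 2MB = 1GB per PGD entry
 *   PTRS_PER_PGD = 4, and 4 * 1GB covers the whole 4GB address space
 */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xc0801234UL;	/* arbitrary example address */

	printf("pgd index %lu, pmd index %lu, pte index %lu, offset 0x%lx\n",
	       (addr >> 30) & 3, (addr >> 21) & 511,
	       (addr >> 12) & 511, addr & 0xfff);
	return 0;
}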
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 47530104e0ad..28e22839c665 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) if (!pud_present(*pud_k)) return NULL; + if (!pud_present(*pud)) + set_pud(pud, *pud_k); + pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 432acd07e76a..761910d142f8 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -120,7 +120,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end, for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { pud = (pud_t *)pgd; for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { +#ifdef __PAGETABLE_PMD_FOLDED pmd = (pmd_t *)pud; +#else + pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); + pud_populate(&init_mm, pud, pmd); + pmd += k; +#endif for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { if (pmd_none(*pmd)) { pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); -- cgit v1.2.3 From 2a5eacca85d39d8b6dffae821d7d260f05584dc7 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 31 Dec 2009 12:19:24 +0000 Subject: sh: Move page table allocation out of line We also switched away from quicklists and instead moved to slab caches. After benchmarking both implementations the difference is negligible. The slab caches suit us better though because the size of a pgd table is just 4 entries when we're using a 3-level page table layout and quicklists always deal with pages. Signed-off-by: Matt Fleming --- arch/sh/include/asm/pgalloc.h | 10 ++++--- arch/sh/include/asm/pgalloc_nopmd.h | 30 ------------------- arch/sh/include/asm/pgalloc_pmd.h | 41 -------------------------- arch/sh/include/asm/pgtable.h | 4 +-- arch/sh/mm/Makefile | 2 +- arch/sh/mm/pgtable.c | 57 +++++++++++++++++++++++++++++++++++++ 6 files changed, 66 insertions(+), 78 deletions(-) delete mode 100644 arch/sh/include/asm/pgalloc_nopmd.h delete mode 100644 arch/sh/include/asm/pgalloc_pmd.h create mode 100644 arch/sh/mm/pgtable.c (limited to 'arch/sh/include/asm/pgtable.h') diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 4ea27855c3b5..e106474996b2 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -6,10 +6,13 @@ #define QUICK_PT 1 /* Other page table pages that are zero on free */ +extern pgd_t *pgd_alloc(struct mm_struct *); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); + #ifdef CONFIG_PGTABLE_LEVELS_3 -#include -#else -#include +extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); +extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -67,7 +70,6 @@ do { \ static inline void check_pgt_cache(void) { - __check_pgt_cache(); quicklist_trim(QUICK_PT, NULL, 25, 16); } diff --git a/arch/sh/include/asm/pgalloc_nopmd.h b/arch/sh/include/asm/pgalloc_nopmd.h deleted file mode 100644 index e4b344c37e74..000000000000 --- a/arch/sh/include/asm/pgalloc_nopmd.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __ASM_SH_PGALLOC_NOPMD_H -#define __ASM_SH_PGALLOC_NOPMD_H - -#define QUICK_PGD 0 /* We preserve special mappings over free */ - -static inline void pgd_ctor(void *x) -{ - pgd_t *pgd = x; - - memcpy(pgd + USER_PTRS_PER_PGD, - swapper_pg_dir + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - 
USER_PTRS_PER_PGD) * sizeof(pgd_t)); -} - -static inline pgd_t *pgd_alloc(struct mm_struct *mm) -{ - return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); -} - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - quicklist_free(QUICK_PGD, NULL, pgd); -} - -static inline void __check_pgt_cache(void) -{ - quicklist_trim(QUICK_PGD, NULL, 25, 16); -} - -#endif /* __ASM_SH_PGALLOC_NOPMD_H */ diff --git a/arch/sh/include/asm/pgalloc_pmd.h b/arch/sh/include/asm/pgalloc_pmd.h deleted file mode 100644 index 20f75cc4eb09..000000000000 --- a/arch/sh/include/asm/pgalloc_pmd.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef __ASM_SH_PGALLOC_PMD_H -#define __ASM_SH_PGALLOC_PMD_H - -static inline pgd_t *pgd_alloc(struct mm_struct *mm) -{ - pgd_t *pgd; - int i; - - pgd = kzalloc(sizeof(*pgd) * PTRS_PER_PGD, GFP_KERNEL | __GFP_REPEAT); - - for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) - pgd[i] = swapper_pg_dir[i]; - - return pgd; -} - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - kfree(pgd); -} - -static inline void __check_pgt_cache(void) -{ -} - -static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) -{ - set_pud(pud, __pud((unsigned long)pmd)); -} - -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - quicklist_free(QUICK_PT, NULL, pmd); -} - -#endif /* __ASM_SH_PGALLOC_PMD_H */ diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 9effcc3b0d10..78598ec33d0a 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -141,9 +141,9 @@ typedef pte_t *pte_addr_t; #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) /* - * No page table caches to initialise + * Initialise the page table caches */ -#define pgtable_cache_init() do { } while (0) +extern void pgtable_cache_init(void); struct vm_area_struct; diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 8a70535fa7ce..dd5010c708e0 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -15,7 +15,7 @@ obj-y += $(cacheops-y) mmu-y := nommu.o extable_32.o mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ - ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o + ioremap_$(BITS).o kmap.o pgtable.o tlbflush_$(BITS).o obj-y += $(mmu-y) obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c new file mode 100644 index 000000000000..e1bc5483cc07 --- /dev/null +++ b/arch/sh/mm/pgtable.c @@ -0,0 +1,57 @@ +#include + +#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO + +static struct kmem_cache *pgd_cachep; + +#ifdef CONFIG_PGTABLE_LEVELS_3 +static struct kmem_cache *pmd_cachep; +#endif + +void pgd_ctor(void *x) +{ + pgd_t *pgd = x; + + memcpy(pgd + USER_PTRS_PER_PGD, + swapper_pg_dir + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); +} + +void pgtable_cache_init(void) +{ + pgd_cachep = kmem_cache_create("pgd_cache", + PTRS_PER_PGD * (1< Date: Wed, 13 Jan 2010 19:11:14 +0900 Subject: sh: default to extended TLB support. All SH-X2 and SH-X3 parts support an extended TLB mode, which has been left as experimental since support was originally merged. Now that it's had some time to stabilize and get some exposure to various platforms, we can drop it as an option and default enable it across the board. 
This is also good future proofing for newer parts that will drop support for the legacy TLB mode completely. This will also force 3-level page tables for all newer parts, which is necessary both for the varying page sizes and larger memories. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgalloc.h | 2 +- arch/sh/include/asm/pgtable.h | 2 +- arch/sh/include/asm/pgtable_nopmd.h | 11 ++++++----- arch/sh/include/asm/pgtable_pmd.h | 5 +++-- arch/sh/mm/Kconfig | 29 ++--------------------------- arch/sh/mm/pgtable.c | 9 ++++----- 6 files changed, 17 insertions(+), 41 deletions(-) (limited to 'arch/sh/include/asm/pgtable.h') diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index f8982f4e0405..8c00785c60d5 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -9,7 +9,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#ifdef CONFIG_PGTABLE_LEVELS_3 +#if PAGETABLE_LEVELS > 2 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 78598ec33d0a..856ece07d31b 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -12,7 +12,7 @@ #ifndef __ASM_SH_PGTABLE_H #define __ASM_SH_PGTABLE_H -#ifdef CONFIG_PGTABLE_LEVELS_3 +#ifdef CONFIG_X2TLB #include #else #include diff --git a/arch/sh/include/asm/pgtable_nopmd.h b/arch/sh/include/asm/pgtable_nopmd.h index f0b525b3cb4a..b8355e4057cf 100644 --- a/arch/sh/include/asm/pgtable_nopmd.h +++ b/arch/sh/include/asm/pgtable_nopmd.h @@ -6,17 +6,18 @@ /* * traditional two-level paging structure */ +#define PAGETABLE_LEVELS 2 /* PTE bits */ -#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ +#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) +#define PTE_SHIFT PAGE_SHIFT +#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) /* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) +#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) -#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) +#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #endif /* __ASM_SH_PGTABLE_NOPMD_H */ diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h index 42a180e534a8..587b05e1d04f 100644 --- a/arch/sh/include/asm/pgtable_pmd.h +++ b/arch/sh/include/asm/pgtable_pmd.h @@ -7,11 +7,12 @@ * Some cores need a 3-level page table layout, for example when using * 64-bit PTEs and 4K pages. */ +#define PAGETABLE_LEVELS 3 -#define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ +#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */ /* PGD bits */ -#define PGDIR_SHIFT 30 +#define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 #define USER_PTRS_PER_PGD 2 diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 860cd24b4205..7a4ebc8cbadd 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -100,13 +100,8 @@ config PMB_LEGACY and allows systems to support up to 512MiB of system memory. config X2TLB - bool "Enable extended TLB mode" - depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL - help - Selecting this option will enable the extended mode of the SH-X2 - TLB. For legacy SH-X behaviour and interoperability, say N. For - all of the fun new features and a willingless to submit bug reports, - say Y. 
+ def_bool y + depends on (CPU_SHX2 || CPU_SHX3) && MMU config VSYSCALL bool "Support vsyscall page" @@ -174,32 +169,12 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG -choice - prompt "Page table layout" - default PGTABLE_LEVELS_3 if X2TLB - default PGTABLE_LEVELS_2 - -config PGTABLE_LEVELS_2 - bool "2 Levels" - help - This is the default page table layout for all SuperH CPUs. - -config PGTABLE_LEVELS_3 - bool "3 Levels" - depends on X2TLB - help - This enables a 3 level page table structure. - -endchoice - choice prompt "Kernel page size" - default PAGE_SIZE_8KB if X2TLB default PAGE_SIZE_4KB config PAGE_SIZE_4KB bool "4kB" - depends on !MMU || !X2TLB || PGTABLE_LEVELS_3 help This is the default page size used by all SuperH CPUs. diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c index e1bc5483cc07..6f21fb1d8726 100644 --- a/arch/sh/mm/pgtable.c +++ b/arch/sh/mm/pgtable.c @@ -3,8 +3,7 @@ #define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO static struct kmem_cache *pgd_cachep; - -#ifdef CONFIG_PGTABLE_LEVELS_3 +#if PAGETABLE_LEVELS > 2 static struct kmem_cache *pmd_cachep; #endif @@ -22,7 +21,7 @@ void pgtable_cache_init(void) pgd_cachep = kmem_cache_create("pgd_cache", PTRS_PER_PGD * (1< 2 pmd_cachep = kmem_cache_create("pmd_cache", PTRS_PER_PMD * (1< 2 void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { set_pud(pud, __pud((unsigned long)pmd)); @@ -54,4 +53,4 @@ void pmd_free(struct mm_struct *mm, pmd_t *pmd) { kmem_cache_free(pmd_cachep, pmd); } -#endif /* CONFIG_PGTABLE_LEVELS_3 */ +#endif /* PAGETABLE_LEVELS > 2 */ -- cgit v1.2.3 From e44d6c4010a4c84369013932eea4721d16cbc08d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Jan 2010 19:18:39 +0900 Subject: sh: Rename split-level pgtable headers. These were originally named _nopmd and _pmd to follow their asm-generic counterparts, but we rename them to -2level and -3level for general consistency. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable-2level.h | 23 +++++++++++++++ arch/sh/include/asm/pgtable-3level.h | 56 ++++++++++++++++++++++++++++++++++++ arch/sh/include/asm/pgtable.h | 4 +-- arch/sh/include/asm/pgtable_nopmd.h | 23 --------------- arch/sh/include/asm/pgtable_pmd.h | 56 ------------------------------------ 5 files changed, 81 insertions(+), 81 deletions(-) create mode 100644 arch/sh/include/asm/pgtable-2level.h create mode 100644 arch/sh/include/asm/pgtable-3level.h delete mode 100644 arch/sh/include/asm/pgtable_nopmd.h delete mode 100644 arch/sh/include/asm/pgtable_pmd.h (limited to 'arch/sh/include/asm/pgtable.h') diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h new file mode 100644 index 000000000000..19bd89db17e7 --- /dev/null +++ b/arch/sh/include/asm/pgtable-2level.h @@ -0,0 +1,23 @@ +#ifndef __ASM_SH_PGTABLE_2LEVEL_H +#define __ASM_SH_PGTABLE_2LEVEL_H + +#include + +/* + * traditional two-level paging structure + */ +#define PAGETABLE_LEVELS 2 + +/* PTE bits */ +#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ + +#define PTE_SHIFT PAGE_SHIFT +#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) + +/* PGD bits */ +#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) + +#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) +#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) + +#endif /* __ASM_SH_PGTABLE_2LEVEL_H */ diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h new file mode 100644 index 000000000000..249a985d9648 --- /dev/null +++ b/arch/sh/include/asm/pgtable-3level.h @@ -0,0 +1,56 @@ +#ifndef __ASM_SH_PGTABLE_3LEVEL_H +#define __ASM_SH_PGTABLE_3LEVEL_H + +#include + +/* + * Some cores need a 3-level page table layout, for example when using + * 64-bit PTEs and 4K pages. + */ +#define PAGETABLE_LEVELS 3 + +#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */ + +/* PGD bits */ +#define PGDIR_SHIFT 30 + +#define PTRS_PER_PGD 4 +#define USER_PTRS_PER_PGD 2 + +/* PMD bits */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE) + +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) + +typedef struct { unsigned long long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return pud_val(pud); +} + +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} + +#define pud_none(x) (!pud_val(x)) +#define pud_present(x) (pud_val(x)) +#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0) +#define pud_bad(x) (pud_val(x) & ~PAGE_MASK) + +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) 
+ */ +#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) + +#endif /* __ASM_SH_PGTABLE_3LEVEL_H */ diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 856ece07d31b..aab76528abb9 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -13,9 +13,9 @@ #define __ASM_SH_PGTABLE_H #ifdef CONFIG_X2TLB -#include +#include #else -#include +#include #endif #include diff --git a/arch/sh/include/asm/pgtable_nopmd.h b/arch/sh/include/asm/pgtable_nopmd.h deleted file mode 100644 index b8355e4057cf..000000000000 --- a/arch/sh/include/asm/pgtable_nopmd.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_SH_PGTABLE_NOPMD_H -#define __ASM_SH_PGTABLE_NOPMD_H - -#include - -/* - * traditional two-level paging structure - */ -#define PAGETABLE_LEVELS 2 - -/* PTE bits */ -#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ - -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) - -/* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) - -#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) - -#endif /* __ASM_SH_PGTABLE_NOPMD_H */ diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h deleted file mode 100644 index 587b05e1d04f..000000000000 --- a/arch/sh/include/asm/pgtable_pmd.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef __ASM_SH_PGTABLE_PMD_H -#define __ASM_SH_PGTABLE_PMD_H - -#include - -/* - * Some cores need a 3-level page table layout, for example when using - * 64-bit PTEs and 4K pages. - */ -#define PAGETABLE_LEVELS 3 - -#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */ - -/* PGD bits */ -#define PGDIR_SHIFT 30 - -#define PTRS_PER_PGD 4 -#define USER_PTRS_PER_PGD 2 - -/* PMD bits */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE)) -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) - -#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE) - -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) - -typedef struct { unsigned long long pmd; } pmd_t; -#define pmd_val(x) ((x).pmd) -#define __pmd(x) ((pmd_t) { (x) } ) - -static inline unsigned long pud_page_vaddr(pud_t pud) -{ - return pud_val(pud); -} - -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) -{ - return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); -} - -#define pud_none(x) (!pud_val(x)) -#define pud_present(x) (pud_val(x)) -#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0) -#define pud_bad(x) (pud_val(x) & ~PAGE_MASK) - -/* - * (puds are folded into pgds so this doesn't get actually called, - * but the define is needed for a generic inline function.) - */ -#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) - -#endif /* __ASM_SH_PGTABLE_PMD_H */ -- cgit v1.2.3