From 5d9b4b19f118abfb75e352841f7bf74580d7e427 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sun, 13 Dec 2009 14:38:50 +0000
Subject: sh: Definitions for 3-level page table layout

If using 64-bit PTEs and 4K pages then each page table has 512 entries
(as opposed to 1024 entries with 32-bit PTEs). Unlike MIPS, SH follows
the convention that all structures in the page table (pgd_t, pmd_t,
pgprot_t, etc.) must be the same size. Therefore, 64-bit PTEs require
64-bit PGD entries, etc.

Using 2 levels of page tables and 64-bit PTEs it is only possible to map
1GB of virtual address space. In order to map all 4GB of virtual address
space we need to adopt a 3-level page table layout. This actually works
out better for CONFIG_SUPERH32 because we only waste 2 PGD entries on
the P1 and P2 areas (which are untranslated) instead of 256.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76a..761910d142f8 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -120,7 +120,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

From cbf6b1ba7ae12b3f7cb6b0d060b88d44649f9eda Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 12 Jan 2010 19:01:11 +0900
Subject: sh: Always provide thread_info allocators.

Presently the thread_info allocators are special-cased, depending on
THREAD_SHIFT < PAGE_SHIFT. This provides a sensible definition for them
regardless of configuration, in preparation for extended CPU state.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 29 -----------------------------
 1 file changed, 29 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 761910d142f8..d5fb014279ad 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -283,35 +283,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {

From 07cad4dc1bfdaefd20c6329e9d8179ad1c600e92 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 17 Nov 2009 22:03:41 +0000
Subject: sh: Generalise the pte handling code for the fixmap path

Generalise the code for setting and clearing PTEs and allow TLB entries
to be pinned and unpinned if the _PAGE_WIRED flag is present.
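
As an illustration of what this enables, a caller could pin and later
release a translation by setting _PAGE_WIRED in the pgprot passed to the
fixmap helpers. This is only a sketch: FIX_EXAMPLE is a hypothetical
fixmap index, while __set_fixmap()/__clear_fixmap() and the wiring
behaviour are what the diff below introduces.

	/* Sketch only: FIX_EXAMPLE is a hypothetical fixmap index. */
	static void pin_example(unsigned long phys)
	{
		pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_WIRED);

		/* set_pte_phys() sees _PAGE_WIRED and calls tlb_wire_entry() */
		__set_fixmap(FIX_EXAMPLE, phys, prot);
	}

	static void unpin_example(void)
	{
		/* clear_pte_phys() calls tlb_unwire_entry() before clearing */
		__clear_fixmap(FIX_EXAMPLE, __pgprot(_PAGE_WIRED));
	}
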
Signed-off-by: Matt Fleming
---
 arch/sh/mm/init.c | 44 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 4 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d5fb014279ad..30a9b530d456 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,7 +39,7 @@ unsigned long cached_to_uncached = P2SEG - P1SEG;
 #endif
 
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +49,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -72,6 +80,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -101,6 +125,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd_base)
 {

From d9b9487af79955a8e8fcddc963f56697e020cfed Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 18 Jan 2010 21:08:32 +0900
Subject: sh: Handle early ioremaps through fixed mappings.

This adds a mem_init_done flag to work out when a standard ioremap()
is possible, falling back to the fixmap-based ioremap otherwise.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 30a9b530d456..fe578a286fdd 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -237,6 +237,8 @@ static void __init iommu_init(void)
 	no_iommu_init();
 }
 
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
 	int codesize, datasize, initsize;
@@ -287,6 +289,8 @@ void __init mem_init(void)
 
 	/* Initialize the vDSO */
 	vsyscall_init();
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)

From cb6d04468d16de5a6161167ec7e76a43be540a80 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 19 Jan 2010 15:22:52 +0900
Subject: sh: Kill off now bogus fixmap/page wiring documentation.

The plans for _PAGE_WIRED were detailed in a comment with the fixmap
code, but as it's now all taken care of, we no longer have any reason
for keeping it around, particularly since it's no longer accurate. Kill
it off.
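
To make the mem_init_done change above concrete, the ioremap() entry
point can key off the flag roughly as follows. This is a sketch only:
ioremap_fixed() and __ioremap() stand in for the fixmap-based and
vmalloc-based paths, and their exact signatures are assumptions rather
than part of these patches.

	/* Sketch only: helper names and signatures are assumptions. */
	void __iomem *ioremap(unsigned long phys, unsigned long size)
	{
		if (!mem_init_done)
			/* too early for the vmalloc-backed path */
			return ioremap_fixed(phys, size, PAGE_KERNEL_NOCACHE);

		return __ioremap(phys, size, PAGE_KERNEL_NOCACHE);
	}
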
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index fe578a286fdd..32ebd1592e63 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -98,21 +98,6 @@ static void clear_pte_phys(unsigned long addr, pgprot_t prot)
 	local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- * -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);

From 2efa53b269ec1e9289a108e1506f53f6f1de440b Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 20 Jan 2010 16:40:48 +0900
Subject: sh: Make 29/32-bit mode check helper generally available.

Presently __in_29bit_mode() is only defined for the PMB case, but it's
also easily derived from the CONFIG_29BIT and CONFIG_32BIT &&
CONFIG_PMB=n cases.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 32ebd1592e63..491d9d5c8e0d 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -338,10 +338,3 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
-
-#ifdef CONFIG_PMB
-int __in_29bit_mode(void)
-{
-	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
-}
-#endif /* CONFIG_PMB */

From 35f99c0da17beb5004f06ba86a87d82c1a6467be Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 20 Jan 2010 18:48:17 +0900
Subject: sh: pretty print virtual memory map on boot.

This cribs the pretty printing from arch/x86/mm/init_32.c to dump the
virtual memory layout on boot. This is primarily intended as a
debugging aid, given that the newer CPUs have full control over their
address space and as such have little to nothing in common with the
legacy layout.
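
Looping back to the 29/32-bit helper change above, the derivation that
its message describes could be expressed along these lines. This is a
sketch under stated assumptions: the patch only shows the removal from
init.c, and the generalized helper's actual home and spelling are not
part of this diff.

	/* Sketch only: placement and exact form are assumptions. */
	static inline int __in_29bit_mode(void)
	{
	#if defined(CONFIG_29BIT)
		return 1;
	#elif defined(CONFIG_32BIT) && !defined(CONFIG_PMB)
		return 0;
	#else
		/* 32-bit with PMB: probe the hardware, as the removed code did */
		return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
	#endif
	}
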
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 491d9d5c8e0d..f3e23ad075cb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -260,6 +260,9 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
+	/* Initialize the vDSO */
+	vsyscall_init();
+
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -272,8 +275,39 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
-	/* Initialize the vDSO */
-	vsyscall_init();
+	printk(KERN_INFO "virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
 	mem_init_done = 1;
 }

From 3125ee72dca25fc2157dcddd07e2d740db921fc4 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 21 Jan 2010 15:54:31 +0900
Subject: sh: Track the uncached mapping size.

This provides a variable for tracking the uncached mapping size, and
uses it for pretty printing the uncached lowmem range. Beyond this,
we'll also be building on top of it for figuring out from where the
remainder of P2 becomes usable when constructing unrelated mappings.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index f3e23ad075cb..a28ff63c17a6 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -27,15 +27,17 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #ifdef CONFIG_SUPERH32
 /*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- *
  * This is the offset of the uncached section from its cached alias.
- * Default value only valid in 29 bit mode, in 32bit mode will be
- * overridden in pmb_init.
+ *
+ * Legacy platforms handle trivial transitions between cached and
+ * uncached segments by making use of the 1:1 mapping relationship in
+ * 512MB lowmem, others via a special uncached mapping.
+ *
+ * Default value only valid in 29 bit mode, in 32bit mode this will be
+ * updated by the early PMB initialization code.
 */
 unsigned long cached_to_uncached = P2SEG - P1SEG;
+unsigned long uncached_size = 0x20000000;
 #endif
 
 #ifdef CONFIG_MMU
@@ -281,7 +283,8 @@ void __init mem_init(void)
 		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
 		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
 		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
@@ -299,6 +302,10 @@ void __init mem_init(void)
 		(unsigned long)memory_start, (unsigned long)high_memory,
 		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
 
+		(unsigned long)memory_start + cached_to_uncached,
+		(unsigned long)memory_start + cached_to_uncached + uncached_size,
+		uncached_size >> 20,
+
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
 		((unsigned long)&__init_end -
 		 (unsigned long)&__init_begin) >> 10,

From 2dc2f8e0c46864e2a3722c84eaa96513d4cf8b2f Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 21 Jan 2010 16:05:25 +0900
Subject: sh: Kill off the special uncached section and fixmap.

Now that cached_to_uncached works as advertised in 32-bit mode and we're
never going to be able to map < 16MB anyway, there's no need for the
special uncached section. Kill it off.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index a28ff63c17a6..dffa6c749489 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -211,9 +211,6 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
-
-	/* Set up the uncached fixmap */
-	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 }

From b0f3ae03aca0f331b851ae94bc066124e7f104df Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 12 Feb 2010 15:40:00 +0900
Subject: sh: Isolate uncached mapping support.

This splits out the uncached mapping support under its own config
option, presently only used by 29-bit mode and 32-bit + PMB. This will
make it possible to optionally add an uncached mapping on sh64, as well
as to boot without an uncached mapping for 32-bit.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index dffa6c749489..58012b6bbe76 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -21,11 +21,12 @@
 #include
 #include
 #include
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_SUPERH32
+#ifdef CONFIG_UNCACHED_MAPPING
 /*
  * This is the offset of the uncached section from its cached alias.
  *
  * Legacy platforms handle trivial transitions between cached and
  * uncached segments by making use of the 1:1 mapping relationship in
  * 512MB lowmem, others via a special uncached mapping.
  *
  * Default value only valid in 29 bit mode, in 32bit mode this will be
  * updated by the early PMB initialization code.
 */
-unsigned long cached_to_uncached = P2SEG - P1SEG;
-unsigned long uncached_size = 0x20000000;
+unsigned long cached_to_uncached = 0x20000000;
+unsigned long uncached_size = SZ_512M;
 #endif
 
 #ifdef CONFIG_MMU
@@ -281,7 +282,9 @@ void __init mem_init(void)
 #endif
 		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
 		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
 		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
@@ -299,9 +302,11 @@ void __init mem_init(void)
 		(unsigned long)memory_start, (unsigned long)high_memory,
 		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
 
+#ifdef CONFIG_UNCACHED_MAPPING
 		(unsigned long)memory_start + cached_to_uncached,
 		(unsigned long)memory_start + cached_to_uncached + uncached_size,
 		uncached_size >> 20,
+#endif
 
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
 		((unsigned long)&__init_end -
 		 (unsigned long)&__init_begin) >> 10,

From 9edef28653a519bf0a48250f36cce96b1736ec4e Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 17 Feb 2010 16:28:00 +0900
Subject: sh: uncached mapping helpers.

This adds some helper routines for uncached mapping support. This
simplifies some of the cases where we need to check the uncached mapping
boundaries, in addition to giving us a centralized location on which to
build more complex manipulation.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 21 ++-------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58012b6bbe76..08e280d7cc7e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -26,21 +26,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_UNCACHED_MAPPING
-/*
- * This is the offset of the uncached section from its cached alias.
- *
- * Legacy platforms handle trivial transitions between cached and
- * uncached segments by making use of the 1:1 mapping relationship in
- * 512MB lowmem, others via a special uncached mapping.
- *
- * Default value only valid in 29 bit mode, in 32bit mode this will be
- * updated by the early PMB initialization code.
- */
-unsigned long cached_to_uncached = 0x20000000;
-unsigned long uncached_size = SZ_512M;
-#endif
-
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -260,7 +245,7 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	/* Initialize the vDSO */
+	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -303,9 +288,7 @@ void __init mem_init(void)
 		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
 
 #ifdef CONFIG_UNCACHED_MAPPING
-		(unsigned long)memory_start + cached_to_uncached,
-		(unsigned long)memory_start + cached_to_uncached + uncached_size,
-		uncached_size >> 20,
+		uncached_start, uncached_end, uncached_size >> 20,
 #endif
 
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,

From d01447b3197c2c470a14666be2c640407bbbfec7 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 18 Feb 2010 18:13:51 +0900
Subject: sh: Merge legacy and dynamic PMB modes.

This implements a bit of rework for the PMB code, which permits us to
kill off the legacy PMB mode completely.

Rather than trusting the boot loader to do the right thing, we do a
quick verification of the PMB contents to determine whether to have the
kernel set up the initial mappings or whether it needs to mangle them
later on instead.

If we're booting from legacy mappings, the kernel will now take control
of them and make them match the kernel's initial mapping configuration.
This is accomplished by breaking the initialization phase out into
multiple steps: synchronization, merging, and resizing.

With the recent rework, the synchronization code establishes page links
for compound mappings already, so we build on top of this for promoting
mappings and reclaiming unused slots.

At the same time, the changes introduced for the uncached helpers also
permit us to dynamically resize the uncached mapping without any
particular headaches. The smallest page size is more than sufficient
for mapping all of kernel text, and as we're careful not to jump to any
far-off locations in the setup code, the mapping can safely be resized
regardless of whether we are executing from it or not.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 08e280d7cc7e..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -245,7 +245,6 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
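
The multi-step initialization described above might be structured along
the following lines. This outline is illustrative only: the PMB code
itself lives outside this file, and the phase function names here are
assumptions rather than the patch's actual symbols.

	/* Outline of the PMB bring-up phases described in the message above. */
	void __init pmb_init(void)
	{
		/* 1. Synchronize: adopt the entries the boot loader left behind */
		pmb_synchronize();

		/* 2. Merge: promote compound legacy mappings into larger ones */
		pmb_merge();

		/* 3. Resize: shrink or grow the uncached mapping to fit */
		pmb_resize();
	}
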