From 0536bdf33faff4d940ac094c77998cfac368cfff Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 25 Aug 2011 00:35:59 -0400
Subject: ARM: move iotable mappings within the vmalloc region

In order to remove the build time variation between different SOCs with
regards to VMALLOC_END, the iotable mappings are now allocated inside
the vmalloc region. This allows for VMALLOC_END to be identical across
all machines.

The value for VMALLOC_END is now set to 0xff000000 which is right where
the consistent DMA area starts.

To accommodate all static mappings on machines with possible highmem
usage, the default vmalloc area size is changed to 240 MB so that
VMALLOC_START is no higher than 0xf0000000 by default.

Signed-off-by: Nicolas Pitre
Tested-by: Stephen Warren
Tested-by: Kevin Hilman
Tested-by: Jamie Iles
---
 arch/arm/include/asm/pgtable.h | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce3a553..6cdd55cb0b8c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -21,7 +21,6 @@
 #else
 #include
-#include
 #include
 #include
@@ -33,15 +32,10 @@
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
  */
-#ifndef VMALLOC_START
 #define VMALLOC_OFFSET (8*1024*1024)
 #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END 0xff000000UL

 #define LIBRARY_TEXT_START 0x0c000000
-- cgit v1.2.1
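
As a quick cross-check of the layout this patch establishes, the stand-alone
C sketch below redoes the VMALLOC_START rounding from the hunk above and
prints the size of the resulting vmalloc window. The high_memory value is an
assumed example (lowmem ending at 0xef800000); it is not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned long vmalloc_offset = 8UL * 1024 * 1024;
	unsigned long vmalloc_end = 0xff000000UL;
	/* assumed example value for the end of lowmem, not from the patch */
	unsigned long high_memory = 0xef800000UL;
	unsigned long vmalloc_start =
		(high_memory + vmalloc_offset) & ~(vmalloc_offset - 1);

	printf("VMALLOC_START = 0x%lx\n", vmalloc_start);
	printf("vmalloc area  = %lu MB\n", (vmalloc_end - vmalloc_start) >> 20);
	return 0;
}

With that assumption the start rounds up to 0xf0000000 and the window comes
out at 240 MB, matching the commit message above.
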
From 0af362f8440a78b970d5f215e234420fa87d0f3f Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 19 Sep 2011 00:28:45 -0400
Subject: ARM: move VMALLOC_END down temporarily for shmobile

THIS IS A TEMPORARY HACK.

The purpose of this is _only_ to avoid a regression on an existing
machine while a better fix is implemented.

On shmobile the consistent DMA memory area was set to 158MB in commit
28f0721a79 with no explanation. The documented size for this area should
vary between 2MB and 14MB, and none of the other ARM targets exceed that.
The included #warning is therefore meant to be noisy on purpose to get
shmobile maintainers attention and this commit reverted once this
consistent DMA size conflict is resolved.

Signed-off-by: Nicolas Pitre
Cc: Magnus Damm
Cc: Paul Mundt
---
 arch/arm/include/asm/pgtable.h | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 6cdd55cb0b8c..bcae9b81a6d0 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -37,6 +37,13 @@
 #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END 0xff000000UL

+/* This is a temporary hack until shmobile's DMA area size is sorted out */
+#ifdef CONFIG_ARCH_SHMOBILE
+#warning "SH-Mobile's consistent DMA size conflicts with VMALLOC_END by 144MB"
+#undef VMALLOC_END
+#define VMALLOC_END 0xF6000000UL
+#endif
+
 #define LIBRARY_TEXT_START 0x0c000000

 #ifndef __ASSEMBLY__
-- cgit v1.2.1

From 7dbaa466780a754154531b44c2086f6618cee3a8 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Tue, 22 Nov 2011 04:01:07 +0100
Subject: ARM: 7169/1: topdown mmap support

Similar to other architectures, this adds topdown mmap support in user
process address space allocation policy. This allows mmap sizes greater
than 2GB. This support is largely copied from MIPS and the generic
implementations.

The address space randomization is moved into arch_pick_mmap_layout.

Tested on V-Express with ubuntu and a mmap test from here:
https://bugs.launchpad.net/bugs/861296

Signed-off-by: Rob Herring
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/pgtable.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce3a553..2f659e239727 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -336,6 +336,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * We provide our own arch_get_unmapped_area to cope with VIPT caches.
  */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
-- cgit v1.2.1
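
The Launchpad link in the patch above points at the original test program.
As a rough stand-in (not that exact test), a user-space sketch like the one
below keeps requesting 256 MB anonymous mappings and reports how far it gets,
which is the behaviour the top-down layout is meant to improve for 32-bit
processes:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t chunk = 256UL << 20;	/* 256 MB per request */
	size_t total = 0;
	int i;

	/* Keep asking for anonymous memory until mmap() fails, then
	 * report the total a 32-bit process managed to map. */
	for (i = 0; i < 15; i++) {
		void *p = mmap(NULL, chunk, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			break;
		total += chunk;
	}
	printf("mapped %zu MB of anonymous memory\n", total >> 20);
	return 0;
}

With the legacy bottom-up layout a 32-bit process typically tops out around
2 GB here; with the top-down policy it can get noticeably closer to TASK_SIZE.
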
From 8903826d0cd99aed9267e792d38284cf3092042b Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 30 Sep 2011 11:43:29 +0100
Subject: ARM: idmap: populate identity map pgd at init time using .init.text

When disabling and re-enabling the MMU, it is necessary to take out an
identity mapping for the code that manipulates the SCTLR in order to
avoid it disappearing from under our feet. This is useful when soft
rebooting and returning from CPU suspend.

This patch allocates a set of page tables during boot and populates them
with an identity mapping for the .idmap.text section. This means that
users of the identity map do not need to manage their own pgd and can
instead annotate their functions with __idmap or, in the case of assembly
code, place them in the correct section.

Acked-by: Dave Martin
Reviewed-by: Catalin Marinas
Tested-by: Lorenzo Pieralisi
Signed-off-by: Will Deacon
---
 arch/arm/include/asm/pgtable.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce3a553..03893a55e680 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -346,9 +346,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

 #define pgtable_cache_init() do { } while (0)

-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */

 #endif /* CONFIG_MMU */
-- cgit v1.2.1

From a32618d28dbe6e9bf8ec508ccbc3561a7d7d32f0 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 22 Nov 2011 17:30:28 +0000
Subject: ARM: pgtable: switch to use pgtable-nopud.h

Nick Piggin noted upon introducing 4level-fixup.h:

| Add a temporary "fallback" header so architectures can run with
| the 4level pagetables patch without modification. All architectures
| should be converted to use the folding headers (include/asm-generic/
| pgtable-nop?d.h) as soon as possible, and the fallback header removed.

This makes ARM compliant with this statement.

Signed-off-by: Russell King
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/pgtable.h | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 03893a55e680..e14826e7efd9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,15 +11,16 @@
 #define _ASMARM_PGTABLE_H

 #include
-#include
 #include

 #ifndef CONFIG_MMU

+#include
 #include "pgtable-nommu.h"

 #else

+#include
 #include
 #include
 #include
@@ -164,20 +165,22 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

 /*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
  */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (1)
-#define pgd_clear(pgdp) do { } while (0)
-#define set_pgd(pgd,pgdp) do { } while (0)
+#define pud_none(pud) (0)
+#define pud_bad(pud) (0)
+#define pud_present(pud) (1)
+#define pud_clear(pudp) do { } while (0)
 #define set_pud(pud,pudp) do { } while (0)

 /* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr) ((pmd_t *)(dir))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud;
+}

 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_present(pmd) (pmd_val(pmd))
-- cgit v1.2.1
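
With pgtable-nopud.h in place the pud level is folded into the pgd, so generic
code still walks pgd -> pud -> pmd -> pte while the middle steps cost nothing
on classic ARM. The sketch below is illustrative only: lookup_pte_example is
not a kernel function, locking is omitted, and it assumes the pte table is in
lowmem (no CONFIG_HIGHPTE).

#include <linux/mm.h>

/*
 * Illustrative only, not a kernel function: walk the tables for 'addr'
 * in 'mm'.  With pgtable-nopud.h the pud_offset() step just recasts the
 * pgd entry, and the pmd_offset() added above recasts the pud the same
 * way, so no extra memory is read for the middle levels on classic ARM.
 */
static pte_t *lookup_pte_example(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
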
From e0c0313bd720977a7ed01dc48f0762a3ddec607f Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 22 Nov 2011 17:30:28 +0000
Subject: ARM: LPAE: Move page table maintenance macros to pgtable-2level.h

The page table maintenance macros need to be duplicated between the
classic and the LPAE MMU so this patch moves those that are not common
to the pgtable-2level.h file.

Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/pgtable.h | 38 --------------------------------------
 1 file changed, 38 deletions(-)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index e14826e7efd9..977245b0875f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -164,41 +164,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

-/*
- * The "pud_xxx()" functions here are trivial when the pmd is folded into
- * the pud: the pud entry is never bad, always exists, and can't be set or
- * cleared.
- */
-#define pud_none(pud) (0)
-#define pud_bad(pud) (0)
-#define pud_present(pud) (1)
-#define pud_clear(pudp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-	return (pmd_t *)pud;
-}
-
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_present(pmd) (pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps) \
-	do { \
-		pmdpd[0] = pmdps[0]; \
-		pmdpd[1] = pmdps[1]; \
-		flush_pmd_entry(pmdpd); \
-	} while (0)
-
-#define pmd_clear(pmdp) \
-	do { \
-		pmdp[0] = __pmd(0); \
-		pmdp[1] = __pmd(0); \
-		clean_pmd_entry(pmdp); \
-	} while (0)

 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
@@ -207,10 +174,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)

 #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end) (end)
-
-
 #ifndef CONFIG_HIGHPTE
 #define __pte_map(pmd) pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte) do { } while (0)
@@ -232,7 +195,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)

-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)

 #if __LINUX_ARM_ARCH__ < 6
-- cgit v1.2.1
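
The copy_pmd()/pmd_clear() definitions being moved out here write two entries
at a time because, in the classic 2-level format, each Linux pmd groups two
adjacent 1MB hardware first-level descriptors (2MB per Linux pmd). The
freestanding sketch below illustrates that idea; it is a made-up helper, not
the kernel macro, and it only hints at the cache maintenance the real code
performs.

/*
 * Illustrative only: clearing one classic-MMU Linux pmd means zeroing
 * both 1MB hardware descriptors it covers.  The real pmd_clear() above
 * additionally cleans the entries to memory with clean_pmd_entry() so
 * the hardware table walker sees the update.
 */
static void example_clear_classic_pmd(unsigned long hw_entry[2])
{
	hw_entry[0] = 0;	/* first 1MB hardware descriptor */
	hw_entry[1] = 0;	/* second 1MB descriptor in the same Linux pmd */
}
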
From dcfdae04bd92e8a2ea155db0e21e3bddc09e0a89 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 22 Nov 2011 17:30:29 +0000
Subject: ARM: LPAE: Introduce the 3-level page table format definitions

This patch introduces the pgtable-3level*.h files with definitions
specific to the LPAE page table format (3 levels of page tables).

Each table is 4KB and has 512 64-bit entries. An entry can point to a
40-bit physical address. The young, write and exec software bits share
the corresponding hardware bits (negated). Other software bits use spare
bits in the PTE.

The patch also changes some variable types from unsigned long or int to
pteval_t or pgprot_t.

Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/pgtable.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 977245b0875f..3ddcf66f5e04 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -25,7 +25,11 @@
 #include
 #include

+#ifdef CONFIG_ARM_LPAE
+#include
+#else
 #include
+#endif

 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
-- cgit v1.2.1

From 9561f4e052a06167694e110d76ce3a5e38b59522 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 2 Jan 2012 23:00:32 -0500
Subject: Revert "ARM: move VMALLOC_END down temporarily for shmobile"

This reverts commit 0af362f8440a78b970d5f215e234420fa87d0f3f as
shmobile is not using a non-standard memory layout anymore.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/pgtable.h | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/arm/include/asm/pgtable.h')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index bcae9b81a6d0..6cdd55cb0b8c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -37,13 +37,6 @@
 #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END 0xff000000UL

-/* This is a temporary hack until shmobile's DMA area size is sorted out */
-#ifdef CONFIG_ARCH_SHMOBILE
-#warning "SH-Mobile's consistent DMA size conflicts with VMALLOC_END by 144MB"
-#undef VMALLOC_END
-#define VMALLOC_END 0xF6000000UL
-#endif
-
 #define LIBRARY_TEXT_START 0x0c000000

 #ifndef __ASSEMBLY__
-- cgit v1.2.1
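
For reference, the 3-level LPAE format introduced in dcfdae04bd92 above splits
a 32-bit virtual address as 2 + 9 + 9 + 12 bits with 4KB pages: 512 64-bit
entries per 4KB table give 9 index bits at the second and third levels, and
the remaining top 2 bits select one of the 4 first-level entries. The program
below is illustrative stand-alone arithmetic, not kernel code, and the example
address is arbitrary.

#include <stdio.h>

/* Illustrative only: decompose a 32-bit VA under LPAE with 4KB pages. */
int main(void)
{
	unsigned long va = 0xc0123456UL;	/* arbitrary example address */

	unsigned int offset  = va & 0xfff;		/* bits 0..11  */
	unsigned int pte_idx = (va >> 12) & 0x1ff;	/* bits 12..20 */
	unsigned int pmd_idx = (va >> 21) & 0x1ff;	/* bits 21..29 */
	unsigned int pgd_idx = (va >> 30) & 0x3;	/* bits 30..31 */

	printf("pgd %u, pmd %u, pte %u, page offset 0x%x\n",
	       pgd_idx, pmd_idx, pte_idx, offset);
	return 0;
}
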