From cc92c28b2db5b406657ecc05235d4ca4e222ae34 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 14 Jun 2010 21:16:19 -0400 Subject: [ARM] add address randomization to mmap() Signed-off-by: Nicolas Pitre --- arch/arm/mm/mmap.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f5abc51c5a07..4f5b39687df5 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -7,6 +7,7 @@ #include #include #include +#include <linux/random.h> #include #include @@ -80,6 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; } + /* 8 bits of randomness in 20 address space bits */ + if (current->flags & PF_RANDOMIZE) + addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: if (do_align) -- cgit v1.2.1 From 3260e5293727f16ffdce9a6a6203fd9a6b149e58 Mon Sep 17 00:00:00 2001 From: Michael Bohan Date: Mon, 14 Jun 2010 13:06:56 -0700 Subject: arm: mm: Don't free prohibited memmap entries The VM subsystem assumes that there are valid memmap entries up to the bank end, aligned to MAX_ORDER_NR_PAGES. It will try to read these page structs, and so we cannot free any memmap entries that it may inspect. Signed-off-by: Michael Bohan Signed-off-by: Daniel Walker --- arch/arm/mm/init.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index f6a999465323..e18c7cedb482 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -495,28 +495,27 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) unsigned int i; /* - * [FIXME] This relies on each bank being in address order. This - * may not be the case, especially if the user has provided the - * information on the command line. + * This relies on each bank being in address order. + * The banks are sorted previously in bootmem_init(). */ for_each_nodebank(i, mi, node) { struct membank *bank = &mi->bank[i]; bank_start = bank_pfn_start(bank); - if (bank_start < prev_bank_end) { - printk(KERN_ERR "MEM: unordered memory banks. " - "Not freeing memmap.\n"); - break; - } /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. */ - if (prev_bank_end && prev_bank_end != bank_start) + if (prev_bank_end && prev_bank_end < bank_start) free_memmap(node, prev_bank_end, bank_start); - prev_bank_end = bank_pfn_end(bank); + /* + * Align up here since the VM subsystem insists that the + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. + */ + prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); } } -- cgit v1.2.1 From 309caa9cc6ff39d261264ec4ff10e29489afc8f8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 21 Jun 2010 21:03:18 +0100 Subject: ARM: Prohibit ioremap() on kernel managed RAM ARMv6 and above have a restriction whereby aliasing virtual:physical mappings must not have differing memory type and sharability attributes. Strictly, this covers the memory type (strongly ordered, device, memory), cache attributes (uncached, write combine, write through, write back read alloc, write back write alloc) and the shared bit. However, using ioremap() and its variants on system RAM results in mappings which differ in these attributes from the main system RAM mapping. Other architectures with similar restrictions approach this problem in the same way - they do not permit ioremap on main system RAM.
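Illustration (not part of the series): the patch below makes __arm_ioremap_pfn_caller() reject any pfn for which pfn_valid() is true. A rough sketch of what that means for a caller that might be handed either device memory or system RAM; the function name and fallback policy here are hypothetical:

#include <linux/types.h>
#include <linux/mm.h>   /* pfn_valid() */
#include <linux/io.h>   /* ioremap() */
#include <asm/memory.h> /* __phys_to_pfn() */

/* Sketch: kernel-managed RAM must not get a second mapping with different
 * attributes, so refuse it up front rather than trip the new WARN_ON(). */
static void __iomem *map_device_region(phys_addr_t phys, size_t size)
{
	if (pfn_valid(__phys_to_pfn(phys)))
		return NULL;	/* system RAM: use the DMA/kmap APIs instead */
	return ioremap(phys, size);
}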
Make ARM behave in the same way, with a WARN_ON() such that users can be traced and an alternative approach found. Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 28c8b950ef04..03f11935ed08 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -268,6 +268,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) return NULL; + /* + * Don't allow RAM to be mapped - this causes problems with ARMv6+ + */ + if (WARN_ON(pfn_valid(pfn))) + return NULL; + type = get_mem_type(mtype); if (!type) return NULL; -- cgit v1.2.1 From f159f4ed55bb0fa5470800641e03a13a7e0eae6e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 5 Jul 2010 14:53:10 +0100 Subject: ARM: 6207/1: Replace CONFIG_HAS_TLS_REG with HWCAP_TLS and check for it on V6 The TLS register is only available on ARM1136 r1p0 and later. Set HWCAP_TLS flags if hardware TLS is available and test for it if CONFIG_CPU_32v6K is not set for V6. Note that we set the TLS instruction in __kuser_get_tls dynamically as suggested by Jamie Lokier. Also the __switch_to code is optimized out in most cases as suggested by Nicolas Pitre. Reviewed-by: Nicolas Pitre Signed-off-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 11 ----------- arch/arm/mm/proc-v6.S | 5 +++-- arch/arm/mm/proc-v7.S | 2 +- 3 files changed, 4 insertions(+), 14 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 346ae14824a5..71d5d5efceef 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -717,17 +717,6 @@ config TLS_REG_EMUL a few prototypes like that in existence) and therefore access to that required register must be emulated. -config HAS_TLS_REG - bool - depends on !TLS_REG_EMUL - default y if SMP || CPU_32v7 - help - This selects support for the CP15 thread register. - It is defined to be available on some ARMv6 processors (including - all SMP capable ARMv6's) or later processors. User space may - assume directly accessing that register and always obtain the - expected value only on ARMv7 and above. 
- config NEEDS_SYSCALL_FOR_CMPXCHG bool help diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 7a5337ed7d68..2f5a3c23a0fe 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -239,7 +239,8 @@ __v6_proc_info: b __v6_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA + /* See also feat_v6_fixup() for HWCAP_TLS */ + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS .long cpu_v6_name .long v6_processor_functions .long v6wbi_tlb_fns @@ -262,7 +263,7 @@ __pj4_v6_proc_info: b __v6_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS .long cpu_pj4_name .long v6_processor_functions .long v6wbi_tlb_fns diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 7aaf88a3b7aa..8071bcd4c995 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -344,7 +344,7 @@ __v7_proc_info: b __v7_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS .long cpu_v7_name .long v7_processor_functions .long v7wbi_tlb_fns -- cgit v1.2.1 From 7961239599de71130c852ecfa9a4140f3f60547b Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 22 May 2010 16:20:14 +0100 Subject: ARM: Precalculate vmalloc_min Rather than storing the minimum size of the vmalloc area, store the maximum permitted address of the vmalloc area instead. Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 285894171186..6a08087ab022 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -668,7 +668,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr) create_mapping(io_desc + i); } -static unsigned long __initdata vmalloc_reserve = SZ_128M; +static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); /* * vmalloc=size forces the vmalloc area to be exactly 'size' @@ -677,7 +677,7 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; */ static int __init early_vmalloc(char *arg) { - vmalloc_reserve = memparse(arg, NULL); + unsigned long vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; @@ -692,12 +692,12 @@ static int __init early_vmalloc(char *arg) "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } + + vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); return 0; } early_param("vmalloc", early_vmalloc); -#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) - static void __init sanity_check_meminfo(void) { int i, j, highmem = 0; @@ -707,7 +707,7 @@ static void __init sanity_check_meminfo(void) *bank = meminfo.bank[i]; #ifdef CONFIG_HIGHMEM - if (__va(bank->start) > VMALLOC_MIN || + if (__va(bank->start) > vmalloc_min || __va(bank->start) < (void *)PAGE_OFFSET) highmem = 1; @@ -717,8 +717,8 @@ static void __init sanity_check_meminfo(void) * Split those memory banks which are partially overlapping * the vmalloc area greatly simplifying things later. 
*/ - if (__va(bank->start) < VMALLOC_MIN && - bank->size > VMALLOC_MIN - __va(bank->start)) { + if (__va(bank->start) < vmalloc_min && + bank->size > vmalloc_min - __va(bank->start)) { if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); @@ -727,12 +727,12 @@ static void __init sanity_check_meminfo(void) (meminfo.nr_banks - i) * sizeof(*bank)); meminfo.nr_banks++; i++; - bank[1].size -= VMALLOC_MIN - __va(bank->start); - bank[1].start = __pa(VMALLOC_MIN - 1) + 1; + bank[1].size -= vmalloc_min - __va(bank->start); + bank[1].start = __pa(vmalloc_min - 1) + 1; bank[1].highmem = highmem = 1; j++; } - bank->size = VMALLOC_MIN - __va(bank->start); + bank->size = vmalloc_min - __va(bank->start); } #else bank->highmem = highmem; @@ -741,7 +741,7 @@ static void __init sanity_check_meminfo(void) * Check whether this memory bank would entirely overlap * the vmalloc area. */ - if (__va(bank->start) >= VMALLOC_MIN || + if (__va(bank->start) >= vmalloc_min || __va(bank->start) < (void *)PAGE_OFFSET) { printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " "(vmalloc region overlap).\n", @@ -753,9 +753,9 @@ static void __init sanity_check_meminfo(void) * Check whether this memory bank would partially overlap * the vmalloc area. */ - if (__va(bank->start + bank->size) > VMALLOC_MIN || + if (__va(bank->start + bank->size) > vmalloc_min || __va(bank->start + bank->size) < __va(bank->start)) { - unsigned long newsize = VMALLOC_MIN - __va(bank->start); + unsigned long newsize = vmalloc_min - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " "to -%.8lx (vmalloc region overlap).\n", bank->start, bank->start + bank->size - 1, -- cgit v1.2.1 From be370302742ff9948f2a42b15cb2ba174d97b930 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 7 May 2010 17:40:33 +0100 Subject: ARM: Remove DISCONTIGMEM support Everything should now be using sparsemem rather than discontigmem, so remove the code supporting discontigmem from ARM. Signed-off-by: Russell King --- arch/arm/mm/Makefile | 1 - arch/arm/mm/discontig.c | 45 -------- arch/arm/mm/init.c | 266 +++++++++++++++++++++--------------------------- arch/arm/mm/mm.h | 4 +- arch/arm/mm/mmu.c | 38 +++---- arch/arm/mm/nommu.c | 13 +-- 6 files changed, 136 insertions(+), 231 deletions(-) delete mode 100644 arch/arm/mm/discontig.c (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index e8d34a80851c..d63b6c413758 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -15,7 +15,6 @@ endif obj-$(CONFIG_MODULES) += proc-syms.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o -obj-$(CONFIG_DISCONTIGMEM) += discontig.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c deleted file mode 100644 index c8c0c4b0f0a3..000000000000 --- a/arch/arm/mm/discontig.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * linux/arch/arm/mm/discontig.c - * - * Discontiguous memory support. - * - * Initial code: Copyright (C) 1999-2000 Nicolas Pitre - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include -#include -#include - -#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16 -# error Fix Me Please -#endif - -/* - * Our node_data structure for discontiguous memory. 
- */ - -pg_data_t discontig_node_data[MAX_NUMNODES] = { - { .bdata = &bootmem_node_data[0] }, - { .bdata = &bootmem_node_data[1] }, - { .bdata = &bootmem_node_data[2] }, - { .bdata = &bootmem_node_data[3] }, -#if MAX_NUMNODES == 16 - { .bdata = &bootmem_node_data[4] }, - { .bdata = &bootmem_node_data[5] }, - { .bdata = &bootmem_node_data[6] }, - { .bdata = &bootmem_node_data[7] }, - { .bdata = &bootmem_node_data[8] }, - { .bdata = &bootmem_node_data[9] }, - { .bdata = &bootmem_node_data[10] }, - { .bdata = &bootmem_node_data[11] }, - { .bdata = &bootmem_node_data[12] }, - { .bdata = &bootmem_node_data[13] }, - { .bdata = &bootmem_node_data[14] }, - { .bdata = &bootmem_node_data[15] }, -#endif -}; - -EXPORT_SYMBOL(discontig_node_data); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index f6a999465323..4011e524cb1d 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -79,38 +79,37 @@ struct meminfo meminfo; void show_mem(void) { int free = 0, total = 0, reserved = 0; - int shared = 0, cached = 0, slab = 0, node, i; + int shared = 0, cached = 0, slab = 0, i; struct meminfo * mi = &meminfo; printk("Mem-info:\n"); show_free_areas(); - for_each_online_node(node) { - for_each_nodebank (i,mi,node) { - struct membank *bank = &mi->bank[i]; - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - total++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (PageSlab(page)) - slab++; - else if (!page_count(page)) - free++; - else - shared += page_count(page) - 1; - page++; - } while (page < end); - } + + for_each_bank (i, mi) { + struct membank *bank = &mi->bank[i]; + unsigned int pfn1, pfn2; + struct page *page, *end; + + pfn1 = bank_pfn_start(bank); + pfn2 = bank_pfn_end(bank); + + page = pfn_to_page(pfn1); + end = pfn_to_page(pfn2 - 1) + 1; + + do { + total++; + if (PageReserved(page)) + reserved++; + else if (PageSwapCache(page)) + cached++; + else if (PageSlab(page)) + slab++; + else if (!page_count(page)) + free++; + else + shared += page_count(page) - 1; + page++; + } while (page < end); } printk("%d pages of RAM\n", total); @@ -121,7 +120,7 @@ void show_mem(void) printk("%d pages swap cached\n", cached); } -static void __init find_node_limits(int node, struct meminfo *mi, +static void __init find_limits(struct meminfo *mi, unsigned long *min, unsigned long *max_low, unsigned long *max_high) { int i; @@ -129,7 +128,7 @@ static void __init find_node_limits(int node, struct meminfo *mi, *min = -1UL; *max_low = *max_high = 0; - for_each_nodebank(i, mi, node) { + for_each_bank (i, mi) { struct membank *bank = &mi->bank[i]; unsigned long start, end; @@ -154,14 +153,14 @@ static void __init find_node_limits(int node, struct meminfo *mi, * the end, we won't clash. 
*/ static unsigned int __init -find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages) +find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages) { unsigned int start_pfn, i, bootmap_pfn; start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT; bootmap_pfn = 0; - for_each_nodebank(i, mi, node) { + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; unsigned int start, end; @@ -191,7 +190,7 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages) static int __init check_initrd(struct meminfo *mi) { - int initrd_node = -2; + int initrd = -2; #ifdef CONFIG_BLK_DEV_INITRD unsigned long end = phys_initrd_start + phys_initrd_size; @@ -202,17 +201,17 @@ static int __init check_initrd(struct meminfo *mi) if (phys_initrd_size) { unsigned int i; - initrd_node = -1; + initrd = -1; for (i = 0; i < mi->nr_banks; i++) { struct membank *bank = &mi->bank[i]; if (bank_phys_start(bank) <= phys_initrd_start && end <= bank_phys_end(bank)) - initrd_node = bank->node; + initrd = 0; } } - if (initrd_node == -1) { + if (initrd == -1) { printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond " "physical memory - disabling initrd\n", phys_initrd_start, phys_initrd_size); @@ -220,10 +219,10 @@ static int __init check_initrd(struct meminfo *mi) } #endif - return initrd_node; + return initrd; } -static void __init bootmem_init_node(int node, struct meminfo *mi, +static void __init arm_bootmem_init(struct meminfo *mi, unsigned long start_pfn, unsigned long end_pfn) { unsigned long boot_pfn; @@ -235,37 +234,36 @@ static void __init bootmem_init_node(int node, struct meminfo *mi, * Allocate the bootmem bitmap page. */ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - boot_pfn = find_bootmap_pfn(node, mi, boot_pages); + boot_pfn = find_bootmap_pfn(mi, boot_pages); /* - * Initialise the bootmem allocator for this node, handing the + * Initialise the bootmem allocator, handing the * memory banks over to bootmem. */ - node_set_online(node); - pgdat = NODE_DATA(node); + node_set_online(0); + pgdat = NODE_DATA(0); init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn); - for_each_nodebank(i, mi, node) { + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; if (!bank->highmem) - free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); + free_bootmem(bank_phys_start(bank), bank_phys_size(bank)); } /* - * Reserve the bootmem bitmap for this node. + * Reserve the bootmem bitmap. */ - reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT, - boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); + reserve_bootmem(boot_pfn << PAGE_SHIFT, + boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); } -static void __init bootmem_reserve_initrd(int node) +static void __init bootmem_reserve_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD - pg_data_t *pgdat = NODE_DATA(node); int res; - res = reserve_bootmem_node(pgdat, phys_initrd_start, - phys_initrd_size, BOOTMEM_EXCLUSIVE); + res = reserve_bootmem(phys_initrd_start, + phys_initrd_size, BOOTMEM_EXCLUSIVE); if (res == 0) { initrd_start = __phys_to_virt(phys_initrd_start); @@ -279,23 +277,23 @@ static void __init bootmem_reserve_initrd(int node) #endif } -static void __init bootmem_free_node(int node, struct meminfo *mi) +static void __init arm_bootmem_free(struct meminfo *mi) { unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; unsigned long min, max_low, max_high; int i; - find_node_limits(node, mi, &min, &max_low, &max_high); + find_limits(mi, &min, &max_low, &max_high); /* - * initialise the zones within this node. 
+ * initialise the zones. */ memset(zone_size, 0, sizeof(zone_size)); /* - * The size of this node has already been determined. If we need - * to do anything fancy with the allocation of this memory to the - * zones, now is the time to do it. + * The memory size has already been determined. If we need + * to do anything fancy with the allocation of this memory + * to the zones, now is the time to do it. */ zone_size[0] = max_low - min; #ifdef CONFIG_HIGHMEM @@ -303,11 +301,11 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) #endif /* - * For each bank in this node, calculate the size of the holes. - * holes = node_size - sum(bank_sizes_in_node) + * Calculate the size of the holes. + * holes = node_size - sum(bank_sizes) */ memcpy(zhole_size, zone_size, sizeof(zhole_size)); - for_each_nodebank(i, mi, node) { + for_each_bank(i, mi) { int idx = 0; #ifdef CONFIG_HIGHMEM if (mi->bank[i].highmem) @@ -320,9 +318,9 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) * Adjust the sizes according to any special requirements for * this machine type. */ - arch_adjust_zones(node, zone_size, zhole_size); + arch_adjust_zones(0, zone_size, zhole_size); - free_area_init_node(node, zone_size, min, zhole_size); + free_area_init_node(0, zone_size, min, zhole_size); } #ifndef CONFIG_SPARSEMEM @@ -346,16 +344,16 @@ int pfn_valid(unsigned long pfn) } EXPORT_SYMBOL(pfn_valid); -static void arm_memory_present(struct meminfo *mi, int node) +static void arm_memory_present(struct meminfo *mi) { } #else -static void arm_memory_present(struct meminfo *mi, int node) +static void arm_memory_present(struct meminfo *mi) { int i; - for_each_nodebank(i, mi, node) { + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; - memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); + memory_present(0, bank_pfn_start(bank), bank_pfn_end(bank)); } } #endif @@ -364,55 +362,35 @@ void __init bootmem_init(void) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; - int node, initrd_node; + int initrd; /* - * Locate which node contains the ramdisk image, if any. + * Locate the ramdisk image, if any. */ - initrd_node = check_initrd(mi); + initrd = check_initrd(mi); max_low = max_high = 0; - /* - * Run through each node initialising the bootmem allocator. - */ - for_each_node(node) { - unsigned long node_low, node_high; - - find_node_limits(node, mi, &min, &node_low, &node_high); + find_limits(mi, &min, &max_low, &max_high); - if (node_low > max_low) - max_low = node_low; - if (node_high > max_high) - max_high = node_high; + arm_bootmem_init(mi, min, max_low); - /* - * If there is no memory in this node, ignore it. - * (We can't have nodes which have no lowmem) - */ - if (node_low == 0) - continue; - - bootmem_init_node(node, mi, min, node_low); - - /* - * Reserve any special node zero regions. - */ - if (node == 0) - reserve_node_zero(NODE_DATA(node)); + /* + * Reserve any special regions. + */ + reserve_special_regions(); - /* - * If the initrd is in this node, reserve its memory. - */ - if (node == initrd_node) - bootmem_reserve_initrd(node); + /* + * If the initrd is present, reserve its memory. 
+ */ + if (initrd == 0) + bootmem_reserve_initrd(); - /* - * Sparsemem tries to allocate bootmem in memory_present(), - * so must be done after the fixed reservations - */ - arm_memory_present(mi, node); - } + /* + * Sparsemem tries to allocate bootmem in memory_present(), + * so must be done after the fixed reservations + */ + arm_memory_present(mi); /* * sparse_init() needs the bootmem allocator up and running. @@ -420,12 +398,11 @@ void __init bootmem_init(void) sparse_init(); /* - * Now free memory in each node - free_area_init_node needs + * Now free the memory - free_area_init_node needs * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid. */ - for_each_node(node) - bootmem_free_node(node, mi); + arm_bootmem_free(mi); high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; @@ -460,7 +437,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) } static inline void -free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) +free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; unsigned long pg, pgend; @@ -483,13 +460,13 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) * free the section of the memmap array. */ if (pg < pgend) - free_bootmem_node(NODE_DATA(node), pg, pgend - pg); + free_bootmem(pg, pgend - pg); } /* * The mem_map array can get very big. Free the unused area of the memory map. */ -static void __init free_unused_memmap_node(int node, struct meminfo *mi) +static void __init free_unused_memmap(struct meminfo *mi) { unsigned long bank_start, prev_bank_end = 0; unsigned int i; @@ -499,7 +476,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) * may not be the case, especially if the user has provided the * information on the command line. */ - for_each_nodebank(i, mi, node) { + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; bank_start = bank_pfn_start(bank); @@ -514,7 +491,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) * between the current bank and the previous, free it. 
*/ if (prev_bank_end && prev_bank_end != bank_start) - free_memmap(node, prev_bank_end, bank_start); + free_memmap(prev_bank_end, bank_start); prev_bank_end = bank_pfn_end(bank); } @@ -528,21 +505,14 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) void __init mem_init(void) { unsigned long reserved_pages, free_pages; - int i, node; + int i; -#ifndef CONFIG_DISCONTIGMEM max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; -#endif /* this will put all unused low memory onto the freelists */ - for_each_online_node(node) { - pg_data_t *pgdat = NODE_DATA(node); - - free_unused_memmap_node(node, &meminfo); + free_unused_memmap(&meminfo); - if (pgdat->node_spanned_pages != 0) - totalram_pages += free_all_bootmem_node(pgdat); - } + totalram_pages += free_all_bootmem(); #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ @@ -552,39 +522,35 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM /* set highmem page free */ - for_each_online_node(node) { - for_each_nodebank (i, &meminfo, node) { - unsigned long start = bank_pfn_start(&meminfo.bank[i]); - unsigned long end = bank_pfn_end(&meminfo.bank[i]); - if (start >= max_low_pfn + PHYS_PFN_OFFSET) - totalhigh_pages += free_area(start, end, NULL); - } + for_each_bank (i, &meminfo) { + unsigned long start = bank_pfn_start(&meminfo.bank[i]); + unsigned long end = bank_pfn_end(&meminfo.bank[i]); + if (start >= max_low_pfn + PHYS_PFN_OFFSET) + totalhigh_pages += free_area(start, end, NULL); } totalram_pages += totalhigh_pages; #endif reserved_pages = free_pages = 0; - for_each_online_node(node) { - for_each_nodebank(i, &meminfo, node) { - struct membank *bank = &meminfo.bank[i]; - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } + for_each_bank(i, &meminfo) { + struct membank *bank = &meminfo.bank[i]; + unsigned int pfn1, pfn2; + struct page *page, *end; + + pfn1 = bank_pfn_start(bank); + pfn2 = bank_pfn_end(bank); + + page = pfn_to_page(pfn1); + end = pfn_to_page(pfn2 - 1) + 1; + + do { + if (PageReserved(page)) + reserved_pages++; + else if (!page_count(page)) + free_pages++; + page++; + } while (page < end); } /* diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 815d08eecbb0..7b19c90ab295 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -28,7 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif -struct pglist_data; - void __init bootmem_init(void); -void reserve_node_zero(struct pglist_data *pgdat); +void reserve_special_regions(void); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6a08087ab022..ddb1bee6de73 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -827,9 +827,9 @@ static inline void prepare_page_table(void) } /* - * Reserve the various regions of node 0 + * Reserve the various regions */ -void __init reserve_node_zero(pg_data_t *pgdat) +void __init reserve_special_regions(void) { unsigned long res_size = 0; @@ -838,19 +838,17 @@ void __init reserve_node_zero(pg_data_t *pgdat) * Note that this can only be in node 0. 
*/ #ifdef CONFIG_XIP_KERNEL - reserve_bootmem_node(pgdat, __pa(_data), _end - _data, - BOOTMEM_DEFAULT); + reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT); #else - reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, - BOOTMEM_DEFAULT); + reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT); #endif /* * Reserve the page tables. These are already in use, * and can only be in node 0. */ - reserve_bootmem_node(pgdat, __pa(swapper_pg_dir), - PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT); + reserve_bootmem(__pa(swapper_pg_dir), + PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT); /* * Hmm... This should go elsewhere, but we really really need to @@ -874,29 +872,22 @@ void __init reserve_node_zero(pg_data_t *pgdat) if (machine_is_h1940() || machine_is_rx3715() || machine_is_rx1950()) { - reserve_bootmem_node(pgdat, 0x30003000, 0x1000, - BOOTMEM_DEFAULT); - reserve_bootmem_node(pgdat, 0x30081000, 0x1000, - BOOTMEM_DEFAULT); + reserve_bootmem(0x30003000, 0x1000, BOOTMEM_DEFAULT); + reserve_bootmem(0x30081000, 0x1000, BOOTMEM_DEFAULT); } if (machine_is_palmld() || machine_is_palmtx()) { - reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, - BOOTMEM_EXCLUSIVE); - reserve_bootmem_node(pgdat, 0xa0200000, 0x1000, - BOOTMEM_EXCLUSIVE); + reserve_bootmem(0xa0000000, 0x1000, BOOTMEM_EXCLUSIVE); + reserve_bootmem(0xa0200000, 0x1000, BOOTMEM_EXCLUSIVE); } if (machine_is_treo680() || machine_is_centro()) { - reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, - BOOTMEM_EXCLUSIVE); - reserve_bootmem_node(pgdat, 0xa2000000, 0x1000, - BOOTMEM_EXCLUSIVE); + reserve_bootmem(0xa0000000, 0x1000, BOOTMEM_EXCLUSIVE); + reserve_bootmem(0xa2000000, 0x1000, BOOTMEM_EXCLUSIVE); } if (machine_is_palmt5()) - reserve_bootmem_node(pgdat, 0xa0200000, 0x1000, - BOOTMEM_EXCLUSIVE); + reserve_bootmem(0xa0200000, 0x1000, BOOTMEM_EXCLUSIVE); /* * U300 - This platform family can share physical memory @@ -920,8 +911,7 @@ void __init reserve_node_zero(pg_data_t *pgdat) res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; #endif if (res_size) - reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size, - BOOTMEM_DEFAULT); + reserve_bootmem(PHYS_OFFSET, res_size, BOOTMEM_DEFAULT); } /* diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 33b327379f07..25376d480b8e 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -18,20 +18,18 @@ #include "mm.h" /* - * Reserve the various regions of node 0 + * Reserve the various regions */ -void __init reserve_node_zero(pg_data_t *pgdat) +void __init reserve_special_regions(void) { /* * Register the kernel text and data with bootmem. * Note that this can only be in node 0. */ #ifdef CONFIG_XIP_KERNEL - reserve_bootmem_node(pgdat, __pa(_data), _end - _data, - BOOTMEM_DEFAULT); + reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT); #else - reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, - BOOTMEM_DEFAULT); + reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT); #endif /* @@ -39,8 +37,7 @@ void __init reserve_node_zero(pg_data_t *pgdat) * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." 
*/ - reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE, - BOOTMEM_DEFAULT); + reserve_bootmem(CONFIG_VECTORS_BASE, PAGE_SIZE, BOOTMEM_DEFAULT); } /* -- cgit v1.2.1 From b65b4781fbd5846a82cdac0c32818af1a7452d1f Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 22 May 2010 20:58:51 +0100 Subject: ARM: Remove 'node' argument from arch_adjust_zones() Since we no longer support discontigmem, node is always zero, so remove this argument. Signed-off-by: Russell King --- arch/arm/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4011e524cb1d..4d2720888c50 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -318,7 +318,7 @@ static void __init arm_bootmem_free(struct meminfo *mi) * Adjust the sizes according to any special requirements for * this machine type. */ - arch_adjust_zones(0, zone_size, zhole_size); + arch_adjust_zones(zone_size, zhole_size); free_area_init_node(0, zone_size, min, zhole_size); } -- cgit v1.2.1 From 98c672cf1fa2a56f6f43e3f48b1208b83845582c Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 22 May 2010 18:18:57 +0100 Subject: ARM: Move platform memory reservations out of generic code Move the platform specific bootmem memory reservations out of arch/arm/mm/mmu.c into their respective platform files. Signed-off-by: Russell King --- arch/arm/mm/init.c | 5 ++++- arch/arm/mm/mm.h | 3 ++- arch/arm/mm/mmu.c | 63 +++-------------------------------------------------- arch/arm/mm/nommu.c | 2 +- 4 files changed, 10 insertions(+), 63 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4d2720888c50..1a227eea64bd 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -358,7 +358,7 @@ static void arm_memory_present(struct meminfo *mi) } #endif -void __init bootmem_init(void) +void __init bootmem_init(struct machine_desc *mdesc) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; @@ -380,6 +380,9 @@ void __init bootmem_init(void) */ reserve_special_regions(); + if (mdesc->reserve) + mdesc->reserve(); + /* * If the initrd is present, reserve its memory. */ diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 7b19c90ab295..afafe4fc5431 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -28,5 +28,6 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif -void __init bootmem_init(void); +struct machine_desc; +void __init bootmem_init(struct machine_desc *); void reserve_special_regions(void); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ddb1bee6de73..1676d017a93a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -831,8 +830,6 @@ static inline void prepare_page_table(void) */ void __init reserve_special_regions(void) { - unsigned long res_size = 0; - /* * Register the kernel text and data with bootmem. * Note that this can only be in node 0. 
- */ - if (machine_is_integrator() || machine_is_cintegrator()) - res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; - - /* - * These should likewise go elsewhere. They pre-reserve the - * screen memory region at the start of main system memory. - */ - if (machine_is_edb7211()) - res_size = 0x00020000; - if (machine_is_p720t()) - res_size = 0x00014000; - - /* H1940, RX3715 and RX1950 need to reserve this for suspend */ - - if (machine_is_h1940() || machine_is_rx3715() - || machine_is_rx1950()) { - reserve_bootmem(0x30003000, 0x1000, BOOTMEM_DEFAULT); - reserve_bootmem(0x30081000, 0x1000, BOOTMEM_DEFAULT); - } - - if (machine_is_palmld() || machine_is_palmtx()) { - reserve_bootmem(0xa0000000, 0x1000, BOOTMEM_EXCLUSIVE); - reserve_bootmem(0xa0200000, 0x1000, BOOTMEM_EXCLUSIVE); - } - - if (machine_is_treo680() || machine_is_centro()) { - reserve_bootmem(0xa0000000, 0x1000, BOOTMEM_EXCLUSIVE); - reserve_bootmem(0xa2000000, 0x1000, BOOTMEM_EXCLUSIVE); - } - - if (machine_is_palmt5()) - reserve_bootmem(0xa0200000, 0x1000, BOOTMEM_EXCLUSIVE); - - /* - * U300 - This platform family can share physical memory - * between two ARM cpus, one running Linux and the other - * running another OS. - */ - if (machine_is_u300()) { -#ifdef CONFIG_MACH_U300_SINGLE_RAM -#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) && \ - CONFIG_MACH_U300_2MB_ALIGNMENT_FIX - res_size = 0x00100000; -#endif -#endif - } - #ifdef CONFIG_SA1111 /* * Because of the SA1111 DMA bug, we want to preserve our * precious DMA-able memory... */ - res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; + reserve_bootmem(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET, + BOOTMEM_DEFAULT); #endif - if (res_size) - reserve_bootmem(PHYS_OFFSET, res_size, BOOTMEM_DEFAULT); } /* @@ -1056,7 +999,7 @@ void __init paging_init(struct machine_desc *mdesc) sanity_check_meminfo(); prepare_page_table(); map_lowmem(); - bootmem_init(); + bootmem_init(mdesc); devicemaps_init(mdesc); kmap_init(); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 25376d480b8e..ed58ddbbc5b3 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -46,7 +46,7 @@ void __init reserve_special_regions(void) */ void __init paging_init(struct machine_desc *mdesc) { - bootmem_init(); + bootmem_init(mdesc); } /* -- cgit v1.2.1 From 3abe9d33b382cb9eee7bfee8d90b10078f4baa4d Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Mar 2010 17:02:59 +0000 Subject: ARM: early_alloc() Add a common early allocator function, in preparation for switching over to LMB. When we do, this function will need to do a little more than just allocating memory; we need it zero initialized too. Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1676d017a93a..3079d0fd9cea 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -487,6 +487,11 @@ static void __init build_mem_type_table(void) #define vectors_base() (vectors_high() ? 
0xffff0000 : 0) +static void __init *early_alloc(unsigned long sz) +{ + return alloc_bootmem_low_pages(sz); +} + static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) @@ -494,7 +499,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, pte_t *pte; if (pmd_none(*pmd)) { - pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); + pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | type->prot_l1); } @@ -873,7 +878,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) /* * Allocate the vector page early. */ - vectors = alloc_bootmem_low_pages(PAGE_SIZE); + vectors = early_alloc(PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); @@ -945,7 +950,7 @@ static void __init kmap_init(void) { #ifdef CONFIG_HIGHMEM pmd_t *pmd = pmd_off_k(PKMAP_BASE); - pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); + pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); BUG_ON(!pmd_none(*pmd) || !pte); __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE); pkmap_page_table = pte + PTRS_PER_PTE; @@ -1005,11 +1010,8 @@ void __init paging_init(struct machine_desc *mdesc) top_pmd = pmd_off_k(0xffff0000); - /* - * allocate the zero page. Note that this always succeeds and - * returns a zeroed result. - */ - zero_page = alloc_bootmem_low_pages(PAGE_SIZE); + /* allocate the zero page. */ + zero_page = early_alloc(PAGE_SIZE); empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } -- cgit v1.2.1 From cb9d7707cd9be57830f31616233f6a872ca8416d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:50:59 +0100 Subject: ARM: 6222/1: add memory types for the TCMs The earlier TCM memory regions were mapped as MT_MEMORY_UNCACHED which doesn't really work on platforms supporting the new v6 features like the NX bit. Add unique MT_MEMORY_[I|D]TCM types instead. Cc: Nicolas Pitre Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 285894171186..e53480148c05 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -258,6 +258,19 @@ static struct mem_type mem_types[] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_DTCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | + L_PTE_DIRTY | L_PTE_WRITE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_ITCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_USER | L_PTE_EXEC, + .prot_l1 = PMD_TYPE_TABLE, + .domain = DOMAIN_IO, + }, }; const struct mem_type *get_mem_type(unsigned int type) -- cgit v1.2.1 From 07d2a5c721c6aa2bd69812a74c8b3b116abf3e56 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:52:34 +0100 Subject: ARM: 6224/1: print TCM whereabouts in init message If TCM is in use, we should display it in the virtual memory layout along with everything else. 
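For reference, the layout banner this patch extends is printed from mem_init() with the MLK(base, top) helper, which expands to a start/end/size-in-kB triple. A stand-alone C sketch of the same arithmetic, with made-up TCM addresses:

#include <stdio.h>

/* Mirrors the kernel's MLK(b, t): expands to start, end, size in kB. */
#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)

int main(void)
{
	unsigned long itcm_offset = 0xfffe0000UL, itcm_end = 0xfffe8000UL; /* example values */
	unsigned long dtcm_offset = 0xfffe8000UL, dtcm_end = 0xfffe9000UL; /* example values */

	printf("    ITCM    : 0x%08lx - 0x%08lx   (%4lu kB)\n", MLK(itcm_offset, itcm_end));
	printf("    DTCM    : 0x%08lx - 0x%08lx   (%4lu kB)\n", MLK(dtcm_offset, dtcm_end));
	return 0;
}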
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mm/init.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index f6a999465323..526af48b1271 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -611,6 +611,14 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" +#ifdef CONFIG_HAVE_TCM +#ifdef DTCM_OFFSET + " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" +#endif +#ifdef ITCM_OFFSET + " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" +#endif +#endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_MMU " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" @@ -627,6 +635,14 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), +#ifdef CONFIG_HAVE_TCM +#ifdef DTCM_OFFSET + MLK(UL(DTCM_OFFSET), UL(DTCM_END + 1)), +#endif +#ifdef ITCM_OFFSET + MLK(UL(ITCM_OFFSET), UL(ITCM_END + 1)), +#endif +#endif MLK(FIXADDR_START, FIXADDR_TOP), #ifdef CONFIG_MMU MLM(CONSISTENT_BASE, CONSISTENT_END), -- cgit v1.2.1 From 4bb2e27db10abc555dfabd73661485fb75e4e97d Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 1 Jul 2010 18:33:29 +0100 Subject: ARM: early_pte_alloc() Provide a common function for allocating early PTE tables. Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3079d0fd9cea..05dbb956175b 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -492,18 +492,21 @@ static void __init *early_alloc(unsigned long sz) return alloc_bootmem_low_pages(sz); } -static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, unsigned long pfn, - const struct mem_type *type) +static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { - pte_t *pte; - if (pmd_none(*pmd)) { - pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); - __pmd_populate(pmd, __pa(pte) | type->prot_l1); + pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); + __pmd_populate(pmd, __pa(pte) | prot); } + BUG_ON(pmd_bad(*pmd)); + return pte_offset_kernel(pmd, addr); +} - pte = pte_offset_kernel(pmd, addr); +static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, + unsigned long end, unsigned long pfn, + const struct mem_type *type) +{ + pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); do { set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0); pfn++; @@ -949,11 +952,8 @@ static void __init devicemaps_init(struct machine_desc *mdesc) static void __init kmap_init(void) { #ifdef CONFIG_HIGHMEM - pmd_t *pmd = pmd_off_k(PKMAP_BASE); - pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); - BUG_ON(!pmd_none(*pmd) || !pte); - __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE); - pkmap_page_table = pte + PTRS_PER_PTE; + pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), + PKMAP_BASE, _PAGE_KERNEL_TABLE); #endif } -- cgit v1.2.1 From 2778f62056ada442414392d7ccd41188bb631619 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 9 Jul 2010 16:27:52 +0100 Subject: ARM: initial LMB trial Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/init.c | 164 ++++++++++++++-------------------------------------- arch/arm/mm/mm.h | 2 +- arch/arm/mm/mmu.c | 34 +++++------ arch/arm/mm/nommu.c | 19 +----- 4 files changed, 64 insertions(+), 155 deletions(-) (limited to 'arch/arm/mm') diff 
--git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 1a227eea64bd..4877e06308b7 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -146,95 +147,21 @@ static void __init find_limits(struct meminfo *mi, } } -/* - * FIXME: We really want to avoid allocating the bootmap bitmap - * over the top of the initrd. Hopefully, this is located towards - * the start of a bank, so if we allocate the bootmap bitmap at - * the end, we won't clash. - */ -static unsigned int __init -find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages) -{ - unsigned int start_pfn, i, bootmap_pfn; - - start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT; - bootmap_pfn = 0; - - for_each_bank(i, mi) { - struct membank *bank = &mi->bank[i]; - unsigned int start, end; - - start = bank_pfn_start(bank); - end = bank_pfn_end(bank); - - if (end < start_pfn) - continue; - - if (start < start_pfn) - start = start_pfn; - - if (end <= start) - continue; - - if (end - start >= bootmap_pages) { - bootmap_pfn = start; - break; - } - } - - if (bootmap_pfn == 0) - BUG(); - - return bootmap_pfn; -} - -static int __init check_initrd(struct meminfo *mi) -{ - int initrd = -2; -#ifdef CONFIG_BLK_DEV_INITRD - unsigned long end = phys_initrd_start + phys_initrd_size; - - /* - * Make sure that the initrd is within a valid area of - * memory. - */ - if (phys_initrd_size) { - unsigned int i; - - initrd = -1; - - for (i = 0; i < mi->nr_banks; i++) { - struct membank *bank = &mi->bank[i]; - if (bank_phys_start(bank) <= phys_initrd_start && - end <= bank_phys_end(bank)) - initrd = 0; - } - } - - if (initrd == -1) { - printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond " - "physical memory - disabling initrd\n", - phys_initrd_start, phys_initrd_size); - phys_initrd_start = phys_initrd_size = 0; - } -#endif - - return initrd; -} - static void __init arm_bootmem_init(struct meminfo *mi, unsigned long start_pfn, unsigned long end_pfn) { - unsigned long boot_pfn; unsigned int boot_pages; + phys_addr_t bitmap; pg_data_t *pgdat; int i; /* - * Allocate the bootmem bitmap page. + * Allocate the bootmem bitmap page. This must be in a region + * of memory which has already been mapped. */ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - boot_pfn = find_bootmap_pfn(mi, boot_pages); + bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, + __pfn_to_phys(end_pfn)); /* * Initialise the bootmem allocator, handing the @@ -242,7 +169,7 @@ static void __init arm_bootmem_init(struct meminfo *mi, */ node_set_online(0); pgdat = NODE_DATA(0); - init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn); + init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; @@ -251,30 +178,16 @@ static void __init arm_bootmem_init(struct meminfo *mi, } /* - * Reserve the bootmem bitmap. + * Reserve the memblock reserved regions in bootmem. 
*/ - reserve_bootmem(boot_pfn << PAGE_SHIFT, - boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); -} - -static void __init bootmem_reserve_initrd(void) -{ -#ifdef CONFIG_BLK_DEV_INITRD - int res; - - res = reserve_bootmem(phys_initrd_start, - phys_initrd_size, BOOTMEM_EXCLUSIVE); - - if (res == 0) { - initrd_start = __phys_to_virt(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - } else { - printk(KERN_ERR - "INITRD: 0x%08lx+0x%08lx overlaps in-use " - "memory region - disabling initrd\n", - phys_initrd_start, phys_initrd_size); + for (i = 0; i < memblock.reserved.cnt; i++) { + phys_addr_t start = memblock_start_pfn(&memblock.reserved, i); + if (start >= start_pfn && + memblock_end_pfn(&memblock.reserved, i) <= end_pfn) + reserve_bootmem_node(pgdat, __pfn_to_phys(start), + memblock_size_bytes(&memblock.reserved, i), + BOOTMEM_DEFAULT); } -#endif } static void __init arm_bootmem_free(struct meminfo *mi) @@ -358,16 +271,40 @@ static void arm_memory_present(struct meminfo *mi) } #endif +void __init arm_memblock_init(struct meminfo *mi) +{ + int i; + + memblock_init(); + for (i = 0; i < mi->nr_banks; i++) + memblock_add(mi->bank[i].start, mi->bank[i].size); + + /* Register the kernel text, kernel data and initrd with memblock. */ +#ifdef CONFIG_XIP_KERNEL + memblock_reserve(__pa(_data), _end - _data); +#else + memblock_reserve(__pa(_stext), _end - _stext); +#endif +#ifdef CONFIG_BLK_DEV_INITRD + if (phys_initrd_size) { + memblock_reserve(phys_initrd_start, phys_initrd_size); + + /* Now convert initrd to virtual addresses */ + initrd_start = __phys_to_virt(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; + } +#endif + + arm_mm_memblock_reserve(); + + memblock_analyze(); + memblock_dump_all(); +} + void __init bootmem_init(struct machine_desc *mdesc) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; - int initrd; - - /* - * Locate the ramdisk image, if any. - */ - initrd = check_initrd(mi); max_low = max_high = 0; @@ -375,20 +312,9 @@ void __init bootmem_init(struct machine_desc *mdesc) arm_bootmem_init(mi, min, max_low); - /* - * Reserve any special regions. - */ - reserve_special_regions(); - if (mdesc->reserve) mdesc->reserve(); - /* - * If the initrd is present, reserve its memory. 
- */ - if (initrd == 0) - bootmem_reserve_initrd(); - /* * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index afafe4fc5431..fcfffae69b49 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -30,4 +30,4 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page struct machine_desc; void __init bootmem_init(struct machine_desc *); -void reserve_special_regions(void); +void arm_mm_memblock_reserve(void); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 05dbb956175b..833a6c3f70c0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -11,9 +11,9 @@ #include #include #include -#include #include #include +#include #include #include @@ -489,7 +489,9 @@ static void __init build_mem_type_table(void) static void __init *early_alloc(unsigned long sz) { - return alloc_bootmem_low_pages(sz); + void *ptr = __va(memblock_alloc(sz, sz)); + memset(ptr, 0, sz); + return ptr; } static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) @@ -705,10 +707,14 @@ static int __init early_vmalloc(char *arg) } early_param("vmalloc", early_vmalloc); +phys_addr_t lowmem_end_addr; + static void __init sanity_check_meminfo(void) { int i, j, highmem = 0; + lowmem_end_addr = __pa(vmalloc_min - 1) + 1; + for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; @@ -834,34 +840,22 @@ static inline void prepare_page_table(void) } /* - * Reserve the various regions + * Reserve the special regions of memory */ -void __init reserve_special_regions(void) +void __init arm_mm_memblock_reserve(void) { - /* - * Register the kernel text and data with bootmem. - * Note that this can only be in node 0. - */ -#ifdef CONFIG_XIP_KERNEL - reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT); -#else - reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT); -#endif - /* * Reserve the page tables. These are already in use, * and can only be in node 0. */ - reserve_bootmem(__pa(swapper_pg_dir), - PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT); + memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); #ifdef CONFIG_SA1111 /* * Because of the SA1111 DMA bug, we want to preserve our * precious DMA-able memory... */ - reserve_bootmem(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET, - BOOTMEM_DEFAULT); + memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif } @@ -1004,7 +998,6 @@ void __init paging_init(struct machine_desc *mdesc) sanity_check_meminfo(); prepare_page_table(); map_lowmem(); - bootmem_init(mdesc); devicemaps_init(mdesc); kmap_init(); @@ -1012,6 +1005,9 @@ void __init paging_init(struct machine_desc *mdesc) /* allocate the zero page. */ zero_page = early_alloc(PAGE_SIZE); + + bootmem_init(mdesc); + empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index ed58ddbbc5b3..40e38fcccc7a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -6,8 +6,8 @@ #include #include #include -#include #include +#include #include #include @@ -17,27 +17,14 @@ #include "mm.h" -/* - * Reserve the various regions - */ -void __init reserve_special_regions(void) +void __init arm_mm_memblock_reserve(void) { - /* - * Register the kernel text and data with bootmem. - * Note that this can only be in node 0. 
- */ -#ifdef CONFIG_XIP_KERNEL - reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT); -#else - reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT); -#endif - /* * Register the exception vector page. * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ - reserve_bootmem(CONFIG_VECTORS_BASE, PAGE_SIZE, BOOTMEM_DEFAULT); + memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } /* -- cgit v1.2.1 From 8d717a52d1b0959128be5134dd12608e8e4f2115 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 22 May 2010 19:47:18 +0100 Subject: ARM: Convert platform reservations to use LMB rather than bootmem Signed-off-by: Russell King --- arch/arm/mm/init.c | 11 ++++++----- arch/arm/mm/mm.h | 3 +-- arch/arm/mm/mmu.c | 2 +- arch/arm/mm/nommu.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4877e06308b7..99d6bc9b89bb 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -271,7 +271,7 @@ static void arm_memory_present(struct meminfo *mi) } #endif -void __init arm_memblock_init(struct meminfo *mi) +void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) { int i; @@ -297,11 +297,15 @@ void __init arm_memblock_init(struct meminfo *mi) arm_mm_memblock_reserve(); + /* reserve any platform specific memblock areas */ + if (mdesc->reserve) + mdesc->reserve(); + memblock_analyze(); memblock_dump_all(); } -void __init bootmem_init(struct machine_desc *mdesc) +void __init bootmem_init(void) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; @@ -312,9 +316,6 @@ void __init bootmem_init(struct machine_desc *mdesc) arm_bootmem_init(mi, min, max_low); - if (mdesc->reserve) - mdesc->reserve(); - /* * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index fcfffae69b49..6630620380a4 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -28,6 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif -struct machine_desc; -void __init bootmem_init(struct machine_desc *); +void __init bootmem_init(void); void arm_mm_memblock_reserve(void); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 833a6c3f70c0..d5541adc3520 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1006,7 +1006,7 @@ void __init paging_init(struct machine_desc *mdesc) /* allocate the zero page. 
*/ zero_page = early_alloc(PAGE_SIZE); - bootmem_init(mdesc); + bootmem_init(); empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 40e38fcccc7a..687d02319a41 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -33,7 +33,7 @@ void __init arm_mm_memblock_reserve(void) */ void __init paging_init(struct machine_desc *mdesc) { - bootmem_init(mdesc); + bootmem_init(); } /* -- cgit v1.2.1 From eda2e5dcc914b4d70f665443efc9780e89a5e5c1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 1 Jul 2010 12:00:57 +0100 Subject: ARM: LMB: Convert arm_memory_present() to use LMB memory information Signed-off-by: Russell King --- arch/arm/mm/init.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 99d6bc9b89bb..a453982fdcef 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -257,17 +257,16 @@ int pfn_valid(unsigned long pfn) } EXPORT_SYMBOL(pfn_valid); -static void arm_memory_present(struct meminfo *mi) +static void arm_memory_present(void) { } #else -static void arm_memory_present(struct meminfo *mi) +static void arm_memory_present(void) { int i; - for_each_bank(i, mi) { - struct membank *bank = &mi->bank[i]; - memory_present(0, bank_pfn_start(bank), bank_pfn_end(bank)); - } + for (i = 0; i < memblock.memory.cnt; i++) + memory_present(0, memblock_start_pfn(&memblock.memory, i), + memblock_end_pfn(&memblock.memory, i)); } #endif @@ -320,7 +319,7 @@ void __init bootmem_init(void) * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations */ - arm_memory_present(mi); + arm_memory_present(); /* * sparse_init() needs the bootmem allocator up and running. 
-- cgit v1.2.1 From e07b9e08601b400aee7e076e7b31799d3dd48c1e Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 1 Jul 2010 12:03:29 +0100 Subject: ARM: LMB: convert pfn_valid to use LMB Signed-off-by: Russell King --- arch/arm/mm/init.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a453982fdcef..c357bfb464ae 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -239,16 +239,15 @@ static void __init arm_bootmem_free(struct meminfo *mi) #ifndef CONFIG_SPARSEMEM int pfn_valid(unsigned long pfn) { - struct meminfo *mi = &meminfo; - unsigned int left = 0, right = mi->nr_banks; + struct memblock_region *mem = &memblock.memory; + unsigned int left = 0, right = mem->cnt; do { unsigned int mid = (right + left) / 2; - struct membank *bank = &mi->bank[mid]; - if (pfn < bank_pfn_start(bank)) + if (pfn < memblock_start_pfn(mem, mid)) right = mid; - else if (pfn >= bank_pfn_end(bank)) + else if (pfn >= memblock_end_pfn(mem, mid)) left = mid + 1; else return 1; -- cgit v1.2.1 From a9deb137e4eb94d0a4fa0c3535b2c056d9363bef Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 1 Jul 2010 18:35:07 +0100 Subject: ARM: Remove unnecessary call to find_limits() Signed-off-by: Russell King --- arch/arm/mm/init.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c357bfb464ae..599d121c81e7 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -190,14 +190,12 @@ static void __init arm_bootmem_init(struct meminfo *mi, } } -static void __init arm_bootmem_free(struct meminfo *mi) +static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, + unsigned long max_low, unsigned long max_high) { unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; - unsigned long min, max_low, max_high; int i; - find_limits(mi, &min, &max_low, &max_high); - /* * initialise the zones. */ @@ -330,7 +328,7 @@ void __init bootmem_init(void) * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid. */ - arm_bootmem_free(mi); + arm_bootmem_free(mi, min, max_low, max_high); high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; -- cgit v1.2.1 From 1dbd30e9890fd69e50b17edd70ca583546b0fe4e Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:53:28 +0100 Subject: ARM: 6225/1: make TCM allocation static and common for all archs This changes the TCM handling so that a fixed area is reserved at 0xfffe0000-0xfffeffff for TCM. This area is used by XScale, but XScale does not have TCM, so the two mechanisms are mutually exclusive. This change is needed to make TCM detection more dynamic while still being able to compile code into it, and is a must for the unified ARM goals: allocating TCM at a different place in memory for each machine would make it a nightmare to compile a single image for more than one TCM-equipped machine, so the area has to be nailed down in one place.
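For illustration, a minimal sketch of what the fixed area buys us, assuming the __tcmdata/__tcmfunc annotations described in Documentation/arm/tcm.txt (hotbuf and hot_sum are hypothetical names used only for this sketch):

	#include <asm/tcm.h>

	/* Buffer placed in data TCM (DTCM) by the linker */
	static int hotbuf[16] __tcmdata;

	/* Function placed in instruction TCM (ITCM); with TCM nailed
	   down at 0xfffe0000 this links identically on all machines */
	int __tcmfunc hot_sum(void)
	{
		int i, sum = 0;

		for (i = 0; i < 16; i++)
			sum += hotbuf[i];
		return sum;
	}

Since the area now sits at the same virtual address everywhere, one image can carry such object code for every TCM-equipped machine.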
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mm/init.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 526af48b1271..e00404e6f45b 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -529,6 +529,11 @@ void __init mem_init(void) { unsigned long reserved_pages, free_pages; int i, node; +#ifdef CONFIG_HAVE_TCM + /* These pointers are filled in on TCM detection */ + extern u32 dtcm_end; + extern u32 itcm_end; +#endif #ifndef CONFIG_DISCONTIGMEM max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; @@ -612,12 +617,8 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM -#ifdef DTCM_OFFSET " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" -#endif -#ifdef ITCM_OFFSET " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" -#endif #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_MMU @@ -636,12 +637,8 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_HAVE_TCM -#ifdef DTCM_OFFSET - MLK(UL(DTCM_OFFSET), UL(DTCM_END + 1)), -#endif -#ifdef ITCM_OFFSET - MLK(UL(ITCM_OFFSET), UL(ITCM_END + 1)), -#endif + MLK(DTCM_OFFSET, (unsigned long) dtcm_end), + MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_TOP), #ifdef CONFIG_MMU -- cgit v1.2.1 From d746196361c9c635128249bb6cf13e709ae6abe1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 10:29:13 +0100 Subject: ARM: use generic ioremap_page_range() We don't need our own implementation of this, use the generic library implementation instead. Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 74 +++------------------------------------------------ 1 file changed, 4 insertions(+), 70 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 03f11935ed08..ab506272b2d3 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -42,78 +42,11 @@ */ #define VM_ARM_SECTION_MAPPING 0x80000000 -static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end, - unsigned long phys_addr, const struct mem_type *type) -{ - pgprot_t prot = __pgprot(type->prot_pte); - pte_t *pte; - - pte = pte_alloc_kernel(pmd, addr); - if (!pte) - return -ENOMEM; - - do { - if (!pte_none(*pte)) - goto bad; - - set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0); - phys_addr += PAGE_SIZE; - } while (pte++, addr += PAGE_SIZE, addr != end); - return 0; - - bad: - printk(KERN_CRIT "remap_area_pte: page already exists\n"); - BUG(); -} - -static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys_addr, - const struct mem_type *type) -{ - unsigned long next; - pmd_t *pmd; - int ret = 0; - - pmd = pmd_alloc(&init_mm, pgd, addr); - if (!pmd) - return -ENOMEM; - - do { - next = pmd_addr_end(addr, end); - ret = remap_area_pte(pmd, addr, next, phys_addr, type); - if (ret) - return ret; - phys_addr += next - addr; - } while (pmd++, addr = next, addr != end); - return ret; -} - -static int remap_area_pages(unsigned long start, unsigned long pfn, - size_t size, const struct mem_type *type) -{ - unsigned long addr = start; - unsigned long next, end = start + size; - unsigned long phys_addr = __pfn_to_phys(pfn); - pgd_t *pgd; - int err = 0; - - BUG_ON(addr >= end); - pgd = pgd_offset_k(addr); - do { - next = pgd_addr_end(addr, end); - err = 
remap_area_pmd(pgd, addr, next, phys_addr, type); - if (err) - break; - phys_addr += next - addr; - } while (pgd++, addr = next, addr != end); - - return err; -} - int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype) { - return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype); + return ioremap_page_range(virt, virt + PAGE_SIZE, phys, + __pgprot(mtype->prot_pte)); } EXPORT_SYMBOL(ioremap_page); @@ -300,7 +233,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, err = remap_area_sections(addr, pfn, size, type); } else #endif - err = remap_area_pages(addr, pfn, size, type); + err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), + __pgprot(type->prot_pte)); if (err) { vunmap((void *)addr); -- cgit v1.2.1 From 5bc23d32d86a132b5636a48dca0fa2528ef69ff9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 25 Jul 2010 08:57:02 +0100 Subject: ARM: DMA coherent allocator: align remapped addresses The DMA coherent remap area is used to provide an uncached mapping of memory for coherency with DMA engines. Currently, we look for any free hole which our allocation will fit in with page alignment. However, this can lead to fragmentation of the area, and allows small allocations to cross L1 entry boundaries. This is undesirable as we want to move towards allocating sections of memory. Align allocations according to the size, limiting the alignment between the page and section sizes. Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 15 ++++++++++++++- arch/arm/mm/vmregion.c | 5 +++-- arch/arm/mm/vmregion.h | 2 +- 3 files changed, 18 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 9e7742f0a102..c704eed63c5d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -183,6 +183,8 @@ static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) { struct arm_vmregion *c; + size_t align; + int bit; if (!consistent_pte[0]) { printk(KERN_ERR "%s: not initialised\n", __func__); @@ -190,10 +192,21 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) return NULL; } + /* + * Align the virtual region allocation - maximum alignment is + * a section size, minimum is a page size. This helps reduce + * fragmentation of the DMA space, and also prevents allocations + * smaller than a section from crossing a section boundary. + */ + bit = fls(size - 1) + 1; + if (bit > SECTION_SHIFT) + bit = SECTION_SHIFT; + align = 1 << bit; + /* * Allocate a virtual address in the consistent mapping region. 
*/ - c = arm_vmregion_alloc(&consistent_head, size, + c = arm_vmregion_alloc(&consistent_head, align, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { pte_t *pte; diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index 19e09bdb1b8a..935993e1b1ef 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c @@ -35,7 +35,8 @@ */ struct arm_vmregion * -arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) +arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, + size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; @@ -58,7 +59,7 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) goto nospc; if ((addr + size) <= c->vm_start) goto found; - addr = c->vm_end; + addr = ALIGN(c->vm_end, align); if (addr > end) goto nospc; } diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 6b2cdbdf3a85..15e9f044db9f 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h @@ -21,7 +21,7 @@ struct arm_vmregion { int vm_active; }; -struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t); +struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); -- cgit v1.2.1 From 3dc91aff9c3ef54b15cdaf32f61f973489fe69eb Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:16:49 +0100 Subject: ARM: 6252/1: Use SIGBUS for unaligned access instead of SIGILL POSIX specifies the use of signal SIGBUS with code BUS_ADRALN for invalid address alignment. Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 4 ++-- arch/arm/mm/fault.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 6f98c358989a..53a609680c10 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -924,8 +924,8 @@ static int __init alignment_init(void) ai_usermode = UM_FIXUP; } - hook_fault_code(1, do_alignment, SIGILL, "alignment exception"); - hook_fault_code(3, do_alignment, SIGILL, "alignment exception"); + hook_fault_code(1, do_alignment, SIGBUS, "alignment exception"); + hook_fault_code(3, do_alignment, SIGBUS, "alignment exception"); return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index cbfb2edcf7d1..ce6f3a422c81 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -463,9 +463,9 @@ static struct fsr_info { * defines these to be "precise" aborts. */ { do_bad, SIGSEGV, 0, "vector exception" }, - { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, { do_bad, SIGKILL, 0, "terminal exception" }, - { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, /* Do we need runtime check ? */ #if __LINUX_ARM_ARCH__ < 6 { do_bad, SIGBUS, 0, "external abort on linefetch" }, -- cgit v1.2.1 From 6338a6aa7c082f11d55712251e14178c68bf5869 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:18:19 +0100 Subject: ARM: 6269/1: Add 'code' parameter for hook_fault_code() Add one more parameter to hook_fault_code() to be able to set the 'code' field of struct fsr_info. Signed-off-by: Kirill A.
Shutemov Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 6 ++++-- arch/arm/mm/fault.c | 14 ++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 53a609680c10..77cfdbed9501 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -924,8 +924,10 @@ static int __init alignment_init(void) ai_usermode = UM_FIXUP; } - hook_fault_code(1, do_alignment, SIGBUS, "alignment exception"); - hook_fault_code(3, do_alignment, SIGBUS, "alignment exception"); + hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); + hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ce6f3a422c81..84131c832430 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -508,13 +508,15 @@ static struct fsr_info { void __init hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), - int sig, const char *name) + int sig, int code, const char *name) { - if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) { - fsr_info[nr].fn = fn; - fsr_info[nr].sig = sig; - fsr_info[nr].name = name; - } + if (nr < 0 || nr >= ARRAY_SIZE(fsr_info)) + BUG(); + + fsr_info[nr].fn = fn; + fsr_info[nr].sig = sig; + fsr_info[nr].code = code; + fsr_info[nr].name = name; } /* -- cgit v1.2.1 From 33a9c41bf5d8adae9d882513e617c4c645195e71 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:20:22 +0100 Subject: ARM: 6255/1: Work around infinite loop in handling of translation faults On ARM one Linux PGD entry contains two hardware entries (see the page tables layout in pgtable.h). We normally guarantee that we always fill both L1 entries. But create_mapping() doesn't follow the rule. It can create individual L1 entries, so in do_translation_fault() we have to apply the pmd_none() check to the entry that really corresponds to the address, not to the first entry of the pair. Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/fault.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 84131c832430..564b1c4829e4 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -413,7 +413,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr, pmd_k = pmd_offset(pgd_k, addr); pmd = pmd_offset(pgd, addr); - if (pmd_none(*pmd_k)) + /* + * On ARM one Linux PGD entry contains two hardware entries (see page + * tables layout in pgtable.h). We normally guarantee that we always + * fill both L1 entries. But create_mapping() doesn't follow the rule. + * It can create individual L1 entries, so here we have to apply the + * pmd_none() check to the entry that really corresponds to the + * address, not to the first entry of the pair. + */ + index = (addr >> SECTION_SHIFT) & 1; + if (pmd_none(pmd_k[index])) goto bad_area; copy_pmd(pmd, pmd_k); -- cgit v1.2.1 From 993bf4ec8c2a2b7979389ab196bf2fe217117158 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:23:25 +0100 Subject: ARM: 6256/1: Check arch version and modify fsr_info[] depending on it at runtime Signed-off-by: Kirill A.
Shutemov Signed-off-by: Russell King --- arch/arm/mm/fault.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 564b1c4829e4..5835e63454ff 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -475,12 +475,7 @@ static struct fsr_info { { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, { do_bad, SIGKILL, 0, "terminal exception" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, -/* Do we need runtime check ? */ -#if __LINUX_ARM_ARCH__ < 6 { do_bad, SIGBUS, 0, "external abort on linefetch" }, -#else - { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" }, -#endif { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, { do_bad, SIGBUS, 0, "external abort on linefetch" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, @@ -605,3 +600,14 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) arm_notify_die("", regs, &info, ifsr, 0); } +static int __init exceptions_init(void) +{ + if (cpu_architecture() >= CPU_ARCH_ARMv6) { + hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR, + "I-cache maintenance fault"); + } + + return 0; +} + +arch_initcall(exceptions_init); -- cgit v1.2.1 From b8ab5397bcbd92e3fd4a9770e0bf59315fa38dab Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Jul 2010 11:20:41 +0100 Subject: ARM: 6268/1: ARMv6K and ARMv7 use fault statuses 3 and 6 as Access Flag fault Statuses 3 (0b00011) and 6 (0b00110) of the DFSR are Access Flag faults on ARMv6K and ARMv7. Let's patch fsr_info[] at runtime if we are on ARMv7 or later. Unfortunately, we don't have a runtime check for the 'K' extension, so we can't check for it. Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 14 ++++++++++++-- arch/arm/mm/fault.c | 11 +++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 77cfdbed9501..d073b64ae87e 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -926,8 +926,18 @@ static int __init alignment_init(void) hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, "alignment exception"); - hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, - "alignment exception"); + + /* + * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section + * fault, not as alignment error. + * + * TODO: handle ARMv6K properly. Runtime check for 'K' extension is + * needed. + */ + if (cpu_architecture() <= CPU_ARCH_ARMv6) { + hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); + } return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5835e63454ff..23b0b03af5ea 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -607,6 +607,17 @@ static int __init exceptions_init(void) "I-cache maintenance fault"); } + if (cpu_architecture() >= CPU_ARCH_ARMv7) { + /* + * TODO: Access flag faults introduced in ARMv6K.
+ * Runtime check for 'K' extension is needed + */ + hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR, + "section access flag fault"); + hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR, + "section access flag fault"); + } + return 0; } -- cgit v1.2.1 From 9ca03a21e320a6bf44559323527aba704bcc8772 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 12:22:12 +0100 Subject: ARM: Factor out common code from cpu_proc_fin() All implementations of cpu_proc_fin() start by disabling interrupts and then flushing the caches. Rather than have every processor's proc_fin() implementation do this, move it out into generic code, and move the cache flush past setup_mm_for_reboot() (so it can benefit from having the caches still enabled). This allows cpu_proc_fin() to become independent of the L1/L2 cache types, and eventually lets the L2 cache flushing move into the L2 support code. Signed-off-by: Russell King --- arch/arm/mm/proc-arm1020.S | 6 +----- arch/arm/mm/proc-arm1020e.S | 6 +----- arch/arm/mm/proc-arm1022.S | 6 +----- arch/arm/mm/proc-arm1026.S | 6 +----- arch/arm/mm/proc-arm6_7.S | 2 -- arch/arm/mm/proc-arm720.S | 6 +----- arch/arm/mm/proc-arm740.S | 6 +----- arch/arm/mm/proc-arm7tdmi.S | 2 -- arch/arm/mm/proc-arm920.S | 10 +--------- arch/arm/mm/proc-arm922.S | 10 +--------- arch/arm/mm/proc-arm925.S | 6 +----- arch/arm/mm/proc-arm926.S | 6 +----- arch/arm/mm/proc-arm940.S | 6 +----- arch/arm/mm/proc-arm946.S | 6 +----- arch/arm/mm/proc-arm9tdmi.S | 2 -- arch/arm/mm/proc-fa526.S | 6 +----- arch/arm/mm/proc-feroceon.S | 7 +------ arch/arm/mm/proc-mohawk.S | 6 +----- arch/arm/mm/proc-sa110.S | 8 ++------ arch/arm/mm/proc-sa1100.S | 6 +----- arch/arm/mm/proc-v6.S | 5 +---- arch/arm/mm/proc-v7.S | 5 +---- arch/arm/mm/proc-xsc3.S | 6 +----- arch/arm/mm/proc-xscale.S | 6 +----- 24 files changed, 22 insertions(+), 119 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 72507c630ceb..203a4e944d9e 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020_proc_init) * cpu_arm1020_proc_fin() */ ENTRY(cpu_arm1020_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1020_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1020_reset(loc) diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index d27829805609..1a511e765909 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020e_proc_init) * cpu_arm1020e_proc_fin() */ ENTRY(cpu_arm1020e_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1020e_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1020e_reset(loc) diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index ce13e4a827de..1ffa4eb9c34f 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -68,15 +68,11 @@ ENTRY(cpu_arm1022_proc_init) * cpu_arm1022_proc_fin() */ ENTRY(cpu_arm1022_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1022_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1022_reset(loc) diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 636672a29c6d..5697c34b95b0 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -68,15 +68,11 @@ ENTRY(cpu_arm1026_proc_init) * cpu_arm1026_proc_fin() */ ENTRY(cpu_arm1026_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1026_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1026_reset(loc) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 795dc615f43b..64e0b327c7c5 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -184,8 +184,6 @@ ENTRY(cpu_arm7_proc_init) ENTRY(cpu_arm6_proc_fin) ENTRY(cpu_arm7_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov r0, #0x31 @ ....S..DP...M mcr p15, 0, r0, c1, c0, 0 @ disable caches mov pc, lr diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 0b62de244666..9d96824134fc 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -54,15 +54,11 @@ ENTRY(cpu_arm720_proc_init) mov pc, lr ENTRY(cpu_arm720_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - mcr p15, 0, r1, c7, c7, 0 @ invalidate cache - ldmfd sp!, {pc} + mov pc, lr /* * Function: arm720_proc_do_idle(void) diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 01860cdeb2ec..6c1a9ab059ae 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -36,15 +36,11 @@ ENTRY(cpu_arm740_switch_mm) * cpu_arm740_proc_fin() */ ENTRY(cpu_arm740_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x3f000000 @ bank/f/lock/s bic r0, r0, #0x0000000c @ w-buffer/cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - mcr p15, 0, r0, c7, c0, 0 @ invalidate cache - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm740_reset(loc) diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 1201b9863829..6a850dbba22e 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -36,8 +36,6 @@ ENTRY(cpu_arm7tdmi_switch_mm) * cpu_arm7tdmi_proc_fin() */ ENTRY(cpu_arm7tdmi_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov pc, lr /* diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 8be81992645d..86f80aa56216 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -69,19 +69,11 @@ ENTRY(cpu_arm920_proc_init) * cpu_arm920_proc_fin() */ ENTRY(cpu_arm920_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip -#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH - bl arm920_flush_kern_cache_all -#else - bl v4wt_flush_kern_cache_all -#endif mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm920_reset(loc) diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index c0ff8e4b1074..f76ce9b62883 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -71,19 +71,11 @@ ENTRY(cpu_arm922_proc_init) * cpu_arm922_proc_fin() */ ENTRY(cpu_arm922_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip -#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH - bl arm922_flush_kern_cache_all -#else - bl v4wt_flush_kern_cache_all -#endif mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm922_reset(loc) diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 3c6cffe400f6..657bd3f7c153 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -92,15 +92,11 @@ ENTRY(cpu_arm925_proc_init) * cpu_arm925_proc_fin() */ ENTRY(cpu_arm925_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm925_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm925_reset(loc) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 75b707c9cce1..73f1f3c68910 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -61,15 +61,11 @@ ENTRY(cpu_arm926_proc_init) * cpu_arm926_proc_fin() */ ENTRY(cpu_arm926_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm926_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm926_reset(loc) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 1af1657819eb..fffb061a45a5 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -37,15 +37,11 @@ ENTRY(cpu_arm940_switch_mm) * cpu_arm940_proc_fin() */ ENTRY(cpu_arm940_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm940_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm940_reset(loc) diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1664b6aaff79..249a6053760a 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -44,15 +44,11 @@ ENTRY(cpu_arm946_switch_mm) * cpu_arm946_proc_fin() */ ENTRY(cpu_arm946_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm946_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm946_reset(loc) diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 28545c29dbcd..db475667fac2 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -36,8 +36,6 @@ ENTRY(cpu_arm9tdmi_switch_mm) * cpu_arm9tdmi_proc_fin() */ ENTRY(cpu_arm9tdmi_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov pc, lr /* diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08f5ac237ad4..7803fdf70029 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -39,17 +39,13 @@ ENTRY(cpu_fa526_proc_init) * cpu_fa526_proc_fin() */ ENTRY(cpu_fa526_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl fa_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches nop nop - ldmfd sp!, {pc} + mov pc, lr /* * cpu_fa526_reset(loc) diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 53e632343849..b304d0104a4e 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -75,11 +75,6 @@ ENTRY(cpu_feroceon_proc_init) * cpu_feroceon_proc_fin() */ ENTRY(cpu_feroceon_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl feroceon_flush_kern_cache_all - #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) mov r0, #0 @@ -91,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin) bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_feroceon_reset(loc) diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index caa31154e7db..5f6892fcc167 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -51,15 +51,11 @@ ENTRY(cpu_mohawk_proc_init) * cpu_mohawk_proc_fin() */ ENTRY(cpu_mohawk_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl mohawk_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...iz........... bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_mohawk_reset(loc) diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 7b706b389906..a201eb04b5e1 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -44,17 +44,13 @@ ENTRY(cpu_sa110_proc_init) * cpu_sa110_proc_fin() */ ENTRY(cpu_sa110_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl v4wb_flush_kern_cache_all @ clean caches -1: mov r0, #0 + mov r0, #0 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_sa110_reset(loc) diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 5c47760c2064..7ddc4805bf97 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -55,16 +55,12 @@ ENTRY(cpu_sa1100_proc_init) * - Clean and turn off caches. */ ENTRY(cpu_sa1100_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl v4wb_flush_kern_cache_all mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_sa1100_reset(loc) diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 2f5a3c23a0fe..22aac8515196 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -42,14 +42,11 @@ ENTRY(cpu_v6_proc_init) mov pc, lr ENTRY(cpu_v6_proc_fin) - stmfd sp!, {lr} - cpsid if @ disable interrupts - bl v6_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_v6_reset(loc) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 8071bcd4c995..6a8506d99ee9 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -45,14 +45,11 @@ ENTRY(cpu_v7_proc_init) ENDPROC(cpu_v7_proc_init) ENTRY(cpu_v7_proc_fin) - stmfd sp!, {lr} - cpsid if @ disable interrupts - bl v7_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr ENDPROC(cpu_v7_proc_fin) /* diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index e5797f1c1db7..361a51e49030 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -90,15 +90,11 @@ ENTRY(cpu_xsc3_proc_init) * cpu_xsc3_proc_fin() */ ENTRY(cpu_xsc3_proc_fin) - str lr, [sp, #-4]! 
- mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE - msr cpsr_c, r0 - bl xsc3_flush_kern_cache_all @ clean caches mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldr pc, [sp], #4 + mov pc, lr /* * cpu_xsc3_reset(loc) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 63037e2162f2..14075979bcba 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -124,15 +124,11 @@ ENTRY(cpu_xscale_proc_init) * cpu_xscale_proc_fin() */ ENTRY(cpu_xscale_proc_fin) - str lr, [sp, #-4]! - mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE - msr cpsr_c, r0 - bl xscale_flush_kern_cache_all @ clean caches mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldr pc, [sp], #4 + mov pc, lr /* * cpu_xscale_reset(loc) -- cgit v1.2.1 From cc0e72b87a4afb520fab09b67e1c16e4ec4aaadb Mon Sep 17 00:00:00 2001 From: Changhwan Youn Date: Fri, 16 Jul 2010 12:15:38 +0900 Subject: ARM: S5PV310: Add new Kconfig and Makefiles This patch adds the Kconfig and Makefile for the new S5PV310 SoC. It also updates arch/arm Kconfig, Makefile and arch/arm/mm/Kconfig to include support for the new S5PV310. Signed-off-by: Changhwan Youn Signed-off-by: Kukjin Kim --- arch/arm/mm/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 101105e52610..9ca76bfe471c 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -782,7 +782,8 @@ config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ - ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 + ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ + ARCH_S5PV310 default y select OUTER_CACHE select OUTER_CACHE_SYNC -- cgit v1.2.1 From c5f800656bc985b448b1d848d309648826536543 Mon Sep 17 00:00:00 2001 From: Erik Gilling Date: Thu, 21 Jan 2010 16:53:02 -0800 Subject: [ARM] tegra: initial tegra support v2: Fixes from Mike Rapoport - remove unused header files (mach/dma.h and mach/nand.h) - remove tegra 1 references from Makefile.boot v2: fixes from Russell King - remove mach/io.h include from mach/iomap.h - fix whitespace in Kconfig v2: from Colin Cross - fix invalid immediate in debug-macro.S v3: - allow selection of multiple boards Signed-off-by: Colin Cross Signed-off-by: Erik Gilling --- arch/arm/mm/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 87ec141fcaa6..e1fd98fff8fa 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -771,7 +771,8 @@ config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ - ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 + ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ + ARCH_TEGRA default y select OUTER_CACHE select OUTER_CACHE_SYNC -- cgit v1.2.1 From 597781f3e51f48ef8e67be772196d9e9673752c4 Mon Sep 17 00:00:00 2001 From: Cesar Eduardo Barros Date: Mon, 9 Aug 2010 17:18:32 -0700 Subject: kmap_atomic: make kunmap_atomic() harder to misuse 
kunmap_atomic() is currently at level -4 on Rusty's "Hard To Misuse" list[1] ("Follow common convention and you'll get it wrong"), except in some architectures when CONFIG_DEBUG_HIGHMEM is set[2][3]. kunmap() takes a pointer to a struct page; kunmap_atomic(), however, takes a pointer to within the page itself. This seems to trip people up once in a while (the convention they are following is the one from kunmap()). Make it much harder to misuse, by moving it to level 9 on Rusty's list[4] ("The compiler/linker won't let you get it wrong"). This is done by refusing to build if the type of its first argument is a pointer to a struct page. The real kunmap_atomic() is renamed to kunmap_atomic_notypecheck() (which is what you would call if, for some strange reason, calling it with a pointer to a struct page is not incorrect in your code). The previous version of this patch was compile-tested on x86-64. [1] http://ozlabs.org/~rusty/index.cgi/tech/2008-04-01.html [2] In these cases, it is at level 5, "Do it right or it will always break at runtime." [3] At least mips and powerpc look very similar, and sparc also seems to share a common ancestor with both; there seems to be quite some degree of copy-and-paste coding here. The include/asm/highmem.h file for these three archs mentions x86 CPUs at its top. [4] http://ozlabs.org/~rusty/index.cgi/tech/2008-03-30.html [5] As an aside, could someone tell me why mn10300 uses unsigned long as the first parameter of kunmap_atomic() instead of void *? Signed-off-by: Cesar Eduardo Barros Cc: Russell King (arch/arm) Cc: Ralf Baechle (arch/mips) Cc: David Howells (arch/frv, arch/mn10300) Cc: Koichi Yasutake (arch/mn10300) Cc: Kyle McMartin (arch/parisc) Cc: Helge Deller (arch/parisc) Cc: "James E.J. Bottomley" (arch/parisc) Cc: Benjamin Herrenschmidt (arch/powerpc) Cc: Paul Mackerras (arch/powerpc) Cc: "David S. Miller" (arch/sparc) Cc: Thomas Gleixner (arch/x86) Cc: Ingo Molnar (arch/x86) Cc: "H. Peter Anvin" (arch/x86) Cc: Arnd Bergmann (include/asm-generic) Cc: Rusty Russell ("Hard To Misuse" list) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/highmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 6ab244062b4a..1fbdb55bfd1b 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -82,7 +82,7 @@ void *kmap_atomic(struct page *page, enum km_type type) } EXPORT_SYMBOL(kmap_atomic); -void kunmap_atomic(void *kvaddr, enum km_type type) +void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); @@ -103,7 +103,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) } pagefault_enable(); } -EXPORT_SYMBOL(kunmap_atomic); +EXPORT_SYMBOL(kunmap_atomic_notypecheck); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) { -- cgit v1.2.1
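A minimal sketch of the type-check mechanism this commit describes, assuming the macro lands in include/linux/highmem.h roughly as follows (the exact upstream spelling may differ):

	/* Refuse to build if the caller passes a struct page *, i.e.
	   follows the kunmap() convention instead of passing the kernel
	   virtual address returned by kmap_atomic(). */
	#define kunmap_atomic(addr, idx) do { \
			BUILD_BUG_ON(__same_type((addr), struct page *)); \
			kunmap_atomic_notypecheck((addr), (idx)); \
		} while (0)

With this in place, kunmap_atomic(page, KM_USER0) is rejected at compile time, while the correct kunmap_atomic(vaddr, KM_USER0) still builds; __same_type() is the existing wrapper around gcc's __builtin_types_compatible_p().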