| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 17:29:08 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 17:29:08 -0700 |
| commit | 73154383f02998fdd6a1f26c7ef33bfc3785a101 (patch) | |
| tree | 85a4c10cf32172b99aed01e95ded7269afcc9d7d /arch | |
| parent | 362ed48dee509abe24cf84b7e137c7a29a8f4d2d (diff) | |
| parent | ca0dde97178e75ed1370b8616326f5496a803d65 (diff) | |
Merge branch 'akpm' (incoming from Andrew)
Merge first batch of fixes from Andrew Morton:
- A couple of kthread changes
- A few minor audit patches
- A number of fbdev patches. Florian remains AWOL so I'm picking up
some of these.
- A few kbuild things
- ocfs2 updates
- Almost all of the MM queue
(And in the meantime, I already have the second big batch from Andrew
pending in my mailbox ;^)
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (149 commits)
memcg: take reference before releasing rcu_read_lock
mem hotunplug: fix kfree() of bootmem memory
mm: Kconfig: add an option to disable bounce
mm, nobootmem: do memset() after memblock_reserve()
mm, nobootmem: clean-up of free_low_memory_core_early()
fs/buffer.c: remove unnecessary init operation after allocating buffer_head.
numa, cpu hotplug: change links of CPU and node when changing node number by onlining CPU
mm: fix memory_hotplug.c printk format warning
mm: swap: mark swap pages writeback before queueing for direct IO
swap: redirty page if page write fails on swap file
mm, memcg: give exiting processes access to memory reserves
thp: fix huge zero page logic for page with pfn == 0
memcg: avoid accessing memcg after releasing reference
fs: fix fsync() error reporting
memblock: fix missing comment of memblock_insert_region()
mm: Remove unused parameter of pages_correctly_reserved()
firmware, memmap: fix firmware_map_entry leak
mm/vmstat: add note on safety of drain_zonestat
mm: thp: add split tail pages to shrink page list in page reclaim
mm: allow for outstanding swap writeback accounting
...
Diffstat (limited to 'arch')
66 files changed, 371 insertions, 900 deletions
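
Most of the arch/ changes below follow one pattern: the per-architecture open-coded loops that clear PG_reserved, reset the page count and hand each page back to the allocator (bumping totalram_pages along the way) are replaced by calls to the generic free_reserved_area(start, end, poison, name) and free_initmem_default(poison) helpers. As a rough orientation, here is a minimal sketch of that pattern, reconstructed from the loops being deleted in this diff; the sketch_* name is made up for illustration and this is not the actual generic mm implementation.

```c
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/string.h>

/*
 * Illustrative sketch only: it mirrors the per-arch loops removed below.
 * The real consolidation point is the generic free_reserved_area(); this
 * copy just shows what all those conversions have in common.
 */
static unsigned long sketch_free_reserved_area(unsigned long start,
					       unsigned long end,
					       int poison, const char *name)
{
	unsigned long addr, pages = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);	/* no longer a bootmem-reserved page */
		init_page_count(page);		/* reset the refcount before freeing */
		if (poison)			/* e.g. POISON_FREE_INITMEM */
			memset((void *)addr, poison, PAGE_SIZE);
		__free_page(page);		/* return it to the page allocator */
		totalram_pages++;
		pages++;
	}

	if (name)
		pr_info("Freeing %s memory: %luK\n",
			name, pages << (PAGE_SHIFT - 10));

	return pages;
}
```

free_initmem_default(poison) is the same idea applied to the [__init_begin, __init_end) range, which is why the various arch free_initmem() bodies in this diff collapse to a single call.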
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 1383f8601a93..1d4aabfcf9a1 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -185,7 +185,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) mb(); } -extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); static struct resource irongate_io = { @@ -239,8 +238,8 @@ nautilus_init_pci(void) if (pci_mem < memtop) memtop = pci_mem; if (memtop > alpha_mv.min_mem_address) { - free_reserved_mem(__va(alpha_mv.min_mem_address), - __va(memtop)); + free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address), + (unsigned long)__va(memtop), 0, NULL); printk("nautilus_init_pci: %ldk freed\n", (memtop - alpha_mv.min_mem_address) >> 10); } diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 1ad6ca74bed2..0ba85ee4a466 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -31,6 +31,7 @@ #include <asm/console.h> #include <asm/tlb.h> #include <asm/setup.h> +#include <asm/sections.h> extern void die_if_kernel(char *,struct pt_regs *,long); @@ -281,8 +282,6 @@ printk_memory_info(void) { unsigned long codesize, reservedpages, datasize, initsize, tmp; extern int page_is_ram(unsigned long) __init; - extern char _text, _etext, _data, _edata; - extern char __init_begin, __init_end; /* printk all informations */ reservedpages = 0; @@ -318,32 +317,15 @@ mem_init(void) #endif /* CONFIG_DISCONTIGMEM */ void -free_reserved_mem(void *start, void *end) -{ - void *__start = start; - for (; __start < end; __start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(__start)); - init_page_count(virt_to_page(__start)); - free_page((long)__start); - totalram_pages++; - } -} - -void free_initmem(void) { - extern char __init_begin, __init_end; - - free_reserved_mem(&__init_begin, &__init_end); - printk ("Freeing unused kernel memory: %ldk freed\n", - (&__init_end - &__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_mem((void *)start, (void *)end); - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 3973ae395772..33885048fa36 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -17,6 +17,7 @@ #include <asm/hwrpb.h> #include <asm/pgalloc.h> +#include <asm/sections.h> pg_data_t node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); @@ -325,8 +326,6 @@ void __init mem_init(void) { unsigned long codesize, reservedpages, datasize, initsize, pfn; extern int page_is_ram(unsigned long) __init; - extern char _text, _etext, _data, _edata; - extern char __init_begin, __init_end; unsigned long nid, i; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index caf797de23fc..727d4794ea0f 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -144,37 +144,18 @@ void __init mem_init(void) PAGES_TO_KB(reserved_pages)); } -static void __init free_init_pages(const char *what, unsigned long begin, - unsigned long end) -{ - unsigned long addr; - - pr_info("Freeing %s: %ldk [%lx] to [%lx]\n", - what, TO_KB(end - begin), begin, end); - - /* need to check that the page we free is not a partial page */ - for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - 
init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } -} - /* * free_initmem: Free all the __init memory. */ void __init_refok free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)__init_begin, - (unsigned long)__init_end); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4dbe4a..9bcd262a9008 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). + */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad722f1208a5..9a5cdc01fcdf 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -99,6 +99,9 @@ void show_mem(unsigned int filter) printk("Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; + for_each_bank (i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; @@ -424,24 +427,6 @@ void __init bootmem_init(void) max_pfn = max_high - PHYS_PFN_OFFSET; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); - - return pages; -} - /* * Poison init memory with an undefined instruction (ARM) or a branch to an * undefined instruction (Thumb). 
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi) #endif } +#ifdef CONFIG_HIGHMEM +static inline void free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} +#endif + static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM @@ -569,8 +562,7 @@ static void __init free_highpages(void) if (res_end > end) res_end = end; if (res_start != start) - totalhigh_pages += free_area(start, res_start, - NULL); + free_area_high(start, res_start); start = res_end; if (start == end) break; @@ -578,9 +570,8 @@ static void __init free_highpages(void) /* And now free anything which remains */ if (start < end) - totalhigh_pages += free_area(start, end, NULL); + free_area_high(start, end); } - totalram_pages += totalhigh_pages; #endif } @@ -609,8 +600,7 @@ void __init mem_init(void) #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - totalram_pages += free_area(PHYS_PFN_OFFSET, - __phys_to_pfn(__pa(swapper_pg_dir)), NULL); + free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); #endif free_highpages(); @@ -738,16 +728,12 @@ void free_initmem(void) extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); - totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), - __phys_to_pfn(__pa(&__tcm_end)), - "TCM link"); + free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); #endif poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - totalram_pages += free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - "initrd"); + free_reserved_area(start, end, 0, "initrd"); } } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 800aac306a08..f497ca77925a 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -197,24 +197,6 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - pr_info("Freeing %s memory: %dK\n", s, size); - - return pages; -} - /* * Poison init memory with an undefined instruction (0x0). 
*/ @@ -405,9 +387,7 @@ void __init mem_init(void) void free_initmem(void) { poison_init_mem(__init_begin, __init_end - __init_begin); - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - totalram_pages += free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - "initrd"); + free_reserved_area(start, end, 0, "initrd"); } } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 70b8cd4021c4..eeecc9c8ed68 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr) } #ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_ARM64_64K_PAGES -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - return vmemmap_populate_basepages(start_page, size, node); + return vmemmap_populate_basepages(start, end, node); } #else /* !CONFIG_ARM64_64K_PAGES */ -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long addr = start; unsigned long next; pgd_t *pgd; pud_t *pud; @@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } #endif /* CONFIG_ARM64_64K_PAGES */ -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index 2798c2d4a1cf..e66e8406f992 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -146,34 +146,14 @@ void __init mem_init(void) initsize >> 10); } -static inline void free_area(unsigned long addr, unsigned long end, char *s) -{ - unsigned int size = (end - addr) >> 10; - - for (; addr < end; addr += PAGE_SIZE) { - struct page *page = virt_to_page(addr); - ClearPageReserved(page); - init_page_count(page); - free_page(addr); - totalram_pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n", - s, size, end - (size << 10), end); -} - void free_initmem(void) { - free_area((unsigned long)__init_begin, (unsigned long)__init_end, - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD - void free_initrd_mem(unsigned long start, unsigned long end) { - free_area(start, end, "initrd"); + free_reserved_area(start, end, 0, "initrd"); } - #endif diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c index 9cb85537bd2b..82d01a71207f 100644 --- a/arch/blackfin/mm/init.c +++ b/arch/blackfin/mm/init.c @@ -103,7 +103,7 @@ void __init mem_init(void) max_mapnr = num_physpages = MAP_NR(high_memory); printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); - /* This will put all memory onto the freelists. */ + /* This will put all low memory onto the freelists. 
*/ totalram_pages = free_all_bootmem(); reservedpages = 0; @@ -129,24 +129,11 @@ void __init mem_init(void) initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); } -static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - /* next to check that the page we free is not a partial page */ - for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { #ifndef CONFIG_MPU - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, 0, "initrd"); #endif } #endif @@ -154,10 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) void __init_refok free_initmem(void) { #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); - + free_initmem_default(0); if (memory_start == (unsigned long)(&__init_end)) memory_start = (unsigned long)(&__init_begin); #endif diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index 89395f09648a..a9fcd89b251b 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c @@ -77,37 +77,11 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk(KERN_INFO "Freeing initrd memory: %luk freed\n", - (pages * PAGE_SIZE) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif void __init free_initmem(void) { - unsigned long addr; - - /* - * The following code should be cool even if these sections - * are not page aligned. 
- */ - addr = PAGE_ALIGN((unsigned long)(__init_begin)); - - /* next to check that the page we free is not a partial page */ - for (; addr + PAGE_SIZE < (unsigned long)(__init_end); - addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n", - (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10)); + free_initmem_default(0); } diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index d72ab58fd83e..9ac80946dada 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -12,12 +12,10 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <asm/tlb.h> +#include <asm/sections.h> unsigned long empty_zero_page; -extern char _stext, _edata, _etext; /* From linkerscript */ -extern char __init_begin, __init_end; - void __init mem_init(void) { @@ -67,15 +65,5 @@ mem_init(void) void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n", - (unsigned long)((&__init_end - &__init_begin) >> 10)); + free_initmem_default(0); } diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index 92e97b0894a6..dee354fa6b64 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -122,7 +122,7 @@ void __init mem_init(void) #endif int codek = 0, datak = 0; - /* this will put all memory onto the freelists */ + /* this will put all low memory onto the freelists */ totalram_pages = free_all_bootmem(); #ifdef CONFIG_MMU @@ -131,14 +131,8 @@ void __init mem_init(void) datapages++; #ifdef CONFIG_HIGHMEM - for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) { - struct page *page = &mem_map[pfn]; - - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalram_pages++; - } + for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) + free_highmem_page(&mem_map[pfn]); #endif codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; @@ -168,21 +162,7 @@ void __init mem_init(void) void free_initmem(void) { #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) - unsigned long start, end, addr; - - start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */ - end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */ - - /* next to check that the page we free is not a partial page */ - for (addr = start; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - - printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n", - (end - start) >> 10, start, end); + free_initmem_default(0); #endif } /* end free_initmem() */ @@ -193,14 +173,6 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10); + free_reserved_area(start, end, 0, "initrd"); } /* end free_initrd_mem() */ #endif diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 981e25094b1a..ff349d70a29b 100644 --- 
a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -139,7 +139,7 @@ void __init mem_init(void) start_mem = PAGE_ALIGN(start_mem); max_mapnr = num_physpages = MAP_NR(high_memory); - /* this will put all memory onto the freelists */ + /* this will put all low memory onto the freelists */ totalram_pages = free_all_bootmem(); codek = (_etext - _stext) >> 10; @@ -161,15 +161,7 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk ("Freeing initrd memory: %dk freed\n", pages); + free_reserved_area(start, end, 0, "initrd"); } #endif @@ -177,23 +169,7 @@ void free_initmem(void) { #ifdef CONFIG_RAMKERNEL - unsigned long addr; -/* - * the following code should be cool even if these sections - * are not page aligned. - */ - addr = PAGE_ALIGN((unsigned long)(__init_begin)); - /* next to check that the page we free is not a partial page */ - for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", - (addr - PAGE_ALIGN((long) __init_begin)) >> 10, - (int)(PAGE_ALIGN((unsigned long)__init_begin)), - (int)(addr - PAGE_SIZE)); + free_initmem_default(0); #endif } diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h index 94eaa5bd5d0c..aa910054b8e7 100644 --- a/arch/ia64/include/asm/hugetlb.h +++ b/arch/ia64/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_IA64_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 80dab509dfb0..67c59ebec899 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -47,6 +47,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); printk(KERN_INFO "Node memory in pages:\n"); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; for_each_online_pgdat(pgdat) { unsigned long present; unsigned long flags; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index c2e955ee79a8..ae4db4bd6d97 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -623,6 +623,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; printk(KERN_INFO "Node memory in pages:\n"); for_each_online_pgdat(pgdat) { unsigned long present; @@ -817,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - return vmemmap_populate_basepages(start_page, size, node); + return vmemmap_populate_basepages(start, end, node); } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } #endif diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 20bc967c7209..d1fe4b402601 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -154,25 +154,14 @@ ia64_init_addr_space (void) void 
free_initmem (void) { - unsigned long addr, eaddr; - - addr = (unsigned long) ia64_imva(__init_begin); - eaddr = (unsigned long) ia64_imva(__init_end); - while (addr < eaddr) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - ++totalram_pages; - addr += PAGE_SIZE; - } - printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n", - (__init_end - __init_begin) >> 10); + free_reserved_area((unsigned long)ia64_imva(__init_begin), + (unsigned long)ia64_imva(__init_end), + 0, "unused kernel"); } void __init free_initrd_mem (unsigned long start, unsigned long end) { - struct page *page; /* * EFI uses 4KB pages while the kernel can use 4KB or bigger. * Thus EFI and the kernel may have different page sizes. It is @@ -213,11 +202,7 @@ free_initrd_mem (unsigned long start, unsigned long end) for (; start < end; start += PAGE_SIZE) { if (!virt_addr_valid(start)) continue; - page = virt_to_page(start); - ClearPageReserved(page); - init_page_count(page); - free_page(start); - ++totalram_pages; + free_reserved_page(virt_to_page(start)); } } diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index def782e31aac..4248492b9321 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -61,13 +61,26 @@ paddr_to_nid(unsigned long paddr) int __meminit __early_pfn_to_nid(unsigned long pfn) { int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; + /* + * NOTE: The following SMP-unsafe globals are only used early in boot + * when the kernel is running single-threaded. + */ + static int __meminitdata last_ssec, last_esec; + static int __meminitdata last_nid; + + if (section >= last_ssec && section < last_esec) + return last_nid; for (i = 0; i < num_node_memblks; i++) { ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; esec = (node_memblk[i].start_paddr + node_memblk[i].size + ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; - if (section >= ssec && section < esec) + if (section >= ssec && section < esec) { + last_ssec = ssec; + last_esec = esec; + last_nid = node_memblk[i].nid; return node_memblk[i].nid; + } } return -1; diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index 78b660e903da..ab4cbce91a9b 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c @@ -28,10 +28,7 @@ #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/tlb.h> - -/* References to section boundaries */ -extern char _text, _etext, _edata; -extern char __init_begin, __init_end; +#include <asm/sections.h> pgd_t swapper_pg_dir[1024]; @@ -184,17 +181,7 @@ void __init mem_init(void) *======================================================================*/ void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \ - (int)(&__init_end - &__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -204,13 +191,6 @@ void free_initmem(void) *======================================================================*/ void free_initrd_mem(unsigned long start, unsigned long end) { - unsigned long p; - for (p = start; p < end; p += PAGE_SIZE) { - ClearPageReserved(virt_to_page(p)); - init_page_count(virt_to_page(p)); - free_page(p); - totalram_pages++; - } - printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + 
free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 519aad8fa812..1af2ca3411f6 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -110,18 +110,7 @@ void __init paging_init(void) void free_initmem(void) { #ifndef CONFIG_MMU_SUN3 - unsigned long addr; - - addr = (unsigned long) __init_begin; - for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n", - (addr - (unsigned long) __init_begin) >> 10, - (unsigned int) __init_begin, (unsigned int) __init_end); + free_initmem_default(0); #endif /* CONFIG_MMU_SUN3 */ } @@ -213,15 +202,6 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - pr_notice("Freeing initrd memory: %dk freed\n", - pages << (PAGE_SHIFT - 10)); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 504a398d5f8b..d05b8455c44c 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -380,14 +380,8 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM unsigned long tmp; - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; - } - totalram_pages += totalhigh_pages; + for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) + free_highmem_page(pfn_to_page(tmp)); num_physpages += totalhigh_pages; #endif /* CONFIG_HIGHMEM */ @@ -412,32 +406,15 @@ void __init mem_init(void) return; } -static void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10); -} - void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - end = end & PAGE_MASK; - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 0e0b0a5ec756..f05df5630c84 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -46,7 +46,6 @@ void machine_shutdown(void); void machine_halt(void); void machine_power_off(void); -void free_init_pages(char *what, unsigned long begin, unsigned long end); extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 8f8b367c079e..4ec137d13ad7 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -82,13 +82,9 @@ static unsigned long highmem_setup(void) /* FIXME not sure about */ if 
(memblock_is_reserved(pfn << PAGE_SHIFT)) continue; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); reservedpages++; } - totalram_pages += totalhigh_pages; pr_info("High memory: %luk\n", totalhigh_pages << (PAGE_SHIFT-10)); @@ -236,40 +232,16 @@ void __init setup_memory(void) paging_init(); } -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - pr_notice("Freeing initrd memory: %dk freed\n", - (int)(pages * (PAGE_SIZE / 1024))); + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); + free_initmem_default(0); } void __init mem_init(void) diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index ef99db994c2f..fe0d15d32660 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -10,6 +10,7 @@ #define __ASM_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 67929251286c..3d0346dbccf4 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -77,10 +77,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page); /* * Not static inline because used by IP27 special magic initialization code */ -unsigned long setup_zero_pages(void) +void setup_zero_pages(void) { - unsigned int order; - unsigned long size; + unsigned int order, i; struct page *page; if (cpu_has_vce) @@ -94,15 +93,10 @@ unsigned long setup_zero_pages(void) page = virt_to_page((void *)empty_zero_page); split_page(page, order); - while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { - SetPageReserved(page); - page++; - } - - size = PAGE_SIZE << order; - zero_page_mask = (size - 1) & PAGE_MASK; + for (i = 0; i < (1 << order); i++, page++) + mark_page_reserved(page); - return 1UL << order; + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } #ifdef CONFIG_MIPS_MT_SMTC @@ -380,7 +374,7 @@ void __init mem_init(void) high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + setup_zero_pages(); /* Setup zeroed pages. 
*/ reservedpages = ram = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) @@ -399,12 +393,8 @@ void __init mem_init(void) SetPageReserved(page); continue; } - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); } - totalram_pages += totalhigh_pages; num_physpages += totalhigh_pages; #endif @@ -440,11 +430,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) struct page *page = pfn_to_page(pfn); void *addr = phys_to_virt(PFN_PHYS(pfn)); - ClearPageReserved(page); - init_page_count(page); memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); - __free_page(page); - totalram_pages++; + free_reserved_page(page); } printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } @@ -452,18 +439,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", - virt_to_phys((void *)start), - virt_to_phys((void *)end)); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif void __init_refok free_initmem(void) { prom_free_prom_memory(); - free_init_pages("unused kernel memory", - __pa_symbol(&__init_begin), - __pa_symbol(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } #ifndef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 3505d08ff2fd..5f2bddb1860e 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -457,7 +457,7 @@ void __init prom_free_prom_memory(void) /* We got nothing to free here ... */ } -extern unsigned long setup_zero_pages(void); +extern void setup_zero_pages(void); void __init paging_init(void) { @@ -492,7 +492,7 @@ void __init mem_init(void) totalram_pages += free_all_bootmem_node(NODE_DATA(node)); } - totalram_pages -= setup_zero_pages(); /* This comes from node 0 */ + setup_zero_pages(); /* This comes from node 0 */ codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index e57e5bc23562..5a8ace63a6b4 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -139,30 +139,11 @@ void __init mem_init(void) } /* - * - */ -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *) addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - -/* * recycle memory containing stuff only required for initialisation */ void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long) &__init_begin, - (unsigned long) &__init_end); + free_initmem_default(POISON_FREE_INITMEM); } /* @@ -171,6 +152,6 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index e7fdc50c4bf0..b3cbc6703837 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -43,6 +43,7 @@ #include <asm/kmap_types.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> 
+#include <asm/sections.h> int mem_init_done; @@ -201,9 +202,6 @@ void __init paging_init(void) /* References to section boundaries */ -extern char _stext, _etext, _edata, __bss_start, _end; -extern char __init_begin, __init_end; - static int __init free_pages_init(void) { int reservedpages, pfn; @@ -263,30 +261,11 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", - (end - start) >> 10); - - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", - ((unsigned long)&__init_end - - (unsigned long)&__init_begin) >> 10); + free_initmem_default(0); } diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 3ac462de53a4..157b931e7b09 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -505,7 +505,6 @@ static void __init map_pages(unsigned long start_vaddr, void free_initmem(void) { - unsigned long addr; unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; @@ -533,19 +532,10 @@ void free_initmem(void) * pages are no-longer executable */ flush_icache_range(init_begin, init_end); - for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - num_physpages++; - totalram_pages++; - } + num_physpages += free_initmem_default(0); /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); - - printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", - (init_end - init_begin) >> 10); } @@ -697,6 +687,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; #ifndef CONFIG_DISCONTIGMEM i = max_mapnr; while (i-- > 0) { @@ -1107,15 +1099,6 @@ void flush_tlb_all(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start >= end) - return; - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - num_physpages++; - totalram_pages++; - } + num_physpages += free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 62e11a32c4c2..4fcbd6b14a3a 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -3,6 +3,7 @@ #ifdef CONFIG_HUGETLB_PAGE #include <asm/page.h> +#include <asm-generic/hugetlb.h> extern struct kmem_cache *hugepte_cache; diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index b3ba5163eae2..9ec3fe174cba 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned 
long end) if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) continue; - ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); - init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); - free_page((unsigned long)__va(addr)); - totalram_pages++; + free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); } } #endif diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 06c8202a69cf..2230fd0ca3e4 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end) if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start)) continue; - ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); - init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); - free_page((unsigned long)__va(addr)); - totalram_pages++; + free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); } } diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index a61b133c4f99..6782221d49bd 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void) end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; /* Free the tmp space we don't need */ - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, NULL); } static int __init kvm_guest_init(void) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 7e2246fb2f31..5a535b73ea18 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys, vmemmap_list = vmem_back; } -int __meminit vmemmap_populate(struct page *start_page, - unsigned long nr_pages, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long start = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. 
*/ start = _ALIGN_DOWN(start, page_size); - pr_debug("vmemmap_populate page %p, %ld pages, node %d\n", - start_page, nr_pages, node); - pr_debug(" -> map %lx..%lx\n", start, end); + pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); for (; start < end; start += page_size) { void *p; @@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index f1f7409a4183..cd76c454942f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -352,13 +352,9 @@ void __init mem_init(void) struct page *page = pfn_to_page(pfn); if (memblock_is_reserved(paddr)) continue; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); reservedpages--; } - totalram_pages += totalhigh_pages; printk(KERN_DEBUG "High memory: %luk\n", totalhigh_pages << (PAGE_SHIFT-10)); } @@ -405,39 +401,14 @@ void __init mem_init(void) void free_initmem(void) { - unsigned long addr; - ppc_md.progress = ppc_printk_progress; - - addr = (unsigned long)__init_begin; - for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing unused kernel memory: %luk freed\n", - ((unsigned long)__init_end - - (unsigned long)__init_begin) >> 10); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - if (start >= end) - return; - - start = _ALIGN_DOWN(start, PAGE_SIZE); - end = _ALIGN_UP(end, PAGE_SIZE); - pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index bba87ca2b4d7..b8020dc7b71e 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -62,14 +62,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; */ static void __init setup_node_to_cpumask_map(void) { - unsigned int node, num = 0; + unsigned int node; /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) { - for_each_node_mask(node, node_possible_map) - num = node; - nr_node_ids = num + 1; - } + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index d30235b7e3f7..db6ac389ef8c 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb; static inline void mpc512x_free_bootmem(struct page *page) { - __ClearPageReserved(page); BUG_ON(PageTail(page)); BUG_ON(atomic_read(&page->_count) > 1); - atomic_set(&page->_count, 1); - __free_page(page); - totalram_pages++; + free_reserved_page(page); } void mpc512x_release_bootmem(void) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c 
b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2372c609fa2b..9a432de363b8 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void) return get_memblock_size(); } +#ifdef CONFIG_MEMORY_HOTREMOVE static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long start, start_pfn; @@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np) ret = pseries_remove_memblock(base, lmb_size); return ret; } +#else +static inline int pseries_remove_memblock(unsigned long base, + unsigned int memblock_size) +{ + return -EOPNOTSUPP; +} +static inline int pseries_remove_memory(struct device_node *np) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ static int pseries_add_memory(struct device_node *np) { diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 593753ee07f3..bd90359d6d22 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, #define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ ({ \ pte_t __pte = huge_ptep_get(__ptep); \ - if (pte_write(__pte)) { \ + if (huge_pte_write(__pte)) { \ huge_ptep_invalidate(__mm, __addr, __ptep); \ set_huge_pte_at(__mm, __addr, __ptep, \ huge_pte_wrprotect(__pte)); \ @@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_invalidate(vma->vm_mm, address, ptep); } +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + pte_t pte; + pmd_t pmd; + + pmd = mk_pmd_phys(page_to_phys(page), pgprot); + pte_val(pte) = pmd_val(pmd); + return pte; +} + +static inline int huge_pte_write(pte_t pte) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + return pmd_write(pmd); +} + +static inline int huge_pte_dirty(pte_t pte) +{ + /* No dirty bit in the segment table entry. */ + return 0; +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + pte_val(pte) = pmd_val(pmd_mkwrite(pmd)); + return pte; +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + /* No dirty bit in the segment table entry. */ + return pte; +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + pte_val(pte) = pmd_val(pmd_modify(pmd, newprot)); + return pte; +} + +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pmd_clear((pmd_t *) ptep); +} + #endif /* _ASM_S390_HUGETLB_H */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a64c0e5428f..b4622915bd15 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -424,6 +424,13 @@ extern unsigned long MODULES_END; #define __S110 PAGE_RW #define __S111 PAGE_RW +/* + * Segment entry (large page) protection definitions. + */ +#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) +#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) +#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) + static inline int mm_exclusive(struct mm_struct *mm) { return likely(mm == current->active_mm && @@ -914,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte) #ifdef CONFIG_HUGETLB_PAGE static inline pte_t pte_mkhuge(pte_t pte) { - /* - * PROT_NONE needs to be remapped from the pte type to the ste type. - * The HW invalid bit is also different for pte and ste. 
The pte - * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE - * bit, so we don't have to clear it. - */ - if (pte_val(pte) & _PAGE_INVALID) { - if (pte_val(pte) & _PAGE_SWT) - pte_val(pte) |= _HPAGE_TYPE_NONE; - pte_val(pte) |= _SEGMENT_ENTRY_INV; - } - /* - * Clear SW pte bits, there are no SW bits in a segment table entry. - */ - pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | - _PAGE_SWR | _PAGE_SWW); - /* - * Also set the change-override bit because we don't need dirty bit - * tracking for hugetlbfs pages. - */ pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); return pte; } @@ -1278,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) } } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - -#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) -#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) -#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) - -#define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); - -#define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); - -static inline int pmd_trans_splitting(pmd_t pmd) -{ - return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; -} - -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t entry) -{ - if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) - pmd_val(entry) |= _SEGMENT_ENTRY_CO; - *pmdp = entry; -} - +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) { /* @@ -1323,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pmd; } -static inline pmd_t pmd_mkhuge(pmd_t pmd) +static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { - pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; - return pmd; + pmd_t __pmd; + pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); + return __pmd; } static inline pmd_t pmd_mkwrite(pmd_t pmd) @@ -1336,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; return pmd; } +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); + +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t entry) +{ + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) + pmd_val(entry) |= _SEGMENT_ENTRY_CO; + *pmdp = entry; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; + return pmd; +} static inline pmd_t pmd_wrprotect(pmd_t pmd) { @@ -1432,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, } } -static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) -{ - pmd_t __pmd; - pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); - return __pmd; -} - #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 532525ec88c1..121089d57802 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -39,7 +39,7 @@ int 
arch_prepare_hugepage(struct page *page) if (!ptep) return -ENOMEM; - pte = mk_pte(page, PAGE_RW); + pte_val(pte) = addr; for (i = 0; i < PTRS_PER_PTE; i++) { set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); pte_val(pte) += PAGE_SIZE; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 9f9c315b4c07..0b09b2342302 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); unsigned long empty_zero_page, zero_page_mask; EXPORT_SYMBOL(empty_zero_page); -static unsigned long __init setup_zero_pages(void) +static void __init setup_zero_pages(void) { struct cpuid cpu_id; unsigned int order; - unsigned long size; struct page *page; int i; @@ -83,14 +82,11 @@ static unsigned long __init setup_zero_pages(void) page = virt_to_page((void *) empty_zero_page); split_page(page, order); for (i = 1 << order; i > 0; i--) { - SetPageReserved(page); + mark_page_reserved(page); page++; } - size = PAGE_SIZE << order; - zero_page_mask = (size - 1) & PAGE_MASK; - - return 1UL << order; + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } /* @@ -147,7 +143,7 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + setup_zero_pages(); /* Setup zeroed pages. */ reservedpages = 0; @@ -166,34 +162,15 @@ void __init mem_init(void) PFN_ALIGN((unsigned long)&_eshared) - 1); } -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr = begin; - - if (begin >= end) - return; - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, - PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); -} - void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)&__init_begin, - (unsigned long)&__init_end); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index ffab84db6907..35837054f734 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size) /* * Add a backed mem_map array to the virtual mem_map array. 
*/ -int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long address, start_addr, end_addr; + unsigned long address = start; pgd_t *pg_dir; pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; int ret = -ENOMEM; - start_addr = (unsigned long) start; - end_addr = (unsigned long) (start + nr); - - for (address = start_addr; address < end_addr;) { + for (address = start; address < end;) { pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { pu_dir = vmem_pud_alloc(); @@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) } address += PAGE_SIZE; } - memset(start, 0, nr * sizeof(struct page)); + memset((void *)start, 0, end - start); ret = 0; out: - flush_tlb_kernel_range(start_addr, end_addr); + flush_tlb_kernel_range(start, end); return ret; } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index cee6bce1e30c..1592aad7dbc4 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(empty_zero_page); static struct kcore_list kcore_mem, kcore_vmalloc; -static unsigned long setup_zero_page(void) +static void setup_zero_page(void) { struct page *page; @@ -52,9 +52,7 @@ static unsigned long setup_zero_page(void) panic("Oh boy, that early out of memory?"); page = virt_to_page((void *) empty_zero_page); - SetPageReserved(page); - - return 1UL; + mark_page_reserved(page); } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -84,7 +82,7 @@ void __init mem_init(void) high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ + setup_zero_page(); /* Setup zeroed pages. 
*/ reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) @@ -109,37 +107,16 @@ void __init mem_init(void) } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ -static void free_init_pages(const char *what, unsigned long begin, unsigned long end) -{ - unsigned long pfn; - - for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { - struct page *page = pfn_to_page(pfn); - void *addr = phys_to_virt(PFN_PHYS(pfn)); - - ClearPageReserved(page); - init_page_count(page); - memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); - __free_page(page); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", - virt_to_phys((void *) start), - virt_to_phys((void *) end)); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif void __init_refok free_initmem(void) { - free_init_pages("unused kernel memory", - __pa(&__init_begin), - __pa(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } unsigned long pgd_current; diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h index b3808c7d67b2..699255d6d1c6 100644 --- a/arch/sh/include/asm/hugetlb.h +++ b/arch/sh/include/asm/hugetlb.h @@ -3,6 +3,7 @@ #include <asm/cacheflush.h> #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 105794037143..20f9ead650d3 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -417,15 +417,13 @@ void __init mem_init(void) for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); - unsigned long node_pages = 0; void *node_high_memory; num_physpages += pgdat->node_present_pages; if (pgdat->node_spanned_pages) - node_pages = free_all_bootmem_node(pgdat); + totalram_pages += free_all_bootmem_node(pgdat); - totalram_pages += node_pages; node_high_memory = (void *)__va((pgdat->node_start_pfn + pgdat->node_spanned_pages) << @@ -501,31 +499,13 @@ void __init mem_init(void) void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk("Freeing unused kernel memory: %ldk freed\n", - ((unsigned long)&__init_end - - (unsigned long)&__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - unsigned long p; - for (p = start; p < end; p += PAGE_SIZE) { - ClearPageReserved(virt_to_page(p)); - init_page_count(virt_to_page(p)); - free_page(p); - totalram_pages++; - } - printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 7eb57d245044..e4cab465b81f 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_SPARC64_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 48e0c030e8f5..4490c397bb5b 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) 
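Both score and sh now free their init sections through free_initmem_default(poison). Judging by the per-arch bodies it replaces, the helper presumably amounts to a free_reserved_area() call over __init_begin..__init_end; a sketch under that assumption (the real definition is in include/linux/mm.h and is not shown in this diff; the label string is a guess):

#include <linux/mm.h>
#include <asm/sections.h>

/* Assumed shape of free_initmem_default(). */
static inline void free_initmem_default_sketch(int poison)
{
	free_reserved_area((unsigned long)&__init_begin,
			   (unsigned long)&__init_end,
			   poison, "unused kernel");
}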
printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); #endif - for (tmp = start_pfn; tmp < end_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; - } + for (tmp = start_pfn; tmp < end_pfn; tmp++) + free_highmem_page(pfn_to_page(tmp)); } void __init mem_init(void) @@ -347,8 +341,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - totalram_pages += totalhigh_pages; - codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 1588d33d5492..6ac99d64a13c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE]; static long __meminitdata addr_start, addr_end; static int __meminitdata node_start; -int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) +int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, + int node) { - unsigned long vstart = (unsigned long) start; - unsigned long vend = (unsigned long) (start + nr); unsigned long phys_start = (vstart - VMEMMAP_BASE); unsigned long phys_end = (vend - VMEMMAP_BASE); unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; @@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void) } } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h index 0f885af2b621..3257733003f8 100644 --- a/arch/tile/include/asm/hugetlb.h +++ b/arch/tile/include/asm/hugetlb.h @@ -16,6 +16,7 @@ #define _ASM_TILE_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index b3b4972c2451..dfd63ce87327 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in) in parallel. Reuse of the virtual address is prevented by leaving it in the global lists until we're done with it. cpa takes care of the direct mappings. 
*/ - read_lock(&vmlist_lock); - for (p = vmlist; p; p = p->next) { - if (p->addr == addr) - break; - } - read_unlock(&vmlist_lock); + p = find_vm_area((void *)addr); if (!p) { pr_err("iounmap: bad address %p\n", addr); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 5abcbfbe7e25..9df292b270a8 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -42,17 +42,12 @@ static unsigned long brk_end; static void setup_highmem(unsigned long highmem_start, unsigned long highmem_len) { - struct page *page; unsigned long highmem_pfn; int i; highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT; - for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) { - page = &mem_map[highmem_pfn + i]; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - } + for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) + free_highmem_page(&mem_map[highmem_pfn + i]); } #endif @@ -73,18 +68,13 @@ void __init mem_init(void) totalram_pages = free_all_bootmem(); max_low_pfn = totalram_pages; #ifdef CONFIG_HIGHMEM - totalhigh_pages = highmem >> PAGE_SHIFT; - totalram_pages += totalhigh_pages; + setup_highmem(end_iomem, highmem); #endif num_physpages = totalram_pages; max_pfn = totalram_pages; printk(KERN_INFO "Memory: %luk available\n", nr_free_pages() << (PAGE_SHIFT-10)); kmalloc_ok = 1; - -#ifdef CONFIG_HIGHMEM - setup_highmem(end_iomem, highmem); -#endif } /* @@ -254,15 +244,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start < end) - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", - (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index de186bde8975..63df12d71ce3 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -66,6 +66,9 @@ void show_mem(unsigned int filter) printk(KERN_DEFAULT "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; @@ -313,24 +316,6 @@ void __init bootmem_init(void) max_pfn = max_high - PHYS_PFN_OFFSET; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); - - return pages; -} - static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -404,9 +389,9 @@ void __init mem_init(void) max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; - /* this will put all unused low memory onto the freelists */ free_unused_memmap(&meminfo); + /* this will put all unused low memory onto the freelists */ totalram_pages += free_all_bootmem(); reserved_pages = free_pages = 0; @@ -491,9 +476,7 @@ void __init mem_init(void) void free_initmem(void) { - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -503,9 +486,7 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) - totalram_pages 
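tile, unicore32 and x86 all stop walking vmlist by hand under vmlist_lock; a single find_vm_area() lookup replaces the loop. The resulting iounmap()-style flow looks roughly like this (per-arch section teardown elided, function name illustrative):

#include <linux/vmalloc.h>
#include <linux/printk.h>

static void iounmap_sketch(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (!vm) {
		pr_err("iounmap: bad address %p\n", addr);
		return;
	}
	/* arch-specific teardown (e.g. section unmapping) would go here */
	vunmap(addr);
}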
+= free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - "initrd"); + free_reserved_area(start, end, 0, "initrd"); } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c index b7a605597b08..13068ee22f33 100644 --- a/arch/unicore32/mm/ioremap.c +++ b/arch/unicore32/mm/ioremap.c @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached); void __uc32_iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); - struct vm_struct **p, *tmp; + struct vm_struct *vm; /* * If this is a section based mapping we need to handle it @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr) * all the mappings before the area can be reclaimed * by someone else. */ - write_lock(&vmlist_lock); - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { - if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { - if (tmp->flags & VM_UNICORE_SECTION_MAPPING) { - unmap_area_sections((unsigned long)tmp->addr, - tmp->size); - } - break; - } - } - write_unlock(&vmlist_lock); + vm = find_vm_area(addr); + if (vm && (vm->flags & VM_IOREMAP) && + (vm->flags & VM_UNICORE_SECTION_MAPPING)) + unmap_area_sections((unsigned long)vm->addr, vm->size); vunmap(addr); } diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h index bdd35dbd0605..a8091216963b 100644 --- a/arch/x86/include/asm/hugetlb.h +++ b/arch/x86/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_X86_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index a0e067d3d96c..c9496313843b 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -43,10 +43,10 @@ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o quiet_cmd_mkcapflags = MKCAP $@ - cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ + cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@ cpufeature = $(src)/../../include/asm/cpufeature.h targets += capflags.c -$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE +$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE $(call if_changed,mkcapflags) diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl deleted file mode 100644 index 091972ef49de..000000000000 --- a/arch/x86/kernel/cpu/mkcapflags.pl +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/perl -w -# -# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h -# - -($in, $out) = @ARGV; - -open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; -open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; - -print OUT "#ifndef _ASM_X86_CPUFEATURE_H\n"; -print OUT "#include <asm/cpufeature.h>\n"; -print OUT "#endif\n"; -print OUT "\n"; -print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; - -%features = (); -$err = 0; - -while (defined($line = <IN>)) { - if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { - $macro = $1; - $feature = "\L$2"; - $tail = $3; - if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { - $feature = "\L$1"; - } - - next if ($feature eq ''); - - if ($features{$feature}++) { - print STDERR "$in: duplicate feature name: $feature\n"; - $err++; - } - printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature; - } -} -print OUT "};\n"; - -close(IN); -close(OUT); - -if ($err) { - unlink($out); - exit(1); -} - -exit(0); diff --git 
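Whether generated by the old Perl script or the new shell script that follows, capflags.c is just a designated-initializer table mapping X86_FEATURE_* bits to printable names. Roughly (two illustrative entries only; the real table has one entry per feature bit that carries a name):

#ifndef _ASM_X86_CPUFEATURE_H
#include <asm/cpufeature.h>
#endif

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU]	= "fpu",
	[X86_FEATURE_PSE]	= "pse",
	/* ... */
};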
a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh new file mode 100644 index 000000000000..2bf616505499 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# +# Generate the x86_cap_flags[] array from include/asm/cpufeature.h +# + +IN=$1 +OUT=$2 + +TABS="$(printf '\t\t\t\t\t')" +trap 'rm "$OUT"' EXIT + +( + echo "#ifndef _ASM_X86_CPUFEATURE_H" + echo "#include <asm/cpufeature.h>" + echo "#endif" + echo "" + echo "const char * const x86_cap_flags[NCAPINTS*32] = {" + + # Iterate through any input lines starting with #define X86_FEATURE_ + sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN | + while read i + do + # Name is everything up to the first whitespace + NAME="$(echo "$i" | sed 's/ .*//')" + + # If the /* comment */ starts with a quote string, grab that. + VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" + [ -z "$VALUE" ] && VALUE="\"$NAME\"" + [ "$VALUE" == '""' ] && continue + + # Name is uppercase, VALUE is all lowercase + VALUE="$(echo "$VALUE" | tr A-Z a-z)" + + TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 )) + printf "\t[%s]%.*s = %s,\n" \ + "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE" + done + echo "};" +) > $OUT + +trap - EXIT diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 6f31ee56c008..252b8f5489ba 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void) add_highpages_with_active_regions(nid, zone_start_pfn, zone_end_pfn); } - totalram_pages += totalhigh_pages; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 59b7fc453277..fdc5dca14fb3 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -515,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; + free_reserved_page(virt_to_page(addr)); } #endif } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 2d19001151d5..3ac7e319918d 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base) pkmap_page_table = pte; } -static void __init add_one_highpage_init(struct page *page) -{ - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; -} - void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { @@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid, start_pfn, end_pfn); for ( ; pfn < e_pfn; pfn++) if (pfn_valid(pfn)) - add_one_highpage_init(pfn_to_page(pfn)); + free_highmem_page(pfn_to_page(pfn)); } } #else diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 474e28f10815..71ff55a1b287 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1011,11 +1011,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct) flush_tlb_all(); } -void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages) +void __ref vmemmap_free(unsigned long start, unsigned long end) { - unsigned long start = (unsigned long)memmap; - unsigned long end = (unsigned long)(memmap + nr_pages); - remove_pagetable(start, end, false); } @@ -1067,10 +1064,9 @@ void __init mem_init(void) /* clear_bss() already 
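On x86, free_init_pages() keeps its poisoning but hands each page back through the new free_reserved_page() helper instead of open-coding ClearPageReserved/init_page_count/free_page. The per-range pattern, pulled out as a standalone sketch:

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/string.h>

/* Sketch of the poison-then-release loop now used by x86's free_init_pages(). */
static void free_poisoned_range_sketch(unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(virt_to_page(addr));
	}
}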
clear the empty_zero_page */ - reservedpages = 0; - - /* this will put all low memory onto the freelists */ register_page_bootmem_info(); + + /* this will put all memory onto the freelists */ totalram_pages = free_all_bootmem(); absent_pages = absent_pages_in_range(0, max_pfn); @@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end; static void __meminitdata *p_start, *p_end; static int __meminitdata node_start; -int __meminit -vmemmap_populate(struct page *start_page, unsigned long size, int node) +static int __meminit vmemmap_populate_hugepages(unsigned long start, + unsigned long end, int node) { - unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long addr; unsigned long next; pgd_t *pgd; pud_t *pud; pmd_t *pmd; - for (; addr < end; addr = next) { - void *p = NULL; + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); pgd = vmemmap_pgd_populate(addr, node); if (!pgd) @@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) if (!pud) return -ENOMEM; - if (!cpu_has_pse) { - next = (addr + PAGE_SIZE) & PAGE_MASK; - pmd = vmemmap_pmd_populate(pud, addr, node); - - if (!pmd) - return -ENOMEM; - - p = vmemmap_pte_populate(pmd, addr, node); + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + void *p; - if (!p) - return -ENOMEM; - - addr_end = addr + PAGE_SIZE; - p_end = p + PAGE_SIZE; - } else { - next = pmd_addr_end(addr, end); - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { + p = vmemmap_alloc_block_buf(PMD_SIZE, node); + if (p) { pte_t entry; - p = vmemmap_alloc_block_buf(PMD_SIZE, node); - if (!p) - return -ENOMEM; - entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE); set_pmd(pmd, __pmd(pte_val(entry))); @@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; - } else - vmemmap_verify((pte_t *)pmd, node, addr, next); + continue; + } + } else if (pmd_large(*pmd)) { + vmemmap_verify((pte_t *)pmd, node, addr, next); + continue; } - + pr_warn_once("vmemmap: falling back to regular page backing\n"); + if (vmemmap_populate_basepages(addr, next, node)) + return -ENOMEM; } - sync_global_pgds((unsigned long)start_page, end - 1); return 0; } +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) +{ + int err; + + if (cpu_has_pse) + err = vmemmap_populate_hugepages(start, end, node); + else + err = vmemmap_populate_basepages(start, end, node); + if (!err) + sync_global_pgds(start, end - 1); + return err; +} + #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 78fe3f1ac49f..9a1e6583910c 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr) in parallel. Reuse of the virtual address is prevented by leaving it in the global lists until we're done with it. cpa takes care of the direct mappings. 
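The x86-64 rewrite above splits vmemmap_populate() into a PMD-sized "hugepages" path plus a fallback to the generic vmemmap_populate_basepages(). That generic helper is not part of this diff; modelled on the !cpu_has_pse branch it replaces, it presumably walks the page tables one PAGE_SIZE step at a time:

#include <linux/mm.h>

/* Sketch of the base-page fallback, modelled on the deleted !cpu_has_pse
 * branch; the real helper lives in mm/sparse-vmemmap.c. */
static int vmemmap_populate_basepages_sketch(unsigned long start,
					     unsigned long end, int node)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pgd_t *pgd = vmemmap_pgd_populate(addr, node);
		pud_t *pud;
		pmd_t *pmd;
		void *p;

		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		p = vmemmap_pte_populate(pmd, addr, node);
		if (!p)
			return -ENOMEM;
	}
	return 0;
}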
*/ - read_lock(&vmlist_lock); - for (p = vmlist; p; p = p->next) { - if (p->addr == (void __force *)addr) - break; - } - read_unlock(&vmlist_lock); + p = find_vm_area((void __force *)addr); if (!p) { printk(KERN_ERR "iounmap: bad address %p\n", addr); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 72fe01e9e414..a71c4e207679 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -114,14 +114,11 @@ void numa_clear_node(int cpu) */ void __init setup_node_to_cpumask_map(void) { - unsigned int node, num = 0; + unsigned int node; /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) { - for_each_node_mask(node, node_possible_map) - num = node; - nr_node_ids = num + 1; - } + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 7a5156ffebb6..bba125b4bb06 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -208,32 +208,17 @@ void __init mem_init(void) highmemsize >> 10); } -void -free_reserved_mem(void *start, void *end) -{ - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page((unsigned long)start); - totalram_pages++; - } -} - #ifdef CONFIG_BLK_DEV_INITRD extern int initrd_is_mapped; void free_initrd_mem(unsigned long start, unsigned long end) { - if (initrd_is_mapped) { - free_reserved_mem((void*)start, (void*)end); - printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10); - } + if (initrd_is_mapped) + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - free_reserved_mem(__init_begin, __init_end); - printk("Freeing unused kernel memory: %zuk freed\n", - (__init_end - __init_begin) >> 10); + free_initmem_default(0); } |
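Finally, arch/x86/mm/numa.c stops open-coding the nr_node_ids calculation and calls the now-generic setup_nr_node_ids(). Based on the loop it replaces, the helper presumably reduces to:

#include <linux/nodemask.h>

/* Assumed body of setup_nr_node_ids(), mirroring the removed x86 loop. */
static void setup_nr_node_ids_sketch(void)
{
	unsigned int node, highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}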