path: root/arch/unicore32/mm/init.c
author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2011-03-25 17:41:20 +0200
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2011-03-25 17:41:20 +0200
commit    7bf7e370d5919112c223a269462cd0b546903829 (patch)
tree      03ccc715239df14ae168277dbccc9d9cf4d8a2c8 /arch/unicore32/mm/init.c
parent    68b1a1e786f29c900fa1c516a402e24f0ece622a (diff)
parent    d39dd11c3e6a7af5c20bfac40594db36cf270f42 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus-1
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6: (9356 commits)
  [media] rc: update for bitop name changes
  fs: simplify iget & friends
  fs: pull inode->i_lock up out of writeback_single_inode
  fs: rename inode_lock to inode_hash_lock
  fs: move i_wb_list out from under inode_lock
  fs: move i_sb_list out from under inode_lock
  fs: remove inode_lock from iput_final and prune_icache
  fs: Lock the inode LRU list separately
  fs: factor inode disposal
  fs: protect inode->i_state with inode->i_lock
  lib, arch: add filter argument to show_mem and fix private implementations
  SLUB: Write to per cpu data when allocating it
  slub: Fix debugobjects with lockless fastpath
  autofs4: Do not potentially dereference NULL pointer returned by fget() in autofs_dev_ioctl_setpipefd()
  autofs4 - remove autofs4_lock
  autofs4 - fix d_manage() return on rcu-walk
  autofs4 - fix autofs4_expire_indirect() traversal
  autofs4 - fix dentry leak in autofs4_expire_direct()
  autofs4 - reinstate last used update on access
  vfs - check non-mountpoint dentry might block in __follow_mount_rcu()
  ...

NOTE! This merge commit was created to fix a compilation error. The block
tree was merged upstream and removed the 'elv_queue_empty()' function which
the new 'mtdswap' driver is using. So a simple merge of the mtd tree with
upstream does not compile. And the mtd tree has already been published, so
rebasing it is not an option.

To fix this unfortunate situation, I had to merge upstream into the
mtd-2.6.git tree without committing, put the fixup patch on top of this,
and then commit this. The result is that we do not have commits which do
not compile. In other words, this merge commit "merges" three things: the
MTD tree, the upstream tree, and the fixup patch.
Diffstat (limited to 'arch/unicore32/mm/init.c')
-rw-r--r--  arch/unicore32/mm/init.c | 517
1 file changed, 517 insertions(+), 0 deletions(-)
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
new file mode 100644
index 000000000000..1fc02633f700
--- /dev/null
+++ b/arch/unicore32/mm/init.c
@@ -0,0 +1,517 @@
+/*
+ * linux/arch/unicore32/mm/init.c
+ *
+ * Copyright (C) 2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+#include <mach/map.h>
+
+#include "mm.h"
+
+static unsigned long phys_initrd_start __initdata = 0x01000000;
+static unsigned long phys_initrd_size __initdata = SZ_8M;
+
+static int __init early_initrd(char *p)
+{
+ unsigned long start, size;
+ char *endp;
+
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+ return 0;
+}
+early_param("initrd", early_initrd);
+
+/*
+ * This keeps memory configuration data used by a couple of memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by uc32_add_memory().
+ */
+struct meminfo meminfo;
+
+void show_mem(unsigned int filter)
+{
+ int free = 0, total = 0, reserved = 0;
+ int shared = 0, cached = 0, slab = 0, i;
+ struct meminfo *mi = &meminfo;
+
+ printk(KERN_DEFAULT "Mem-info:\n");
+ show_free_areas();
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (PageSlab(page))
+ slab++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ page++;
+ } while (page < end);
+ }
+
+ printk(KERN_DEFAULT "%d pages of RAM\n", total);
+ printk(KERN_DEFAULT "%d free pages\n", free);
+ printk(KERN_DEFAULT "%d reserved pages\n", reserved);
+ printk(KERN_DEFAULT "%d slab pages\n", slab);
+ printk(KERN_DEFAULT "%d pages shared\n", shared);
+ printk(KERN_DEFAULT "%d pages swap cached\n", cached);
+}
+
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ *min = -1UL;
+ *max_low = *max_high = 0;
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned long start, end;
+
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
+
+ if (*min > start)
+ *min = start;
+ if (*max_high < end)
+ *max_high = end;
+ if (bank->highmem)
+ continue;
+ if (*max_low < end)
+ *max_low = end;
+ }
+}
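
To see the three limits concretely: min is the lowest pfn of any bank,
max_high the highest pfn of any bank, and max_low the highest pfn of any
non-highmem bank. A self-contained sketch with a hypothetical two-bank
layout (one lowmem, one highmem; the pfn values are made up):

    #include <stdio.h>

    struct bank { unsigned long start_pfn, end_pfn; int highmem; };

    int main(void)
    {
        /* Hypothetical banks: one lowmem, one highmem. */
        struct bank banks[] = { { 0x100, 0x8000, 0 }, { 0x9000, 0xc000, 1 } };
        unsigned long min = -1UL, max_low = 0, max_high = 0;

        for (int i = 0; i < 2; i++) {
            if (min > banks[i].start_pfn)    min = banks[i].start_pfn;
            if (max_high < banks[i].end_pfn) max_high = banks[i].end_pfn;
            if (banks[i].highmem)            continue; /* skip for max_low */
            if (max_low < banks[i].end_pfn)  max_low = banks[i].end_pfn;
        }

        /* min=0x100, max_low=0x8000 (lowmem only), max_high=0xc000 */
        printf("min %#lx, max_low %#lx, max_high %#lx\n", min, max_low, max_high);
        return 0;
    }
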
+
+static void __init uc32_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct memblock_region *reg;
+ unsigned int boot_pages;
+ phys_addr_t bitmap;
+ pg_data_t *pgdat;
+
+ /*
+ * Allocate the bootmem bitmap page. This must be in a region
+ * of memory which has already been mapped.
+ */
+ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+ bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+ __pfn_to_phys(end_pfn));
+
+ /*
+ * Initialise the bootmem allocator, handing the
+ * memory banks over to bootmem.
+ */
+ node_set_online(0);
+ pgdat = NODE_DATA(0);
+ init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
+
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
+ }
+
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
+ for_each_memblock(reserved, reg) {
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ }
+}
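
The bitmap allocated here needs one bit per page frame between start_pfn
and end_pfn, rounded up to whole pages. A worked sketch of that arithmetic
(a simplified model of bootmem_bootmap_pages(), with a hypothetical 128 MiB
of RAM and 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Rough model of bootmem_bootmap_pages(): one bit per page frame,
     * rounded up to whole pages of bitmap. */
    static unsigned long bitmap_pages(unsigned long pages)
    {
        unsigned long bytes = (pages + 7) / 8;
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* 128 MiB of RAM -> 32768 page frames -> 4096-byte bitmap -> 1 page */
        unsigned long pfns = (128UL << 20) >> PAGE_SHIFT;
        printf("%lu pfns need %lu bitmap page(s)\n", pfns, bitmap_pages(pfns));
        return 0;
    }
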
+
+static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
+{
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+ struct memblock_region *reg;
+
+ /*
+ * initialise the zones.
+ */
+ memset(zone_size, 0, sizeof(zone_size));
+
+ /*
+ * The memory size has already been determined. If we need
+ * to do anything fancy with the allocation of this memory
+ * to the zones, now is the time to do it.
+ */
+ zone_size[0] = max_low - min;
+
+ /*
+ * Calculate the size of the holes.
+ * holes = node_size - sum(bank_sizes)
+ */
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
+ }
+
+ /*
+ * Adjust the sizes according to any special requirements for
+ * this machine type.
+ */
+ arch_adjust_zones(zone_size, zhole_size);
+
+ free_area_init_node(0, zone_size, min, zhole_size);
+}
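
The zhole_size arithmetic starts from the zone's spanned size and subtracts
the pages each memory region actually contributes; whatever remains is
hole. A self-contained sketch with a hypothetical two-bank layout and a gap
between the banks (all pfn values are made up):

    #include <stdio.h>

    struct region { unsigned long base_pfn, end_pfn; }; /* [base, end) */

    int main(void)
    {
        /* Hypothetical lowmem zone spanning pfn 0x100..0x900, two banks. */
        struct region mem[] = { { 0x100, 0x400 }, { 0x600, 0x900 } };
        unsigned long min = 0x100, max_low = 0x900;
        unsigned long zone_size = max_low - min;  /* 0x800 pages spanned */
        unsigned long zhole = zone_size;

        for (int i = 0; i < 2; i++) {
            unsigned long start = mem[i].base_pfn;
            unsigned long end = mem[i].end_pfn;
            if (start < max_low) {
                unsigned long low_end = end < max_low ? end : max_low;
                zhole -= low_end - start;         /* subtract present pages */
            }
        }

        /* 0x800 spanned - 0x600 present = 0x200 pages of hole */
        printf("zone %#lx pages, hole %#lx pages\n", zone_size, zhole);
        return 0;
    }
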
+
+int pfn_valid(unsigned long pfn)
+{
+ return memblock_is_memory(pfn << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(pfn_valid);
+
+static void uc32_memory_present(void)
+{
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
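
The comparator computes the difference in a long and clamps it to -1/0/1,
so the implicit conversion to int can never truncate the difference and
flip its sign for widely separated banks. The same pattern works with the
C library's qsort(); a small sketch with made-up banks:

    #include <stdio.h>
    #include <stdlib.h>

    struct bank { unsigned long start_pfn; };

    /* Same clamp pattern as meminfo_cmp(): compute in long, then reduce
     * to -1/0/1 so truncation to int cannot change the ordering. */
    static int bank_cmp(const void *_a, const void *_b)
    {
        const struct bank *a = _a, *b = _b;
        long cmp = (long)a->start_pfn - (long)b->start_pfn;
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
    }

    int main(void)
    {
        struct bank banks[] = { { 0x80000 }, { 0x100 }, { 0x40000 } };

        qsort(banks, 3, sizeof(banks[0]), bank_cmp);
        for (int i = 0; i < 3; i++)
            printf("bank %d starts at pfn %#lx\n", i, banks[i].start_pfn);
        return 0;
    }
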
+
+void __init uc32_memblock_init(struct meminfo *mi)
+{
+ int i;
+
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
+ meminfo_cmp, NULL);
+
+ memblock_init();
+ for (i = 0; i < mi->nr_banks; i++)
+ memblock_add(mi->bank[i].start, mi->bank[i].size);
+
+ /* Register the kernel text, kernel data and initrd with memblock. */
+ memblock_reserve(__pa(_text), _end - _text);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+ /* Now convert initrd to virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ uc32_mm_memblock_reserve();
+
+ memblock_analyze();
+ memblock_dump_all();
+}
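
The ordering above matters: RAM banks are registered first, then the kernel
image and initrd are carved out as reservations, and only then are the
tables finalized with memblock_analyze(). A toy userspace model of that
add-then-reserve pattern (the addresses and sizes are hypothetical, not
taken from this port):

    #include <stdio.h>

    /* Toy model of the memblock pattern used above: register RAM first,
     * then record reservations (kernel image, initrd) carved out of it. */
    struct range { unsigned long base, size; };

    int main(void)
    {
        struct range memory[4], reserved[4];
        int nmem = 0, nres = 0;

        memory[nmem++]   = (struct range){ 0x00000000, 64UL << 20 }; /* 64 MiB bank */
        reserved[nres++] = (struct range){ 0x00008000, 3UL << 20 };  /* kernel image */
        reserved[nres++] = (struct range){ 0x01000000, 8UL << 20 };  /* initrd (SZ_8M) */

        for (int i = 0; i < nmem; i++)
            printf("memory   %#010lx + %#lx\n", memory[i].base, memory[i].size);
        for (int i = 0; i < nres; i++)
            printf("reserved %#010lx + %#lx\n", reserved[i].base, reserved[i].size);
        return 0;
    }
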
+
+void __init bootmem_init(void)
+{
+ unsigned long min, max_low, max_high;
+
+ max_low = max_high = 0;
+
+ find_limits(&min, &max_low, &max_high);
+
+ uc32_bootmem_init(min, max_low);
+
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
+#endif
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(),
+ * so must be done after the fixed reservations
+ */
+ uc32_memory_present();
+
+ /*
+ * sparse_init() needs the bootmem allocator up and running.
+ */
+ sparse_init();
+
+ /*
+ * Now free the memory - free_area_init_node needs
+ * the sparse mem_map arrays initialized by sparse_init()
+ * for memmap_init_zone(), otherwise all PFNs are invalid.
+ */
+ uc32_bootmem_free(min, max_low, max_high);
+
+ high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+
+ /*
+ * This doesn't seem to be used by the Linux memory manager any
+ * more, but is used by ll_rw_block. If we can get rid of it, we
+ * also get rid of some of the stuff above as well.
+ *
+ * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+ * the system, not the maximum PFN.
+ */
+ max_low_pfn = max_low - PHYS_PFN_OFFSET;
+ max_pfn = max_high - PHYS_PFN_OFFSET;
+}
+
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+{
+ unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
+
+ for (; pfn < end; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ pages++;
+ }
+
+ if (size && s)
+ printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+ return pages;
+}
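
The expression (end - pfn) << (PAGE_SHIFT - 10) converts a page count
straight to KiB, since each page is 2^(PAGE_SHIFT-10) KiB (a shift by 2
for 4 KiB pages). A one-line check with hypothetical bounds:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long pfn = 0x1000, end = 0x1400;        /* hypothetical range */
        /* Pages-to-KiB: each page is 2^(PAGE_SHIFT-10) KiB, so shift left. */
        unsigned int size_k = (end - pfn) << (PAGE_SHIFT - 10);
        printf("%lu pages = %uK\n", end - pfn, size_k);  /* 1024 pages = 4096K */
        return 0;
    }
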
+
+static inline void
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct page *start_pg, *end_pg;
+ unsigned long pg, pgend;
+
+ /*
+ * Convert start_pfn/end_pfn to a struct page pointer.
+ */
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
+ end_pg = pfn_to_page(end_pfn);
+
+ /*
+ * Convert to physical addresses, and
+ * round start upwards and end downwards.
+ */
+ pg = PAGE_ALIGN(__pa(start_pg));
+ pgend = __pa(end_pg) & PAGE_MASK;
+
+ /*
+ * If there are free pages between these,
+ * free the section of the memmap array.
+ */
+ if (pg < pgend)
+ free_bootmem(pg, pgend - pg);
+}
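
Rounding the start up and the end down means only memmap pages that lie
wholly inside the unused span are returned to bootmem; partially covered
pages at either edge stay allocated, since entries on them may still be
used for adjacent banks. A worked sketch with made-up physical addresses:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        /* Hypothetical physical addresses bounding the unused memmap span. */
        unsigned long pg    = PAGE_ALIGN(0x80001040);  /* rounds up to 0x80002000 */
        unsigned long pgend = 0x80005f00 & PAGE_MASK;  /* rounds down to 0x80005000 */

        if (pg < pgend) /* only whole pages in between would be freed */
            printf("free memmap %#lx..%#lx (%lu bytes)\n", pg, pgend, pgend - pg);
        return 0;
    }
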
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(struct meminfo *mi)
+{
+ unsigned long bank_start, prev_bank_end = 0;
+ unsigned int i;
+
+ /*
+ * This relies on each bank being in address order.
+ * The banks are sorted previously in bootmem_init().
+ */
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+
+ bank_start = bank_pfn_start(bank);
+
+ /*
+ * If we had a previous bank, and there is a space
+ * between the current bank and the previous, free it.
+ */
+ if (prev_bank_end && prev_bank_end < bank_start)
+ free_memmap(prev_bank_end, bank_start);
+
+ /*
+ * Align up here since the VM subsystem insists that the
+ * memmap entries are valid from the bank end aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+ }
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
+ int i;
+
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+ /* this will put all unused low memory onto the freelists */
+ free_unused_memmap(&meminfo);
+
+ totalram_pages += free_all_bootmem();
+
+ reserved_pages = free_pages = 0;
+
+ for_each_bank(i, &meminfo) {
+ struct membank *bank = &meminfo.bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ if (PageReserved(page))
+ reserved_pages++;
+ else if (!page_count(page))
+ free_pages++;
+ page++;
+ } while (page < end);
+ }
+
+ /*
+ * Since our memory may not be contiguous, calculate the
+ * real number of pages we have in this system
+ */
+ printk(KERN_INFO "Memory:");
+ num_physpages = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
+ }
+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+ printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ free_pages << (PAGE_SHIFT-10),
+ reserved_pages << (PAGE_SHIFT-10),
+ totalhigh_pages << (PAGE_SHIFT-10));
+
+ printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
+
+ VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
+ DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
+ VMALLOC_START, VMALLOC_END,
+ DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
+ PAGE_OFFSET, (unsigned long)high_memory,
+ DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
+ MODULES_VADDR, MODULES_END,
+ DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
+
+ __init_begin, __init_end,
+ DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
+ _stext, _etext,
+ DIV_ROUND_UP((_etext - _stext), SZ_1K),
+ _sdata, _edata,
+ DIV_ROUND_UP((_edata - _sdata), SZ_1K));
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+ BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+ /*
+ * On a machine this small we won't get
+ * anywhere without overcommit, so turn
+ * it on by default.
+ */
+ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
+ }
+}
+
+void free_initmem(void)
+{
+ totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+ __phys_to_pfn(__pa(__init_end)),
+ "init");
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int keep_initrd;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (!keep_initrd)
+ totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+ __phys_to_pfn(__pa(end)),
+ "initrd");
+}
+
+static int __init keepinitrd_setup(char *__unused)
+{
+ keep_initrd = 1;
+ return 1;
+}
+
+__setup("keepinitrd", keepinitrd_setup);
+#endif