Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c      | 101
-rw-r--r--  arch/ia64/mm/discontig.c   | 207
-rw-r--r--  arch/ia64/mm/extable.c     |   1
-rw-r--r--  arch/ia64/mm/fault.c       |  44
-rw-r--r--  arch/ia64/mm/hugetlbpage.c |   1
-rw-r--r--  arch/ia64/mm/init.c        |  73
-rw-r--r--  arch/ia64/mm/ioremap.c     |   6
-rw-r--r--  arch/ia64/mm/numa.c        |   1
-rw-r--r--  arch/ia64/mm/tlb.c         |   1
9 files changed, 224 insertions, 211 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 84fd1c14c8a9..daf977ff2920 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
  * Routines used by ia64 machines with contiguous (or virtually contiguous)
  * memory.
  */
-#include <linux/config.h>
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/mm.h>
@@ -27,7 +26,7 @@
 #include <asm/mca.h>
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -41,14 +40,21 @@ show_mem (void)
 	int i, total = 0, reserved = 0;
 	int shared = 0, cached = 0;
 
-	printk("Mem-info:\n");
+	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	printk(KERN_INFO "Free swap: %6ldkB\n",
+	       nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
+	for (i = 0; i < max_mapnr; i++) {
+		if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+			if (max_gap < LARGE_GAP)
+				continue;
+			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
 			continue;
+		}
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
@@ -57,12 +63,12 @@ show_mem (void)
 		else if (page_count(mem_map + i))
 			shared += page_count(mem_map + i) - 1;
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-	printk("%ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
+	printk(KERN_INFO "%d pages of RAM\n", total);
+	printk(KERN_INFO "%d reserved pages\n", reserved);
+	printk(KERN_INFO "%d pages shared\n", shared);
+	printk(KERN_INFO "%d pages swap cached\n", cached);
+	printk(KERN_INFO "%ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 }
 
 /* physical address where the bootmem map is located */
@@ -212,18 +218,6 @@ count_pages (u64 start, u64 end, void *arg)
 	return 0;
 }
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
-	unsigned long *count = arg;
-
-	if (start < MAX_DMA_ADDRESS)
-		*count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
-	return 0;
-}
-#endif
-
 /*
  * Set up the page tables.
  */
@@ -232,71 +226,46 @@ void __init
 paging_init (void)
 {
 	unsigned long max_dma;
-	unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long max_gap;
-#endif
-
-	/* initialize mem_map[] */
-
-	memset(zones_size, 0, sizeof(zones_size));
+	unsigned long nid = 0;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	num_physpages = 0;
 	efi_memmap_walk(count_pages, &num_physpages);
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	memset(zholes_size, 0, sizeof(zholes_size));
-
-	num_dma_physpages = 0;
-	efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
-	if (max_low_pfn < max_dma) {
-		zones_size[ZONE_DMA] = max_low_pfn;
-		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
-	} else {
-		zones_size[ZONE_DMA] = max_dma;
-		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
-		if (num_physpages > num_dma_physpages) {
-			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-			zholes_size[ZONE_NORMAL] =
-				((max_low_pfn - max_dma) -
-				 (num_physpages - num_dma_physpages));
-		}
-	}
-
-	max_gap = 0;
+	efi_memmap_walk(register_active_ranges, &nid);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_node(0, NODE_DATA(0), zones_size, 0,
-				    zholes_size);
+		free_area_init_nodes(max_zone_pfns);
 	} else {
 		unsigned long map_size;
 
 		/* allocate virtual_mem_map */
-		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+			sizeof(struct page));
 		vmalloc_end -= map_size;
 		vmem_map = (struct page *) vmalloc_end;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
-		NODE_DATA(0)->node_mem_map = vmem_map;
-		free_area_init_node(0, NODE_DATA(0), zones_size,
-				    0, zholes_size);
+		/*
+		 * alloc_node_mem_map makes an adjustment for mem_map
+		 * which isn't compatible with vmem_map.
+		 */
+		NODE_DATA(0)->node_mem_map = vmem_map +
+			find_min_pfn_with_active_regions();
+		free_area_init_nodes(max_zone_pfns);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
-	if (max_low_pfn < max_dma)
-		zones_size[ZONE_DMA] = max_low_pfn;
-	else {
-		zones_size[ZONE_DMA] = max_dma;
-		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-	}
-	free_area_init(zones_size);
+	add_active_range(0, 0, max_low_pfn);
+	free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa3603..d497b6b0f5b2 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
  */
 struct early_node_data {
 	struct ia64_node_data *node_data;
-	pg_data_t *pgdat;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
 	struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
  * start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet. Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
 	unsigned long pernodesize = 0, cpus;
 
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
 	pernode += PERCPU_PAGE_SIZE * cpus;
 	pernode += node * L1_CACHE_BYTES;
 
-	mem_data[node].pgdat = __va(pernode);
+	pgdat_list[node] = __va(pernode);
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
 	mem_data[node].node_data = __va(pernode);
 	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-	mem_data[node].pgdat->bdata = bdp;
+	pgdat_list[node]->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
 	cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 static int __init free_node_bootmem(unsigned long start, unsigned long len,
 				    int node)
 {
-	free_bootmem_node(mem_data[node].pgdat, start, len);
+	free_bootmem_node(pgdat_list[node], start, len);
 
 	return 0;
 }
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
 	int node;
 
 	for_each_online_node(node) {
-		pg_data_t *pdp = mem_data[node].pgdat;
+		pg_data_t *pdp = pgdat_list[node];
 
 		if (node_isset(node, memory_less_mask))
 			continue;
@@ -307,6 +308,27 @@ static void __init reserve_pernode_space(void)
 	}
 }
 
+static void __meminit scatter_node_data(void)
+{
+	pg_data_t **dst;
+	int node;
+
+	/*
+	 * for_each_online_node() can't be used at here.
+	 * node_online_map is not set for hot-added nodes at this time,
+	 * because we are halfway through initialization of the new node's
+	 * structures. If for_each_online_node() is used, a new node's
+	 * pg_data_ptrs will be not initialized. Insted of using it,
+	 * pgdat_list[] is checked.
+	 */
+	for_each_node(node) {
+		if (pgdat_list[node]) {
+			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+			memcpy(dst, pgdat_list, sizeof(pgdat_list));
+		}
+	}
+}
+
 /**
  * initialize_pernode_data - fixup per-cpu & per-node pointers
  *
@@ -317,17 +339,10 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	pg_data_t *pgdat_list[MAX_NUMNODES];
 	int cpu, node;
 
-	for_each_online_node(node)
-		pgdat_list[node] = mem_data[node].pgdat;
+	scatter_node_data();
 
-	/* Copy the pg_data_t list to each node and init the node field */
-	for_each_online_node(node) {
-		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
-		       sizeof(pgdat_list));
-	}
 #ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +387,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 	if (bestnode == -1)
 		bestnode = anynode;
 
-	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
 		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
 	return ptr;
@@ -476,7 +491,7 @@ void __init find_memory(void)
 
 		pernodesize = mem_data[node].pernode_size;
 		map = pernode + pernodesize;
-		init_bootmem_node(mem_data[node].pgdat,
+		init_bootmem_node(pgdat_list[node],
 				  map>>PAGE_SHIFT,
 				  bdp->node_boot_start>>PAGE_SHIFT,
 				  bdp->node_low_pfn);
@@ -519,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	unsigned long end_address, hole_next_pfn;
-	unsigned long stop_address;
-
-	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-	do {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pgd = pgd_offset_k(end_address);
-		if (pgd_none(*pgd)) {
-			end_address += PGDIR_SIZE;
-			continue;
-		}
-
-		pud = pud_offset(pgd, end_address);
-		if (pud_none(*pud)) {
-			end_address += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, end_address);
-		if (pmd_none(*pmd)) {
-			end_address += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-		if (pte_none(*pte)) {
-			end_address += PAGE_SIZE;
-			pte++;
-			if ((end_address < stop_address) &&
-			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-				goto retry_pte;
-			continue;
-		}
-		/* Found next valid vmem_map page */
-		break;
-	} while (end_address < stop_address);
-
-	end_address = min(end_address, stop_address);
-	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-	hole_next_pfn = end_address / sizeof(struct page);
-	return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -594,15 +547,16 @@ void show_mem(void)
 	unsigned long total_present = 0;
 	pg_data_t *pgdat;
 
-	printk("Mem-info:\n");
+	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	printk(KERN_INFO "Free swap: %6ldkB\n",
+	       nr_swap_pages<<(PAGE_SHIFT-10));
+	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
 		unsigned long flags;
 		int shared = 0, cached = 0, reserved = 0;
 
-		printk("Node ID: %d\n", pgdat->node_id);
 		pgdat_resize_lock(pgdat, &flags);
 		present = pgdat->node_present_pages;
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -610,7 +564,8 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
@@ -625,18 +580,17 @@ void show_mem(void)
 		total_reserved += reserved;
 		total_cached += cached;
 		total_shared += shared;
-		printk("\t%ld pages of RAM\n", present);
-		printk("\t%d reserved pages\n", reserved);
-		printk("\t%d pages shared\n", shared);
-		printk("\t%d pages swap cached\n", cached);
+		printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
+		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       present, reserved, shared, cached);
 	}
-	printk("%ld pages of RAM\n", total_present);
-	printk("%d reserved pages\n", total_reserved);
-	printk("%d pages shared\n", total_shared);
-	printk("%d pages swap cached\n", total_cached);
-	printk("Total of %ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
-	printk("%d free buffer pages\n", nr_free_buffer_pages());
+	printk(KERN_INFO "%ld pages of RAM\n", total_present);
+	printk(KERN_INFO "%d reserved pages\n", total_reserved);
+	printk(KERN_INFO "%d pages shared\n", total_shared);
+	printk(KERN_INFO "%d pages swap cached\n", total_cached);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
+	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
 /**
@@ -700,6 +654,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
+	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
@@ -724,10 +679,10 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 void __init paging_init(void)
 {
 	unsigned long max_dma;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long zholes_size[MAX_NR_ZONES];
 	unsigned long pfn_offset = 0;
+	unsigned long max_pfn = 0;
 	int node;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
@@ -736,53 +691,45 @@ void __init paging_init(void)
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+		sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
 
 	for_each_online_node(node) {
-		memset(zones_size, 0, sizeof(zones_size));
-		memset(zholes_size, 0, sizeof(zholes_size));
-
 		num_physpages += mem_data[node].num_physpages;
-
-		if (mem_data[node].min_pfn >= max_dma) {
-			/* All of this node's memory is above ZONE_DMA */
-			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn -
-				mem_data[node].num_physpages;
-		} else if (mem_data[node].max_pfn < max_dma) {
-			/* All of this node's memory is in ZONE_DMA */
-			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn -
-				mem_data[node].num_dma_physpages;
-		} else {
-			/* This node has memory in both zones */
-			zones_size[ZONE_DMA] = max_dma -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-				mem_data[node].num_dma_physpages;
-			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
-			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
-				(mem_data[node].num_physpages -
-				 mem_data[node].num_dma_physpages);
-		}
-
 		pfn_offset = mem_data[node].min_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
 #endif
-		free_area_init_node(node, NODE_DATA(node), zones_size,
-				    pfn_offset, zholes_size);
+		if (mem_data[node].max_pfn > max_pfn)
+			max_pfn = mem_data[node].max_pfn;
 	}
 
+	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_NORMAL] = max_pfn;
+	free_area_init_nodes(max_zone_pfns);
+
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+	unsigned long size = compute_pernodesize(nid);
+
+	return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+	kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+	pgdat_list[update_node] = update_pgdat;
+	scatter_node_data();
+}
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 6d259e34f359..71c50dd8f870 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -5,7 +5,6 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#include <linux/config.h>
 #include <linux/sort.h>
 
 #include <asm/uaccess.h>
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570b8..59f3ab937615 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
 extern void die (char *, struct pt_regs *, long);
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 /*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	/*
 	 * This is to handle the kprobes on user space access instructions
 	 */
-	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
 		SIGSEGV) == NOTIFY_STOP)
 		return;
 
@@ -112,9 +146,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 #	error File is out of sync with <linux/mm.h>. Please update.
 #	endif
 
+	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+		goto bad_area;
+
 	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
-		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -244,7 +280,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
+	if (is_init(current)) {
 		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 8d506710fdbd..eee5c1cfbe32 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -8,7 +8,6 @@
  * Feb, 2004: dynamic hugetlb page size via boot parameter
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..ff87a5cba399 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -4,7 +4,6 @@
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 
@@ -416,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
 
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
@@ -539,6 +593,18 @@ find_largest_hole (u64 start, u64 end, void *arg)
 		last_end = end;
 	return 0;
 }
+
+int __init
+register_active_ranges(u64 start, u64 end, void *nid)
+{
+	BUG_ON(nid == NULL);
+	BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
+
+	add_active_range(*(unsigned long *)nid,
+		__pa(start) >> PAGE_SHIFT,
+		__pa(end) >> PAGE_SHIFT);
+	return 0;
+}
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
@@ -652,7 +718,7 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	pgdat = NODE_DATA(0);
+	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones + ZONE_NORMAL;
 	ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +737,6 @@ int add_memory(u64 start, u64 size)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
 	 */
 	attr = kern_mem_attribute(offset, size);
 	if (attr & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 	else if (attr & EFI_MEMORY_UC)
 		return __ioremap(offset, size);
 
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
 	gran_base = GRANULEROUNDDOWN(offset);
 	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 
 	return __ioremap(offset, size);
 }
@@ -53,7 +53,7 @@ void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
 	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
-		return 0;
+		return NULL;
 
 	return __ioremap(offset, size);
 }
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 4e5c8b36ad93..64e4c21f311c 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -10,7 +10,6 @@
 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
 */
 
-#include <linux/config.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 4dbbca0b5e9c..ffad7624436c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -11,7 +11,6 @@
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>