Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                     |  26
-rw-r--r--   arch/ia64/Kconfig.debug               |   5
-rw-r--r--   arch/ia64/hp/common/sba_iommu.c       |   4
-rw-r--r--   arch/ia64/include/asm/io.h            |  14
-rw-r--r--   arch/ia64/include/uapi/asm/socket.h   |   3
-rw-r--r--   arch/ia64/kernel/asm-offsets.c        |   4
-rw-r--r--   arch/ia64/kernel/fsys.S               |  12
-rw-r--r--   arch/ia64/kernel/setup.c              |  11
-rw-r--r--   arch/ia64/mm/contig.c                 |  75
-rw-r--r--   arch/ia64/mm/discontig.c              | 134
-rw-r--r--   arch/ia64/mm/fault.c                  |   2
11 files changed, 61 insertions, 229 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff861420b8f5..8b4a0c1748c0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -4,10 +4,6 @@ config PGTABLE_LEVELS
 	range 3 4 if !IA64_PAGE_SIZE_64KB
 	default 3
 
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
 menu "Processor type and features"
 
 config IA64
@@ -16,6 +12,7 @@ config IA64
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select PCI if (!IA64_HP_SIM)
 	select ACPI if (!IA64_HP_SIM)
+	select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
 	select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
 	select HAVE_UNSTABLE_SCHED_CLOCK
@@ -31,6 +28,7 @@ config IA64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select NO_BOOTMEM
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_SG_CHAIN
@@ -368,10 +366,6 @@ config FORCE_CPEI_RETARGET
 	  This option it useful to enable this feature on older BIOS's as well.
 	  You can also enable this by using boot command line option force_cpei=1.
 
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
@@ -532,8 +526,6 @@ config CRASH_DUMP
 
 source "drivers/firmware/Kconfig"
 
-source "fs/Kconfig.binfmt"
-
 endmenu
 
 menu "Power management and ACPI options"
@@ -574,10 +566,6 @@ endmenu
 
 endif
 
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
 source "arch/ia64/hp/sim/Kconfig"
 
 config MSPEC
@@ -588,13 +576,3 @@ config MSPEC
 	  If you have an ia64 and you want to enable memory special
 	  operations support (formerly known as fetchop), say Y here,
 	  otherwise say N.
-
-source "fs/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index 677c409425df..1371efc9b005 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
 
 choice
 	prompt "Physical memory granularity"
@@ -56,5 +53,3 @@ config IA64_DEBUG_IRQ
 	  Selecting this option turns on bug checking for the IA-64 irq_save
 	  and restore instructions.  It's useful for tracking down spinlock
 	  problems, but slow!  If you're unsure, select N.
-
-endmenu
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index ee5b652d320a..671ce1e3f6f2 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1805,7 +1805,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
 	{ SX2000_IOC_ID, "sx2000", NULL },
 };
 
-static void ioc_init(unsigned long hpa, struct ioc *ioc)
+static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
 {
 	struct ioc_iommu *info;
 
@@ -2002,7 +2002,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 #endif
 }
 
-static void acpi_sba_ioc_add(struct ioc *ioc)
+static void __init acpi_sba_ioc_add(struct ioc *ioc)
 {
 	acpi_handle handle = ioc->handle;
 	acpi_status status;
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index fb0651961e2c..1e6fef69bb01 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -83,12 +83,14 @@ virt_to_phys (volatile void *address)
 {
 	return (unsigned long) address - PAGE_OFFSET;
 }
+#define virt_to_phys virt_to_phys
 
 static inline void*
 phys_to_virt (unsigned long address)
 {
 	return (void *) (address + PAGE_OFFSET);
 }
+#define phys_to_virt phys_to_virt
 
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
@@ -433,9 +435,11 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
 {
 	return ioremap(phys_addr, size);
 }
+#define ioremap ioremap
+#define ioremap_nocache ioremap_nocache
 #define ioremap_cache ioremap_cache
 #define ioremap_uc ioremap_nocache
-
+#define iounmap iounmap
 
 /*
  * String version of IO memory access ops:
@@ -444,6 +448,14 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
 extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
 extern void memset_io(volatile void __iomem *s, int c, long n);
 
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define memset_io memset_io
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#include <asm-generic/io.h>
+#undef PCI_IOBASE
+
 # endif /* __KERNEL__ */
 
 #endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 3efba40adc54..c872c4e6bafb 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -114,4 +114,7 @@
 
 #define SO_ZEROCOPY		60
 
+#define SO_TXTIME		61
+#define SCM_TXTIME		SO_TXTIME
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index f4db2168d1b8..00e8e2a1eb19 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -50,8 +50,7 @@ void foo(void)
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct,
 						clear_child_tid));
-	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
-	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_TASK_THREAD_PID_OFFSET,offsetof (struct task_struct, thread_pid));
 	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
 	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
@@ -68,6 +67,7 @@ void foo(void)
 	DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
 						group_stop_count));
 	DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct,
 						shared_pending));
+	DEFINE(IA64_SIGNAL_PIDS_TGID_OFFSET, offsetof (struct signal_struct, pids[PIDTYPE_TGID]));
 
 	BLANK();
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index fe742ffafc7a..d80c99a5f55d 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -62,16 +62,16 @@ ENTRY(fsys_getpid)
 	.prologue
 	.altrp b6
 	.body
-	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	add r17=IA64_TASK_SIGNAL_OFFSET,r16
 	;;
-	ld8 r17=[r17]				// r17 = current->group_leader
+	ld8 r17=[r17]				// r17 = current->signal
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 	ld4 r9=[r9]
-	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
+	add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	ld8 r17=[r17]				// r17 = current->signal->pids[PIDTYPE_TGID]
 	;;
 	add r8=IA64_PID_LEVEL_OFFSET,r17
 	;;
@@ -96,11 +96,11 @@ ENTRY(fsys_set_tid_address)
 	.altrp b6
 	.body
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
+	add r17=IA64_TASK_THREAD_PID_OFFSET,r16
 	;;
 	ld4 r9=[r9]
 	tnat.z p6,p7=r32		// check argument register for being NaT
-	ld8 r17=[r17]			// r17 = current->pids[PIDTYPE_PID].pid
+	ld8 r17=[r17]			// r17 = current->thread_pid
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
 	add r8=IA64_PID_LEVEL_OFFSET,r17
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad43cbf70628..0e6c2d9fb498 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/reboot.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
 
 	sort_regions(rsvd_region, num_rsvd_regions);
 	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
-}
 
+	/* reserve all regions except the end of memory marker with memblock */
+	for (n = 0; n < num_rsvd_regions - 1; n++) {
+		struct rsvd_region *region = &rsvd_region[n];
+		phys_addr_t addr = __pa(region->start);
+		phys_addr_t size = region->end - region->start;
+
+		memblock_reserve(addr, size);
+	}
+}
 
 /**
  * find_initrd - get initrd parameters from the boot parameter structure
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 7d64b30913d1..e2e40bbd391c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start.  This address must be page-aligned.
- */
-static int __init
-find_bootmap_location (u64 start, u64 end, void *arg)
-{
-	u64 needed = *(unsigned long *)arg;
-	u64 range_start, range_end, free_start;
-	int i;
-
-#if IGNORE_PFN0
-	if (start == PAGE_OFFSET) {
-		start += PAGE_SIZE;
-		if (start >= end)
-			return 0;
-	}
-#endif
-
-	free_start = PAGE_OFFSET;
-
-	for (i = 0; i < num_rsvd_regions; i++) {
-		range_start = max(start, free_start);
-		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
-
-		free_start = PAGE_ALIGN(rsvd_region[i].end);
-
-		if (range_end <= range_start)
-			continue;	/* skip over empty range */
-
-		if (range_end - range_start >= needed) {
-			bootmap_start = __pa(range_start);
-			return -1;	/* done */
-		}
-
-		/* nothing more available in this segment */
-		if (range_end == end)
-			return 0;
-	}
-	return 0;
-}
-
 #ifdef CONFIG_SMP
 static void *cpu_data;
 /**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
 void __init
 find_memory (void)
 {
-	unsigned long bootmap_size;
-
 	reserve_memory();
 
 	/* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
 	max_low_pfn = 0;
 	efi_memmap_walk(find_max_min_low_pfn, NULL);
 	max_pfn = max_low_pfn;
-	/* how many bytes to cover all the pages */
-	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
-	/* look for a location to hold the bootmap */
-	bootmap_start = ~0UL;
-	efi_memmap_walk(find_bootmap_location, &bootmap_size);
-	if (bootmap_start == ~0UL)
-		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);
-
-	/* Free all available memory, then mark bootmem-map as being in use. */
-	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
-	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+	efi_memmap_walk(filter_memory, register_active_ranges);
+#else
+	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+#endif
 
 	find_initrd();
 
@@ -244,11 +186,9 @@ paging_init (void)
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_nodes(max_zone_pfns);
 	} else {
 		unsigned long map_size;
 
@@ -266,13 +206,10 @@ paging_init (void)
 		 */
 		NODE_DATA(0)->node_mem_map = vmem_map +
 			find_min_pfn_with_active_regions();
-		free_area_init_nodes(max_zone_pfns);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
-	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-	free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+	free_area_init_nodes(max_zone_pfns);
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 7d9bd20319ff..1928d5719e41 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
 #include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
@@ -38,9 +39,6 @@ struct early_node_data {
 	struct ia64_node_data *node_data;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA32
-	unsigned long num_dma_physpages;
-#endif
 	unsigned long min_pfn;
 	unsigned long max_pfn;
 };
@@ -60,33 +58,31 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
 	(((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
 
 /**
- * build_node_maps - callback to setup bootmem structs for each node
+ * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
- * We allocate a struct bootmem_data for each piece of memory that we wish to
+ * Detect extents of each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
- * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
- * memory ranges from the caller.
+ * memmap.
 */
 static int __init build_node_maps(unsigned long start, unsigned long len,
 				  int node)
 {
 	unsigned long spfn, epfn, end = start + len;
-	struct bootmem_data *bdp = &bootmem_node_data[node];
 
 	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
 	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
 
-	if (!bdp->node_low_pfn) {
-		bdp->node_min_pfn = spfn;
-		bdp->node_low_pfn = epfn;
+	if (!mem_data[node].min_pfn) {
+		mem_data[node].min_pfn = spfn;
+		mem_data[node].max_pfn = epfn;
 	} else {
-		bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
-		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
+		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
+		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
 	}
 
 	return 0;
@@ -269,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
 	void *cpu_data;
 	int cpus = early_nr_cpus_node(node);
-	struct bootmem_data *bdp = &bootmem_node_data[node];
 
 	mem_data[node].pernode_addr = pernode;
 	mem_data[node].pernode_size = pernodesize;
@@ -284,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 
 	mem_data[node].node_data = __va(pernode);
 	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-	pgdat_list[node]->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
 	cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -325,20 +318,16 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 				     int node)
 {
 	unsigned long spfn, epfn;
-	unsigned long pernodesize = 0, pernode, pages, mapsize;
-	struct bootmem_data *bdp = &bootmem_node_data[node];
+	unsigned long pernodesize = 0, pernode;
 
 	spfn = start >> PAGE_SHIFT;
 	epfn = (start + len) >> PAGE_SHIFT;
 
-	pages = bdp->node_low_pfn - bdp->node_min_pfn;
-	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-
 	/*
 	 * Make sure this memory falls within this node's usable memory
 	 * since we may have thrown some away in build_maps().
 	 */
-	if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
+	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
 		return 0;
 
 	/* Don't setup this node's local space twice... */
@@ -353,32 +342,13 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	pernode = NODEDATA_ALIGN(start, node);
 
 	/* Is this range big enough for what we want to store here? */
-	if (start + len > (pernode + pernodesize + mapsize))
+	if (start + len > (pernode + pernodesize))
 		fill_pernode(node, pernode, pernodesize);
 
 	return 0;
 }
 
 /**
- * free_node_bootmem - free bootmem allocator memory for use
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Simply calls the bootmem allocator to free the specified ranged from
- * the given pg_data_t's bdata struct.  After this function has been called
- * for all the entries in the EFI memory map, the bootmem allocator will
- * be ready to service allocation requests.
- */
-static int __init free_node_bootmem(unsigned long start, unsigned long len,
-				    int node)
-{
-	free_bootmem_node(pgdat_list[node], start, len);
-
-	return 0;
-}
-
-/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
@@ -387,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
 static void __init reserve_pernode_space(void)
 {
-	unsigned long base, size, pages;
-	struct bootmem_data *bdp;
+	unsigned long base, size;
 	int node;
 
 	for_each_online_node(node) {
-		pg_data_t *pdp = pgdat_list[node];
-
 		if (node_isset(node, memory_less_mask))
 			continue;
 
-		bdp = pdp->bdata;
-
-		/* First the bootmem_map itself */
-		pages = bdp->node_low_pfn - bdp->node_min_pfn;
-		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-		base = __pa(bdp->node_bootmem_map);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
-
 		/* Now the per-node space */
 		size = mem_data[node].pernode_size;
 		base = __pa(mem_data[node].pernode_addr);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+		memblock_reserve(base, size);
 	}
 }
@@ -528,6 +487,7 @@ void __init find_memory(void)
 	int node;
 
 	reserve_memory();
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	if (num_online_nodes() == 0) {
 		printk(KERN_ERR "node info missing!\n");
@@ -544,38 +504,8 @@ void __init find_memory(void)
 	efi_memmap_walk(find_max_min_low_pfn, NULL);
 
 	for_each_online_node(node)
-		if (bootmem_node_data[node].node_low_pfn) {
+		if (mem_data[node].min_pfn)
 			node_clear(node, memory_less_mask);
-			mem_data[node].min_pfn = ~0UL;
-		}
-
-	efi_memmap_walk(filter_memory, register_active_ranges);
-
-	/*
-	 * Initialize the boot memory maps in reverse order since that's
-	 * what the bootmem allocator expects
-	 */
-	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
-		unsigned long pernode, pernodesize, map;
-		struct bootmem_data *bdp;
-
-		if (!node_online(node))
-			continue;
-		else if (node_isset(node, memory_less_mask))
-			continue;
-
-		bdp = &bootmem_node_data[node];
-		pernode = mem_data[node].pernode_addr;
-		pernodesize = mem_data[node].pernode_size;
-		map = pernode + pernodesize;
-
-		init_bootmem_node(pgdat_list[node],
-				  map>>PAGE_SHIFT,
-				  bdp->node_min_pfn,
-				  bdp->node_low_pfn);
-	}
-
-	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
 	reserve_pernode_space();
 	memory_less_nodes();
@@ -655,36 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
 }
 
 /**
- * count_node_pages - callback to build per-node memory info structures
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Each node has it's own number of physical pages, DMAable pages, start, and
- * end page frame number.  This routine will be called by call_pernode_memory()
- * for each piece of usable memory and will setup these values for each node.
- * Very similar to build_maps().
- */
-static __init int count_node_pages(unsigned long start, unsigned long len, int node)
-{
-	unsigned long end = start + len;
-
-#ifdef CONFIG_ZONE_DMA32
-	if (start <= __pa(MAX_DMA_ADDRESS))
-		mem_data[node].num_dma_physpages +=
-			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
-#endif
-	start = GRANULEROUNDDOWN(start);
-	end = GRANULEROUNDUP(end);
-	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
-				     end >> PAGE_SHIFT);
-	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
-				     start >> PAGE_SHIFT);
-
-	return 0;
-}
-
-/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
@@ -700,8 +600,6 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
-
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 817fa120645f..a9d55ad8d67b 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -86,7 +86,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct vm_area_struct *vma, *prev_vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long mask;
-	int fault;
+	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) |