Diffstat (limited to 'arch/ia64')
65 files changed, 185 insertions, 191 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4d4f4188cdf1..95610820041e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -56,10 +56,10 @@ config MMU config NEED_DMA_MAP_STATE def_bool y -config SWIOTLB - bool +config NEED_SG_DMA_LENGTH + def_bool y -config IOMMU_HELPER +config SWIOTLB bool config GENERIC_LOCKBREAK @@ -498,6 +498,14 @@ config HAVE_ARCH_NODEDATA_EXTENSION def_bool y depends on NUMA +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config HAVE_MEMORYLESS_NODES + def_bool y + depends on NUMA + config ARCH_PROC_KCORE_TEXT def_bool y depends on PROC_KCORE diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index e14c492a8a93..4ce8d1358fee 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device) struct ioc *ioc; acpi_status status; u64 hpa, length; - struct acpi_device_info *dev_info; + struct acpi_device_info *adi; status = hp_acpi_csr_space(device->handle, &hpa, &length); if (ACPI_FAILURE(status)) return 1; - status = acpi_get_object_info(device->handle, &dev_info); + status = acpi_get_object_info(device->handle, &adi); if (ACPI_FAILURE(status)) return 1; @@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device) * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI * root bridges, and its CSR space includes the IOC function. */ - if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) { + if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) { hpa += ZX1_IOC_OFFSET; /* zx1 based systems default to kernel page size iommu pages */ if (!iovp_shift) iovp_shift = min(PAGE_SHIFT, 16); } - kfree(dev_info); + kfree(adi); /* * default anything not caught above or specified on cmdline to 4k diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 21adbd7f90f8..837dc82a013e 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ -#define acpi_ht 0 /* no HT-only mode on IA64 */ #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h index c1642fd64029..3ab6d75aa3db 100644 --- a/arch/ia64/include/asm/asmmacro.h +++ b/arch/ia64/include/asm/asmmacro.h @@ -70,12 +70,12 @@ name: * path (ivt.S - TLB miss processing) or in places where it might not be * safe to use a "tpa" instruction (mca_asm.S - error recovery). */ - .section ".data.patch.vtop", "a" // declare section & section attributes + .section ".data..patch.vtop", "a" // declare section & section attributes .previous #define LOAD_PHYSICAL(pr, reg, obj) \ [1:](pr)movl reg = obj; \ - .xdata4 ".data.patch.vtop", 1b-. + .xdata4 ".data..patch.vtop", 1b-. /* * For now, we always put in the McKinley E9 workaround. 
On CPUs that don't need it, @@ -84,11 +84,11 @@ name: #define DO_MCKINLEY_E9_WORKAROUND #ifdef DO_MCKINLEY_E9_WORKAROUND - .section ".data.patch.mckinley_e9", "a" + .section ".data..patch.mckinley_e9", "a" .previous /* workaround for Itanium 2 Errata 9: */ # define FSYS_RETURN \ - .xdata4 ".data.patch.mckinley_e9", 1f-.; \ + .xdata4 ".data..patch.mckinley_e9", 1f-.; \ 1:{ .mib; \ nop.m 0; \ mov r16=ar.pfs; \ @@ -107,11 +107,11 @@ name: * If physical stack register size is different from DEF_NUM_STACK_REG, * dynamically patch the kernel for correct size. */ - .section ".data.patch.phys_stack_reg", "a" + .section ".data..patch.phys_stack_reg", "a" .previous #define LOAD_PHYS_STACK_REG_SIZE(reg) \ [1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \ - .xdata4 ".data.patch.phys_stack_reg", 1b-. + .xdata4 ".data..patch.phys_stack_reg", 1b-. /* * Up until early 2004, use of .align within a function caused bad unwind info. diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 88405cb0832a..4e1948447a00 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -21,8 +21,8 @@ #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i)) diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 6ebc229a1c51..9da3df6f1a52 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -437,17 +437,18 @@ __fls (unsigned long x) * hweightN: returns the hamming weight (i.e. 
the number * of bits set) of a N-bit word */ -static __inline__ unsigned long -hweight64 (unsigned long x) +static __inline__ unsigned long __arch_hweight64(unsigned long x) { unsigned long result; result = ia64_popcnt(x); return result; } -#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) -#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) -#define hweight8(x) (unsigned int) hweight64((x) & 0xfful) +#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful)) +#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful)) +#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful)) + +#include <asm-generic/bitops/const_hweight.h> #endif /* __KERNEL__ */ diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h index e7482bd628ff..988254a7d349 100644 --- a/arch/ia64/include/asm/cache.h +++ b/arch/ia64/include/asm/cache.h @@ -24,6 +24,6 @@ # define SMP_CACHE_BYTES (1 << 3) #endif -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif /* _ASM_IA64_CACHE_H */ diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h index 00eb1b130b63..1ed4c8fedb83 100644 --- a/arch/ia64/include/asm/dmi.h +++ b/arch/ia64/include/asm/dmi.h @@ -1,6 +1,7 @@ #ifndef _ASM_DMI_H #define _ASM_DMI_H 1 +#include <linux/slab.h> #include <asm/io.h> /* Use normal IO mappings for DMI */ diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index a362e67e0ca6..2f229e5de498 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -235,6 +235,7 @@ struct kvm_vm_data { #define KVM_REQ_PTC_G 32 #define KVM_REQ_RESUME 33 +#define KVM_HPAGE_GFN_SHIFT(x) 0 #define KVM_NR_PAGE_SIZES 1 #define KVM_PAGES_PER_HPAGE(x) 1 diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h index f2ca32069b3f..e0de61709cf1 100644 --- a/arch/ia64/include/asm/mmzone.h +++ b/arch/ia64/include/asm/mmzone.h @@ -19,16 +19,12 @@ static inline int pfn_to_nid(unsigned long pfn) { -#ifdef CONFIG_NUMA extern int paddr_to_nid(unsigned long); int nid = paddr_to_nid(pfn << PAGE_SHIFT); if (nid < 0) return 0; else return nid; -#else - return 0; -#endif } #ifdef CONFIG_IA64_DIG /* DIG systems are small */ diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index f7c00a5e0e2b..14aa1c58912b 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -31,7 +31,7 @@ extern void *per_cpu_init(void); #endif /* SMP */ -#define PER_CPU_BASE_SECTION ".data.percpu" +#define PER_CPU_BASE_SECTION ".data..percpu" /* * Be extremely careful when taking the address of this variable! Due to virtual @@ -39,7 +39,10 @@ extern void *per_cpu_init(void); * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly * more efficient. 
*/ -#define __ia64_per_cpu_var(var) var +#define __ia64_per_cpu_var(var) (*({ \ + __verify_pcpu_ptr(&(var)); \ + ((typeof(var) __kernel __force *)&(var)); \ +})) #include <asm-generic/percpu.h> diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h index d8e98961dec7..f299a4fb25c8 100644 --- a/arch/ia64/include/asm/scatterlist.h +++ b/arch/ia64/include/asm/scatterlist.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_SCATTERLIST_H #define _ASM_IA64_SCATTERLIST_H +#include <asm-generic/scatterlist.h> /* * It used to be that ISA_DMA_THRESHOLD had something to do with the * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart @@ -10,7 +11,6 @@ * that's 4GB - 1. */ #define ISA_DMA_THRESHOLD 0xffffffff - -#include <asm-generic/scatterlist.h> +#define ARCH_HAS_SG_CHAIN #endif /* _ASM_IA64_SCATTERLIST_H */ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 8ce2e388e37c..b6a5ba2aca34 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -102,7 +102,7 @@ struct thread_info { #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_NOTIFY_RESUME 6 /* resumption notification requested */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 17 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ #define TIF_FREEZE 20 /* is freezing for suspend */ diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index d323071d0f91..09f646753d1a 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -26,11 +26,6 @@ #define RECLAIM_DISTANCE 15 /* - * Returns the number of the node containing CPU 'cpu' - */ -#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu]) - -/* * Returns a bitmask of CPUs on Node 'node'. */ #define cpumask_of_node(node) ((node) == -1 ? \ diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate index ab9b03a9adcc..ceeffc509764 100644 --- a/arch/ia64/kernel/Makefile.gate +++ b/arch/ia64/kernel/Makefile.gate @@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE $(call if_changed,gate) -# gate-data.o contains the gate DSO image as data in section .data.gate. +# gate-data.o contains the gate DSO image as data in section .data..gate. # We must build gate.so before we can assemble it. 
# Note: kbuild does not track this dependency due to usage of .incbin $(obj)/gate-data.o: $(obj)/gate.so diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c index b7515bc808a8..8b9318d311a0 100644 --- a/arch/ia64/kernel/acpi-ext.c +++ b/arch/ia64/kernel/acpi-ext.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/types.h> +#include <linux/slab.h> #include <linux/acpi.h> #include <asm/acpi-ext.h> diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index f1c9f70b4e45..c6c90f39f4d9 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -44,6 +44,7 @@ #include <linux/efi.h> #include <linux/mmzone.h> #include <linux/nodemask.h> +#include <linux/slab.h> #include <acpi/processor.h> #include <asm/io.h> #include <asm/iosapic.h> @@ -784,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) return 0; } +int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) +{ + if (isa_irq >= 16) + return -1; + *gsi = isa_irq; + return 0; +} + /* * ACPI based hotplug CPU support */ diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 7b435451b3dc..22f61526a8e1 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> @@ -112,7 +113,7 @@ processor_get_freq ( dprintk("processor_get_freq\n"); saved_mask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) goto migrate_end; @@ -120,7 +121,7 @@ processor_get_freq ( ret = processor_get_pstate(&value); if (ret) { - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); printk(KERN_WARNING "get performance failed with error %d\n", ret); ret = 0; @@ -130,7 +131,7 @@ processor_get_freq ( ret = (clock_freq*1000); migrate_end: - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); return ret; } @@ -150,7 +151,7 @@ processor_set_freq ( dprintk("processor_set_freq\n"); saved_mask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) { retval = -EAGAIN; goto migrate_end; @@ -207,7 +208,7 @@ processor_set_freq ( retval = 0; migrate_end: - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); return (retval); } diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c745d0aeb6e0..a0f001928502 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/slab.h> #include <linux/time.h> #include <linux/efi.h> #include <linux/kexec.h> diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S index 258c0a3238fb..b3ef1c72e132 100644 --- a/arch/ia64/kernel/gate-data.S +++ b/arch/ia64/kernel/gate-data.S @@ -1,3 +1,3 @@ - .section .data.gate, "aw" + .section .data..gate, "aw" .incbin "arch/ia64/kernel/gate.so" diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index cf5e0a105e16..245d3e1ec7e1 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S @@ -21,18 +21,18 @@ * to targets outside the shared object) and to avoid multi-phase kernel builds, we * simply create minimalistic "patch lists" in special ELF sections. 
*/ - .section ".data.patch.fsyscall_table", "a" + .section ".data..patch.fsyscall_table", "a" .previous #define LOAD_FSYSCALL_TABLE(reg) \ [1:] movl reg=0; \ - .xdata4 ".data.patch.fsyscall_table", 1b-. + .xdata4 ".data..patch.fsyscall_table", 1b-. - .section ".data.patch.brl_fsys_bubble_down", "a" + .section ".data..patch.brl_fsys_bubble_down", "a" .previous #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ [1:](pr)brl.cond.sptk 0; \ ;; \ - .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-. + .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-. GLOBAL_ENTRY(__kernel_syscall_via_break) .prologue diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 88c64ed47c36..d32b0855110a 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S @@ -33,21 +33,21 @@ SECTIONS */ . = GATE_ADDR + 0x600; - .data.patch : { + .data..patch : { __paravirt_start_gate_mckinley_e9_patchlist = .; - *(.data.patch.mckinley_e9) + *(.data..patch.mckinley_e9) __paravirt_end_gate_mckinley_e9_patchlist = .; __paravirt_start_gate_vtop_patchlist = .; - *(.data.patch.vtop) + *(.data..patch.vtop) __paravirt_end_gate_vtop_patchlist = .; __paravirt_start_gate_fsyscall_patchlist = .; - *(.data.patch.fsyscall_table) + *(.data..patch.fsyscall_table) __paravirt_end_gate_fsyscall_patchlist = .; __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; - *(.data.patch.brl_fsys_bubble_down) + *(.data..patch.brl_fsys_bubble_down) __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; } :readable diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index e253ab8fcbc8..f9efe9739d3f 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * Initial task structure. * * We need to make sure that this is properly aligned due to the way process stacks are - * handled. This is done by having a special ".data.init_task" section... + * handled. This is done by having a special ".data..init_task" section... 
*/ #define init_thread_info init_task_mem.s.thread_info diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 95ac77aeae9b..7ded76658d2d 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -86,6 +86,7 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/bootmem.h> diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index d4093a173a3e..f14c35f9b03a 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -22,7 +22,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel_stat.h> -#include <linux/slab.h> #include <linux/ptrace.h> #include <linux/random.h> /* for rand_initialize_irq() */ #include <linux/signal.h> @@ -30,6 +29,7 @@ #include <linux/threads.h> #include <linux/bitops.h> #include <linux/irq.h> +#include <linux/ratelimit.h> #include <asm/delay.h> #include <asm/intrinsics.h> @@ -468,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) sp = ia64_getreg(_IA64_REG_SP); if ((sp - bsp) < 1024) { - static unsigned char count; - static long last_time; + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); - if (time_after(jiffies, last_time + 5 * HZ)) - count = 0; - if (++count < 5) { - last_time = jiffies; + if (__ratelimit(&ratelimit)) { printk("ia64_handle_irq: DANGER: less than " "1KB of free stack space!!\n" "(bsp=0x%lx, sp=%lx)\n", bsp, sp); diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 179fd122e837..d93e396bf599 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -82,7 +82,7 @@ mov r19=n;; /* prepare to save predicates */ \ br.sptk.many dispatch_to_fault_handler - .section .text.ivt,"ax" + .section .text..ivt,"ax" .align 32768 // align on 32KB boundary .global ia64_ivt diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 378b4833024f..a0220dc5ff42 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -85,6 +85,7 @@ #include <linux/cpumask.h> #include <linux/kdebug.h> #include <linux/cpu.h> +#include <linux/gfp.h> #include <asm/delay.h> #include <asm/machvec.h> diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index f94aaa86933f..09b4d6828c45 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -22,6 +22,7 @@ #include <linux/smp.h> #include <linux/workqueue.h> #include <linux/mm.h> +#include <linux/slab.h> #include <asm/delay.h> #include <asm/machvec.h> diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index 292e214a3b84..d56753a11636 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h @@ -16,7 +16,7 @@ #define ACCOUNT_SYS_ENTER #endif -.section ".data.patch.rse", "a" +.section ".data..patch.rse", "a" .previous /* @@ -215,7 +215,7 @@ (pUStk) extr.u r17=r18,3,6; \ (pUStk) sub r16=r18,r22; \ [1:](pKStk) br.cond.sptk.many 1f; \ - .xdata4 ".data.patch.rse",1b-. \ + .xdata4 ".data..patch.rse",1b-. 
\ ;; \ cmp.ge p6,p7 = 33,r17; \ ;; \ diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 6158560d7f17..92d880c4d3d1 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S @@ -28,7 +28,7 @@ #include "entry.h" #define DATA8(sym, init_value) \ - .pushsection .data.read_mostly ; \ + .pushsection .data..read_mostly ; \ .align 8 ; \ .global sym ; \ sym: ; \ diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 53292abf846c..d9485d952ed0 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -1,6 +1,7 @@ /* Glue code to lib/swiotlb.c */ #include <linux/pci.h> +#include <linux/gfp.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/dma-mapping.h> @@ -30,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = { .unmap_sg = swiotlb_unmap_sg_attrs, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, - .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .dma_supported = swiotlb_dma_supported, diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 703062c44fb9..ab985f785c14 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -41,6 +41,7 @@ #include <linux/rcupdate.h> #include <linux/completion.h> #include <linux/tracehook.h> +#include <linux/slab.h> #include <asm/errno.h> #include <asm/intrinsics.h> diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index d92765cae10a..53f1648c8b81 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -15,11 +15,11 @@ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/personality.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/unistd.h> diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index b61afbbe076f..7c7909f9bc93 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -11,7 +11,6 @@ */ #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> @@ -639,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) */ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_STOPPED && !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { @@ -663,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) * job control stop, so that SIGCONT can be used to wake it up. 
*/ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_TRACED && (child->signal->flags & SIGNAL_STOP_STOPPED)) { diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index e6676fca4828..aa8b5fa1a8de 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -404,10 +404,9 @@ static void call_on_cpu(int cpu, void (*fn)(void *), void *arg) { cpumask_t save_cpus_allowed = current->cpus_allowed; - cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu); - set_cpus_allowed(current, new_cpus_allowed); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); (*fn)(arg); - set_cpus_allowed(current, save_cpus_allowed); + set_cpus_allowed_ptr(current, &save_cpus_allowed); } static void diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e5230b2ff2c5..6a1380e90f87 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -390,6 +390,14 @@ smp_callin (void) fix_b0_for_bsp(); +#ifdef CONFIG_NUMA + /* + * numa_node_id() works after this. + */ + set_numa_node(cpu_to_node_map[cpuid]); + set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); +#endif + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ @@ -632,6 +640,9 @@ void __devinit smp_prepare_boot_cpu(void) { cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_callin_map); +#ifdef CONFIG_NUMA + set_numa_node(cpu_to_node_map[smp_processor_id()]); +#endif per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 47a192781b0a..653b3c46ea82 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -430,18 +430,16 @@ static int __init rtc_init(void) } module_init(rtc_init); +void read_persistent_clock(struct timespec *ts) +{ + efi_gettimeofday(ts); +} + void __init time_init (void) { register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction); - efi_gettimeofday(&xtime); ia64_init_itm(); - - /* - * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the - * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC). 
- */ - set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); } /* diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index b3a5818088d9..0baa1bbb65fe 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/node.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/nodemask.h> @@ -360,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) return 0; oldmask = current->cpus_allowed; - retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); + retval = set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (unlikely(retval)) return retval; retval = cpu_cache_sysfs_init(cpu); - set_cpus_allowed(current, oldmask); + set_cpus_allowed_ptr(current, &oldmask); if (unlikely(retval < 0)) return retval; diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 776dd40397e2..622772b7fb6c 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/tty.h> +#include <linux/ratelimit.h> #include <asm/intrinsics.h> #include <asm/processor.h> @@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) /* * Make sure we log the unaligned access, so that user/sysadmin can notice it and * eventually fix the program. However, we don't want to do that for every access so we - * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be - * either... + * pace it with jiffies. */ -static int -within_logging_rate_limit (void) -{ - static unsigned long count, last_time; - - if (time_after(jiffies, last_time + 5 * HZ)) - count = 0; - if (count < 5) { - last_time = jiffies; - count++; - return 1; - } - return 0; - -} +static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5); void ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) @@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (!no_unaligned_warning && !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) && - within_logging_rate_limit()) + __ratelimit(&logging_rate_limit)) { char buf[200]; /* comm[] is at most 16 bytes... 
*/ size_t len; @@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) } } } else { - if (within_logging_rate_limit()) { + if (__ratelimit(&logging_rate_limit)) { printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n", ifa, regs->cr_iip + ipsr->ri); if (unaligned_dump_stack) diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a595823582d9..c4696d217ce0 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -18,9 +18,9 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/string.h> -#include <linux/slab.h> #include <linux/efi.h> #include <linux/genalloc.h> +#include <linux/gfp.h> #include <asm/page.h> #include <asm/pal.h> #include <asm/system.h> diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 1295ba327f6f..e07218a2577f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -8,7 +8,7 @@ #define IVT_TEXT \ VMLINUX_SYMBOL(__start_ivt_text) = .; \ - *(.text.ivt) \ + *(.text..ivt) \ VMLINUX_SYMBOL(__end_ivt_text) = .; OUTPUT_FORMAT("elf64-ia64-little") @@ -54,8 +54,8 @@ SECTIONS .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP - .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) - { *(.text.lock) } + .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) + { *(.text..lock) } #endif _etext = .; @@ -75,10 +75,10 @@ SECTIONS __stop___mca_table = .; } - .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET) + .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) { __start___phys_stack_reg_patchlist = .; - *(.data.patch.phys_stack_reg) + *(.data..patch.phys_stack_reg) __end___phys_stack_reg_patchlist = .; } @@ -110,24 +110,24 @@ SECTIONS INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) - .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) + .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) { __start___vtop_patchlist = .; - *(.data.patch.vtop) + *(.data..patch.vtop) __end___vtop_patchlist = .; } - .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET) + .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) { __start___rse_patchlist = .; - *(.data.patch.rse) + *(.data..patch.rse) __end___rse_patchlist = .; } - .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) + .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; - *(.data.patch.mckinley_e9) + *(.data..patch.mckinley_e9) __end___mckinley_e9_bundles = .; } @@ -175,17 +175,17 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) + .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __start_gate_section = .; - *(.data.gate) + *(.data..gate) __stop_gate_section = .; #ifdef CONFIG_XEN . 
= ALIGN(PAGE_SIZE); __xen_start_gate_section = .; - *(.data.gate.xen) + *(.data..gate.xen) __xen_stop_gate_section = .; #endif } diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 26e0e089bfe7..f56a6316e134 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -23,8 +23,8 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> -#include <linux/gfp.h> #include <linux/fs.h> +#include <linux/slab.h> #include <linux/smp.h> #include <linux/kvm_host.h> #include <linux/kvm.h> @@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage) VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { + spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } @@ -724,8 +725,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int r; sigset_t sigsaved; - vcpu_load(vcpu); - if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -747,7 +746,6 @@ out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); - vcpu_put(vcpu); return r; } @@ -882,8 +880,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; - vcpu_load(vcpu); - for (i = 0; i < 16; i++) { vpd->vgr[i] = regs->vpd.vgr[i]; vpd->vbgr[i] = regs->vpd.vbgr[i]; @@ -930,8 +926,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); set_bit(KVM_REQ_RESUME, &vcpu->requests); - vcpu_put(vcpu); - return 0; } @@ -979,11 +973,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; + r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { + r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) @@ -1234,7 +1230,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; p_ctx->cr[8] = 0x3c; - /*Initilize region register*/ + /*Initialize region register*/ p_ctx->rr[0] = 0x30; p_ctx->rr[1] = 0x30; p_ctx->rr[2] = 0x30; @@ -1243,7 +1239,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) p_ctx->rr[5] = 0x30; p_ctx->rr[7] = 0x30; - /*Initilize branch register 0*/ + /*Initialize branch register 0*/ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; @@ -1379,7 +1375,7 @@ static void kvm_release_vm_pages(struct kvm *kvm) int i, j; unsigned long base_gfn; - slots = rcu_dereference(kvm->memslots); + slots = kvm_memslots(kvm); for (i = 0; i < slots->nmemslots; i++) { memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; @@ -1535,8 +1531,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, goto out; if (copy_to_user(user_stack, stack, - sizeof(struct kvm_ia64_vcpu_stack))) + sizeof(struct kvm_ia64_vcpu_stack))) { + r = -EFAULT; goto out; + } break; } @@ -1702,7 +1700,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, BUG_ON(!module); if (!kvm_vmm_base) { - printk("kvm: kvm area hasn't been initilized yet!!\n"); + printk("kvm: kvm area hasn't been initialized yet!!\n"); return -EFAULT; } @@ -1797,51 +1795,46 @@ void kvm_arch_exit(void) kvm_vmm_info = NULL; } -static int kvm_ia64_sync_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) 
+static void kvm_ia64_sync_dirty_log(struct kvm *kvm, + struct kvm_memory_slot *memslot) { - struct kvm_memory_slot *memslot; - int r, i; - long n, base; + int i; + long base; + unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); - r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) - goto out; - - memslot = &kvm->memslots->memslots[log->slot]; - r = -ENOENT; - if (!memslot->dirty_bitmap) - goto out; - - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; + spin_lock(&kvm->arch.dirty_log_lock); for (i = 0; i < n/sizeof(long); ++i) { memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; dirty_bitmap[base + i] = 0; } - r = 0; -out: - return r; + spin_unlock(&kvm->arch.dirty_log_lock); } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; - int n; + unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; mutex_lock(&kvm->slots_lock); - spin_lock(&kvm->arch.dirty_log_lock); - r = kvm_ia64_sync_dirty_log(kvm, log); - if (r) + r = -EINVAL; + if (log->slot >= KVM_MEMORY_SLOTS) goto out; + memslot = &kvm->memslots->memslots[log->slot]; + r = -ENOENT; + if (!memslot->dirty_bitmap) + goto out; + + kvm_ia64_sync_dirty_log(kvm, memslot); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; @@ -1849,14 +1842,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { kvm_flush_remote_tlbs(kvm); - memslot = &kvm->memslots->memslots[log->slot]; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); - spin_unlock(&kvm->arch.dirty_log_lock); return r; } @@ -1947,11 +1938,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return vcpu->arch.timer_fired; } -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - return gfn; -} - int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || @@ -1961,9 +1947,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - vcpu_load(vcpu); mp_state->mp_state = vcpu->arch.mp_state; - vcpu_put(vcpu); return 0; } @@ -1994,10 +1978,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, { int r = 0; - vcpu_load(vcpu); vcpu->arch.mp_state = mp_state->mp_state; if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) r = vcpu_reset(vcpu); - vcpu_put(vcpu); return r; } diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c index 7a62f75778c5..f0b9cac82414 100644 --- a/arch/ia64/kvm/vmm.c +++ b/arch/ia64/kvm/vmm.c @@ -51,7 +51,7 @@ static int __init kvm_vmm_init(void) vmm_fpswa_interface = fpswa_interface; /*Register vmm data to kvm side*/ - return kvm_init(&vmm_info, 1024, THIS_MODULE); + return kvm_init(&vmm_info, 1024, 0, THIS_MODULE); } static void __exit kvm_vmm_exit(void) diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S index 40920c630649..24018484c6e9 100644 --- a/arch/ia64/kvm/vmm_ivt.S +++ b/arch/ia64/kvm/vmm_ivt.S @@ -104,7 +104,7 @@ GLOBAL_ENTRY(kvm_vmm_panic) br.call.sptk.many b6=vmm_panic_handler; END(kvm_vmm_panic) - .section .text.ivt,"ax" + .section .text..ivt,"ax" .align 32768 // align on 32KB boundary .global kvm_ia64_ivt diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 
8d586d1e2515..61620323bb60 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include <linux/efi.h> #include <linux/nodemask.h> +#include <linux/slab.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/meminit.h> diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 19261a99e623..0799fea4c588 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if ((vma->vm_flags & mask) != mask) goto bad_area; - survive: /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the @@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re out_of_memory: up_read(&mm->mmap_sem); - if (is_global_init(current)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk(KERN_CRIT "VM: killing process %s\n", current->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); } diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b0f615759e97..1841ee7e65f9 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -14,7 +14,6 @@ #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/sysctl.h> #include <linux/log2.h> #include <asm/mman.h> diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index f3de9d7a98b4..7b3cdc6c6d91 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -22,6 +22,7 @@ #include <linux/smp.h> #include <linux/mm.h> #include <linux/bootmem.h> +#include <linux/slab.h> #include <asm/delay.h> #include <asm/mmu_context.h> @@ -120,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss) ia64_invala(); for (;;) { - asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); + asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); if (time_before(t, serve)) return; cpu_relax(); diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 64aff520b899..aa2533ae7e9e 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) } struct pci_bus * __devinit -pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) +pci_acpi_scan_root(struct acpi_pci_root *root) { + struct acpi_device *device = root->device; + int domain = root->segment; + int bus = root->secondary.start; struct pci_controller *controller; unsigned int windows = 0; struct pci_bus *pbus; diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py index c27849889e19..2bfd941ff7c7 100644 --- a/arch/ia64/scripts/unwcheck.py +++ b/arch/ia64/scripts/unwcheck.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # # Usage: unwcheck.py FILE # diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index c6d6b62db66c..cad775a1a157 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c @@ -19,6 +19,7 @@ #include <linux/bootmem.h> #include <linux/string.h> #include <linux/sched.h> +#include <linux/slab.h> #include <asm/sn/bte.h> diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index 66f633bff059..8cdcb173a138 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -13,6 +13,7 @@ 
#include <asm/sn/sn_sal.h> #include "xtalk/hubdev.h" #include <linux/acpi.h> +#include <linux/slab.h> /* diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 308e6595110e..4433dd019d3c 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -7,6 +7,7 @@ */ #include <linux/bootmem.h> +#include <linux/slab.h> #include <asm/sn/types.h> #include <asm/sn/addrs.h> #include <asm/sn/sn_feature_sets.h> diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index ee774c366a06..98079f29d9a9 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -6,6 +6,7 @@ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. */ +#include <linux/slab.h> #include <asm/sn/types.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 40d6eeda1c4b..13c15d968098 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -12,6 +12,7 @@ #include <linux/spinlock.h> #include <linux/init.h> #include <linux/rculist.h> +#include <linux/slab.h> #include <asm/sn/addrs.h> #include <asm/sn/arch.h> #include <asm/sn/intr.h> diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index fbbfb9701201..ebfdd6a9ae1a 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/cpumask.h> #include <linux/msi.h> +#include <linux/slab.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index d00dfc180021..dbc4cbecb5ed 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -507,7 +507,7 @@ static void __init sn_init_pdas(char **cmdline_p) cnodeid_t cnode; /* - * Allocate & initalize the nodepda for each node. + * Allocate & initialize the nodepda for each node. 
*/ for_each_online_node(cnode) { nodepdaindr[cnode] = diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 55ac3c4e11d2..fa1eceed0d23 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -30,7 +30,6 @@ #include <linux/miscdevice.h> #include <linux/utsname.h> #include <linux/cpumask.h> -#include <linux/smp_lock.h> #include <linux/nodemask.h> #include <linux/smp.h> #include <linux/mutex.h> @@ -629,9 +628,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) else { /* migrate the task before calling SAL */ save_allowed = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); sn_hwperf_call_sal(op_info); - set_cpus_allowed(current, save_allowed); + set_cpus_allowed_ptr(current, &save_allowed); } } r = op_info->ret; @@ -682,8 +681,7 @@ static int sn_hwperf_map_err(int hwperf_err) /* * ioctl for "sn_hwperf" misc device */ -static int -sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) +static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg) { struct sn_hwperf_ioctl_args a; struct cpuinfo_ia64 *cdata; @@ -699,8 +697,6 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) int i; int j; - unlock_kernel(); - /* only user requests are allowed here */ if ((op & SN_HWPERF_OP_MASK) < 10) { r = -EINVAL; @@ -859,12 +855,11 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) error: vfree(p); - lock_kernel(); return r; } static const struct file_operations sn_hwperf_fops = { - .ioctl = sn_hwperf_ioctl, + .unlocked_ioctl = sn_hwperf_ioctl, }; static struct miscdevice sn_hwperf_dev = { diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 98b684928e12..a9d310de57da 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -9,6 +9,7 @@ * a description of how these routines should be used. 
*/ +#include <linux/gfp.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <asm/dma.h> diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index d13e5a22a558..3cb5cf377644 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/types.h> +#include <linux/slab.h> #include <linux/pci.h> #include <asm/sn/addrs.h> #include <asm/sn/geo.h> diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index efb454534e52..4d4536e3b6f3 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/bitmap.h> +#include <linux/slab.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 012f3b82ee55..27faba035f3a 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/interrupt.h> +#include <linux/slab.h> #include <linux/pci.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S index 7d4830afc91d..6f95b6b32a4e 100644 --- a/arch/ia64/xen/gate-data.S +++ b/arch/ia64/xen/gate-data.S @@ -1,3 +1,3 @@ - .section .data.gate.xen, "aw" + .section .data..gate.xen, "aw" .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c index 777dd9a9108b..48cca37625eb 100644 --- a/arch/ia64/xen/grant-table.c +++ b/arch/ia64/xen/grant-table.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/slab.h> #include <linux/mm.h> #include <xen/interface/xen.h> diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index aff8346ea193..b820ed02ab9f 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -14,7 +14,7 @@ #include <linux/init.h> #include <xen/interface/elfnote.h> - .section .data.read_mostly + .section .data..read_mostly .align 8 .global xen_domain_type xen_domain_type: |
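
Two API migrations recur throughout the diff above and may be easier to read in isolation. First, irq_ia64.c and unaligned.c drop their hand-rolled count/jiffies throttling in favour of the generic ratelimit helpers from <linux/ratelimit.h>. The sketch below shows that pattern on its own; example_rs and example_warn() are illustrative names, not code from this tree.

	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <linux/ratelimit.h>

	/*
	 * Allow at most 5 messages per 5*HZ interval, replacing the old
	 * static count/last_time bookkeeping removed by the diff.
	 */
	static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 5);

	static void example_warn(unsigned long addr)
	{
		/* __ratelimit() returns nonzero while the burst budget lasts */
		if (__ratelimit(&example_rs))
			printk(KERN_WARNING
			       "example: rate-limited warning at 0x%lx\n", addr);
	}

Second, acpi-cpufreq.c, salinfo.c, topology.c and sn_hwperf.c switch from set_cpus_allowed(current, cpumask_of_cpu(cpu)) to the pointer-based cpumask API. A minimal sketch of that idiom, closely following salinfo.c's call_on_cpu(); run_on_cpu() and fn are placeholders for the per-CPU work:

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/*
	 * Temporarily bind the current task to @cpu, run fn(arg) there,
	 * then restore the saved affinity mask.
	 */
	static int run_on_cpu(int cpu, void (*fn)(void *), void *arg)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		int ret;

		ret = set_cpus_allowed_ptr(current, cpumask_of(cpu));
		if (ret)
			return ret;
		fn(arg);
		set_cpus_allowed_ptr(current, &saved_mask);
		return 0;
	}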