From 82524746c27fa418c250a56dd7606b9d3fc79826 Mon Sep 17 00:00:00 2001 From: Franck Bui-Huu Date: Mon, 12 May 2008 21:21:05 +0200 Subject: rcu: split list.h and move rcu-protected lists into rculist.h Move rcu-protected lists from list.h into a new header file rculist.h. This is done because lists are a heavily used primitive structure all over the kernel and it's currently impossible to include other header files in list.h without creating circular dependencies. For example, list.h implements rcu-protected lists and uses rcu_dereference() without including rcupdate.h. It actually compiles because users of rcu_dereference() are macros. Other RCU functions could be used too, but probably aren't because of this. Therefore this patch creates rculist.h, which includes rcupdate.h, without too many changes/troubles. Signed-off-by: Franck Bui-Huu Acked-by: Paul E. McKenney Acked-by: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/ia64/sn/kernel/irq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 53351c3cd7b1..96c31b4180c3 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -11,6 +11,7 @@ #include #include #include +#include <linux/rculist.h> #include #include #include -- cgit v1.2.1 From f20d2752980c144c82649eb18746ef0c29f508dd Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Tue, 20 May 2008 13:13:50 +0200 Subject: KVM: ia64: fix zero extending for mmio ld1/2/4 emulation in KVM Only copy in the data actually requested by the instruction emulation and zero-pad the destination register first. This avoids the problem where emulated mmio accesses got garbled data from ld2.acq instructions in the vga console driver. Signed-off-by: Jes Sorensen Acked-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/mmio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 351bf70da463..7f1a858bc69f 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c @@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, if (p->u.ioreq.state == STATE_IORESP_READY) { if (dir == IOREQ_READ) - *dest = p->u.ioreq.data; + /* it's necessary to ensure zero extending */ + *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); } else panic_vm(vcpu); out: -- cgit v1.2.1 From 83014699b06fb9a300d896c7c49fb8be1c6c5ddc Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Wed, 11 Jun 2008 15:24:13 -0700 Subject: [IA64] perfmon: fix async exit bug Move the cleanup of the async queue to the close callback from the flush callback. This avoids losing asynchronous overflow notifications when the file descriptor is shared by multiple processes and one terminates. Signed-off-by: Stephane Eranian Signed-off-by: Tony Luck --- arch/ia64/kernel/perfmon.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 71d05133f556..7714a97b0104 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1864,11 +1864,6 @@ pfm_flush(struct file *filp, fl_owner_t id) * invoked after, it will find an empty queue and no * signal will be sent.
In both case, we are safe */ - if (filp->f_flags & FASYNC) { - DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue)); - pfm_do_fasync (-1, filp, ctx, 0); - } - PROTECT_CTX(ctx, flags); state = ctx->ctx_state; @@ -1999,6 +1994,11 @@ pfm_close(struct inode *inode, struct file *filp) return -EBADF; } + if (filp->f_flags & FASYNC) { + DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue)); + pfm_do_fasync(-1, filp, ctx, 0); + } + PROTECT_CTX(ctx, flags); state = ctx->ctx_state; -- cgit v1.2.1 From 39b8931b5cad9a7cbcd2394a40a088311e783a82 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 9 Jun 2008 16:48:18 -0700 Subject: ACPI: handle invalid ACPI SLIT table This is a SLIT sanity checking patch. It moves the slit_valid() function to generic ACPI code and does sanity checking for both x86 and ia64. It sets up node_distance with LOCAL_DISTANCE and REMOTE_DISTANCE when hitting an invalid SLIT table on ia64. It also cleans up the unused variable localities in acpi_parse_slit() on x86. Signed-off-by: Fenghua Yu Signed-off-by: Andrew Morton Signed-off-by: Len Brown --- arch/ia64/kernel/acpi.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 853d1f11be00..43687cc60dfb 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", len, slit->header.length); - memset(numa_slit, 10, sizeof(numa_slit)); return; } slit_table = slit; @@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void) printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks); - if (!slit_table) + if (!slit_table) { + for (i = 0; i < MAX_NUMNODES; i++) + for (j = 0; j < MAX_NUMNODES; j++) + node_distance(i, j) = i == j ? LOCAL_DISTANCE : + REMOTE_DISTANCE; return; + } + memset(numa_slit, -1, sizeof(numa_slit)); for (i = 0; i < slit_table->locality_count; i++) { if (!pxm_bit_test(i)) -- cgit v1.2.1 From 3463a93def55c309f3c0d0a8aaf216be3be42d64 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Wed, 11 Jun 2008 17:29:27 -0600 Subject: [IA64] Update check_sal_cache_flush to use platform_send_ipi() check_sal_cache_flush is used to detect broken firmware that drops pending interrupts. The old implementation schedules a timer interrupt for itself in the future by getting the current value of the Interval Timer Counter + 1000 cycles, waits for the interrupt to be pended, calls SAL_CACHE_FLUSH, and finally checks to see if the interrupt is still pending. This implementation can cause problems for virtual machine code if the process of scheduling the timer interrupt takes more than 1000 cycles; the virtual machine can end up sleeping for several hundred years while waiting for the ITC to wrap around. The fix is to use platform_send_ipi. The processor will still send an interrupt to itself, using the IA64_IPI_DM_INT delivery mode, which causes the IPI to look like an external interrupt. The rest of the SAL_CACHE_FLUSH + checking to see if the interrupt is still pending remains unchanged. This fix has been boot tested successfully on: - intel tiger2 - hp rx6600 - hp rx5670 The rx5670 has known buggy firmware, where SAL_CACHE_FLUSH drops pending interrupts. A boot test on this machine showed this message on the console: SAL: SAL_CACHE_FLUSH drops interrupts; PAL_CACHE_FLUSH will be used instead Which proves that the self-inflicted IPI approach is viable.
And as expected, the other tested platforms correctly did not display the warning. Signed-off-by: Alex Chiang Signed-off-by: Tony Luck --- arch/ia64/kernel/sal.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index 7e0259709c04..0464173ea568 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c @@ -252,11 +252,10 @@ check_sal_cache_flush (void) local_irq_save(flags); /* - * Schedule a timer interrupt, wait until it's reported, and see if - * SAL_CACHE_FLUSH drops it. + * Send ourselves a timer interrupt, wait until it's reported, and see + * if SAL_CACHE_FLUSH drops it. */ - ia64_set_itv(IA64_TIMER_VECTOR); - ia64_set_itm(ia64_get_itc() + 1000); + platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0); while (!ia64_get_irr(IA64_TIMER_VECTOR)) cpu_relax(); -- cgit v1.2.1 From 732a675a6303156d1a197dc780b0712bd4b49d46 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Sat, 14 Jun 2008 07:57:25 -0500 Subject: [IA64] Fix CONFIG_IA64_SGI_UV build error Fix build error in CONFIG_IA64_SGI_UV config. (GENERIC builds are ok). Signed-off-by: Jack Steiner Signed-off-by: Tony Luck --- arch/ia64/Makefile | 1 + arch/ia64/uv/kernel/setup.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 88f1a55c6c94..e67ee3f27698 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ +core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ core-$(CONFIG_KVM) += arch/ia64/kvm/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c index 9aa743203c3c..cf5f28ae96c4 100644 --- a/arch/ia64/uv/kernel/setup.c +++ b/arch/ia64/uv/kernel/setup.c @@ -17,6 +17,9 @@ DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); +#ifdef CONFIG_IA64_SGI_UV +int sn_prom_type; +#endif struct redir_addr { unsigned long redirect; @@ -64,6 +67,15 @@ void __init uv_setup(char **cmdline_p) m_n_config.s.m_skt = 37; m_n_config.s.n_skt = 0; mmr_base = 0; +#if 0 + /* Need BIOS calls - TDB */ + if (!ia64_sn_is_fake_prom()) + sn_prom_type = 1; + else +#endif + sn_prom_type = 2; + printk(KERN_INFO "Running on medusa with %s PROM\n", + (sn_prom_type == 1) ? "real" : "fake"); } else { get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); node_id.v = uv_read_local_mmr(UVH_NODE_ID); -- cgit v1.2.1 From e0c6d97c65e0784aade7e97b9411f245a6c543e7 Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Fri, 20 Jun 2008 12:02:00 -0700 Subject: [IA64] SN2: security hole in sn2_ptc_proc_write Security hole in sn2_ptc_proc_write It is possible to overrun a buffer with a write to this /proc file. 
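For illustration, the flawed path looks roughly like this (a simplified sketch of sn2_ptc_proc_write(), not the exact kernel source):

static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user,
				  size_t count, loff_t *data)
{
	char optstr[64];

	/* 'count' is caller-controlled but is never checked against the
	 * 64-byte stack buffer, so a write larger than 64 bytes overruns
	 * the stack here ... */
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	/* ... and a zero-length write underruns the buffer here (handled
	 * by a follow-up patch later in this series) */
	optstr[count - 1] = '\0';

	/* parsing of optstr follows in the real function */
	return count;
}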
Signed-off-by: Cliff Wickman Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/sn2/sn2_smp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 49d3120415eb..6dd886c5d860 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -512,6 +512,8 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si int cpu; char optstr[64]; + if (count > sizeof(optstr)) + return -EINVAL; if (copy_from_user(optstr, user, count)) return -EFAULT; optstr[count - 1] = '\0'; -- cgit v1.2.1 From 2826f8c0f4c97b7db33e2a680f184d828eb7a785 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Tue, 24 Jun 2008 11:30:09 -0400 Subject: [IA64] Fix boot failure on ia64/sn2 Call check_sal_cache_flush() after platform_setup() as check_sal_cache_flush() now relies on being able to call platform vector code. Problem was introduced by: 3463a93def55c309f3c0d0a8aaf216be3be42d64 "Update check_sal_cache_flush to use platform_send_ipi()" Signed-off-by: Jes Sorensen Tested-by: Alex Chiang Signed-off-by: Tony Luck --- arch/ia64/kernel/setup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index f48a809c686d..4ae15c8c2488 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -578,8 +578,6 @@ setup_arch (char **cmdline_p) cpu_init(); /* initialize the bootstrap CPU */ mmu_context_init(); /* initialize context_id bitmap */ - check_sal_cache_flush(); - #ifdef CONFIG_ACPI acpi_boot_init(); #endif @@ -607,6 +605,7 @@ setup_arch (char **cmdline_p) ia64_mca_init(); platform_setup(cmdline_p); + check_sal_cache_flush(); paging_init(); } -- cgit v1.2.1 From 8097110d179b874d91c6495330c2b96c991e8c6e Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Tue, 24 Jun 2008 10:20:06 -0700 Subject: [IA64] Handle count==0 in sn2_ptc_proc_write() The fix applied in e0c6d97c65e0784aade7e97b9411f245a6c543e7 "security hole in sn2_ptc_proc_write" didn't take into account the case where count==0 (which results in a buffer underrun when adding the trailing '\0'). Thanks to Andi Kleen for pointing this out. Signed-off-by: Cliff Wickman Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/sn2/sn2_smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 6dd886c5d860..e585f9a2afb9 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -512,7 +512,7 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si int cpu; char optstr[64]; - if (count > sizeof(optstr)) + if (count == 0 || count > sizeof(optstr)) return -EINVAL; if (copy_from_user(optstr, user, count)) return -EFAULT; -- cgit v1.2.1 From e2569b7e572c0e6782380b3fdda901deb175d75a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 24 Jun 2008 10:22:05 +0200 Subject: [IA64] Eliminate NULL test after alloc_bootmem in iosapic_alloc_rte() As noted by Akinobu Mita, alloc_bootmem and related functions never return NULL and always return a zeroed region of memory. Thus a NULL test or memset after calls to these functions is unnecessary.
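The removed pattern has this general shape (an illustrative sketch, not lines quoted verbatim from the tree):

	rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
			    NR_PREALLOCATE_RTE_ENTRIES);
	if (!rte)			/* never taken: alloc_bootmem panics
					 * rather than return NULL */
		return NULL;
	memset(rte, 0, sizeof(*rte));	/* redundant: bootmem memory is
					 * already zeroed */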
Signed-off-by: Julia Lawall Signed-off-by: Tony Luck --- arch/ia64/kernel/iosapic.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 082c31dcfd99..39752cdef6ff 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void) if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) { rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES); - if (!rte) - return NULL; for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++) list_add(&rte->rte_list, &free_rte_list); } -- cgit v1.2.1 From f27b433ef32a77c8cb76f018507453df7c03e552 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:22:30 +0200 Subject: ia64: convert to generic helpers for IPI function calls This converts ia64 to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). Cc: Tony Luck Signed-off-by: Jens Axboe --- arch/ia64/Kconfig | 1 + arch/ia64/kernel/smp.c | 250 +++------------------------------------------ arch/ia64/kernel/smpboot.c | 4 +- 3 files changed, 16 insertions(+), 239 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 16be41446b5b..18bcc10903b4 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING config SMP bool "Symmetric multi-processing support" + select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, say N. If you have a system with more diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 983296f1c813..19152dcbf6e4 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -60,25 +60,9 @@ static struct local_tlb_flush_counts { static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; - -/* - * Structure and data for smp_call_function(). This is designed to minimise static memory - * requirements. It also looks cleaner. - */ -static __cacheline_aligned DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - long wait; - atomic_t started; - atomic_t finished; -}; - -static volatile struct call_data_struct *call_data; - #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 +#define IPI_CALL_FUNC_SINGLE 2 #define IPI_KDUMP_CPU_STOP 3 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ @@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation); extern void cpu_halt (void); -void -lock_ipi_calllock(void) -{ - spin_lock_irq(&call_lock); -} - -void -unlock_ipi_calllock(void) -{ - spin_unlock_irq(&call_lock); -} - -static inline void -handle_call_data(void) -{ - struct call_data_struct *data; - void (*func)(void *info); - void *info; - int wait; - - /* release the 'pointer lock' */ - data = (struct call_data_struct *)call_data; - func = data->func; - info = data->info; - wait = data->wait; - - mb(); - atomic_inc(&data->started); - /* At this point the structure may be gone unless wait is true. */ - (*func)(info); - - /* Notify the sending CPU that the task is done. 
*/ - mb(); - if (wait) - atomic_inc(&data->finished); -} - static void stop_this_cpu(void) { @@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id) ops &= ~(1 << which); switch (which) { - case IPI_CALL_FUNC: - handle_call_data(); - break; - case IPI_CPU_STOP: stop_this_cpu(); break; + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; #ifdef CONFIG_KEXEC case IPI_KDUMP_CPU_STOP: unw_init_running(kdump_cpu_freeze, NULL); @@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id) return IRQ_HANDLED; } + + /* * Called with preemption disabled. */ @@ -360,190 +311,15 @@ smp_flush_tlb_mm (struct mm_struct *mm) on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1); } -/* - * Run a function on a specific CPU - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * Currently unused. - * If true, wait until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until the remote CPU is nearly ready to execute - * or is or has executed. - */ - -int -smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - struct call_data_struct data; - int cpus = 1; - int me = get_cpu(); /* prevent preemption and reschedule on another processor */ - - if (cpuid == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - spin_lock_bh(&call_lock); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ - send_IPI_single(cpuid, IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock_bh(&call_lock); - put_cpu(); - return 0; -} -EXPORT_SYMBOL(smp_call_function_single); - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * The set of cpus to run on. Must not include the current cpu. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * If true, wait (atomically) until function - * has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. 
- */ -int smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +void arch_send_call_function_single_ipi(int cpu) { - struct call_data_struct data; - cpumask_t allbutself; - int cpus; - - spin_lock(&call_lock); - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/ - - /* Send a message to other CPUs */ - if (cpus_equal(mask, allbutself)) - send_IPI_allbutself(IPI_CALL_FUNC); - else - send_IPI_mask(mask, IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock(&call_lock); - return 0; - + send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL(smp_call_function_mask); -/* - * this function sends a 'generic call function' IPI to all other CPUs - * in the system. - */ - -/* - * [SUMMARY] Run a function on all other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * currently unused. - * If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. - * - * Does not return until remote CPUs are nearly ready to execute or are or have - * executed. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int -smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) +void arch_send_call_function_ipi(cpumask_t mask) { - struct call_data_struct data; - int cpus; - - spin_lock(&call_lock); - cpus = num_online_cpus() - 1; - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ - send_IPI_allbutself(IPI_CALL_FUNC); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - call_data = NULL; - - spin_unlock(&call_lock); - return 0; + send_IPI_mask(mask, IPI_CALL_FUNC); } -EXPORT_SYMBOL(smp_call_function); /* * this function calls the 'stop' function on all other CPUs in the system. 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index d7ad42b77d41..eaa1b6795a13 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -395,14 +395,14 @@ smp_callin (void) fix_b0_for_bsp(); - lock_ipi_calllock(); + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); cpu_set(cpuid, cpu_online_map); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); - unlock_ipi_calllock(); + ipi_call_unlock_irq(); smp_setup_percpu_timer(); -- cgit v1.2.1 From 8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 6 Jun 2008 11:18:06 +0200 Subject: smp_call_function: get rid of the unused nonatomic/retry argument It's never used and the comments refer to nonatomic and retry interchangeably. So get rid of it. Acked-by: Jeremy Fitzhardinge Signed-off-by: Jens Axboe --- arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/palinfo.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- arch/ia64/kernel/process.c | 2 +- arch/ia64/kernel/smpboot.c | 2 +- arch/ia64/kernel/uncached.c | 5 ++--- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 2 +- 7 files changed, 8 insertions(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 705176b434b3..9cd818cc7008 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, case CPU_ONLINE: case CPU_ONLINE_FROZEN: smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, - NULL, 1, 0); + NULL, 0); break; } return NOTIFY_OK; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 9dc00f7fe10e..e5c57f413ca2 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) /* will send IPI to other CPU and wait for completion of remote call */ - if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) { + if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) { printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); return 0; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 7714a97b0104..9baa48255c12 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) int ret; DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); - ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); + ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); } #endif /* CONFIG_SMP */ diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index a3a34b4eb038..fabaf08d9a69 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -286,7 +286,7 @@ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ - smp_call_function(do_nothing, NULL, 0, 1); + smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index eaa1b6795a13..9d1d429c6c59 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master) go[MASTER] = 1; - if (smp_call_function_single(master,
sync_master, NULL, 1, 0) < 0) { + if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); return; } diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index e77995a6e3ed..8eff8c1d40a6 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { atomic_set(&uc_pool->status, 0); - status = smp_call_function(uncached_ipi_visibility, uc_pool, - 0, 1); + status = smp_call_function(uncached_ipi_visibility, uc_pool, 1); if (status || atomic_read(&uc_pool->status)) goto failed; } else if (status != PAL_VISIBILITY_OK) @@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) if (status != PAL_STATUS_SUCCESS) goto failed; atomic_set(&uc_pool->status, 0); - status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); + status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); if (status || atomic_read(&uc_pool->status)) goto failed; diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 8cc0c4753d89..636588e7e068 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) if (use_ipi) { /* use an interprocessor interrupt to call SAL */ smp_call_function_single(cpu, sn_hwperf_call_sal, - op_info, 1, 1); + op_info, 1); } else { /* migrate the task before calling SAL */ -- cgit v1.2.1 From 15c8b6c1aaaf1c4edd67e2f02e4d8e1bd1a51c0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2008 09:39:44 +0200 Subject: on_each_cpu(): kill unused 'retry' parameter It's not even passed on to smp_call_function() anymore, since that was removed. So kill it. Acked-by: Jeremy Fitzhardinge Reviewed-by: Paul E. 
McKenney Signed-off-by: Jens Axboe --- arch/ia64/kernel/mca.c | 4 ++-- arch/ia64/kernel/perfmon.c | 4 ++-- arch/ia64/kernel/smp.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 9cd818cc7008..7dd96c127177 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy) static void ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) { - on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); + on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0); } /* @@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) static void ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) { - on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); + on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0); } /* diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9baa48255c12..19d4493c6193 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) } /* save the current system wide pmu states */ - ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1); + ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); if (ret) { DPRINT(("on_each_cpu() failed: %d\n", ret)); goto cleanup_reserve; @@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) pfm_alt_intr_handler = NULL; - ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1); + ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); if (ret) { DPRINT(("on_each_cpu() failed: %d\n", ret)); } diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 19152dcbf6e4..3676468612b6 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -285,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask) void smp_flush_tlb_all (void) { - on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1); + on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1); } void @@ -308,7 +308,7 @@ smp_flush_tlb_mm (struct mm_struct *mm) * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is * rather trivial. */ - on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1); + on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1); } void arch_send_call_function_single_ipi(int cpu) -- cgit v1.2.1 From dd4f0888f8b42a97c93a66617a4f9acaff3089d6 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 30 Jun 2008 15:03:14 -0700 Subject: [IA64] Bugfix for system with 32 cpus On a system where there are no hot pluggable cpus "additional_cpus" is still set to -1 at the point where we call per_cpu_scan_finalize(). If we didn't find an SRAT table and so pick the default "32" for the number of cpus, when we get to: high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); we will end up initializing for just 31 cpus ... and so we will die horribly when bringing up cpu#32. Problem introduced by: 2c6e6db41f01b6b4eb98809350827c9678996698 "Minimize per_cpu reservations." 
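To make the arithmetic concrete (values taken from the description above): with no SRAT table the cpu count defaults to 32, and with additional_cpus still -1 the reservation becomes high_cpu = min(32 + (-1), NR_CPUS) = 31, one cpu short, so bringing up the 32nd cpu runs past the initialized per-cpu areas. Clamping additional_cpus to zero, as in the hunk below, keeps high_cpu at 32.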
Acked-by: Robin Holt Signed-off-by: Tony Luck --- arch/ia64/kernel/setup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4ae15c8c2488..632cda8f2e76 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -547,7 +547,8 @@ setup_arch (char **cmdline_p) # ifdef CONFIG_ACPI_NUMA acpi_numa_init(); per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? - 32 : cpus_weight(early_cpu_possible_map)), additional_cpus); + 32 : cpus_weight(early_cpu_possible_map)), + additional_cpus > 0 ? additional_cpus : 0); # endif #else # ifdef CONFIG_SMP -- cgit v1.2.1 From 3a677d216445dba3332a000063405de3fc135859 Mon Sep 17 00:00:00 2001 From: Doug Chapman Date: Mon, 30 Jun 2008 15:06:48 -0700 Subject: [IA64] export account_system_vtime The symbol account_system_vtime is used by the kvm module but not exported. This breaks building with CONFIG_VIRT_CPU_ACCOUNTING and CONFIG_KVM=m. Signed-off-by: Doug Chapman Acked-by: Hidetoshi Seto Signed-off-by: Tony Luck --- arch/ia64/kernel/time.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 8c73643f2d66..aad1b7b1fff9 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -117,6 +117,7 @@ void account_system_vtime(struct task_struct *tsk) local_irq_restore(flags); } +EXPORT_SYMBOL_GPL(account_system_vtime); /* * Called from the timer interrupt handler to charge accumulated user time -- cgit v1.2.1 From c1e3b377ad48febba6f91b8ae42c44ee4d4ab45e Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 24 Jun 2008 17:58:53 +0800 Subject: ACPI: Create "idle=halt" bootparam "idle=halt" limits the idle loop to using the halt instruction. No MWAIT, no IO accesses, no C-states deeper than C1. If something is broken in the idle code, "idle=halt" is a less severe workaround than "idle=poll", which disables all power savings. Signed-off-by: Zhao Yakui Signed-off-by: Len Brown Signed-off-by: Andi Kleen --- arch/ia64/kernel/process.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index fabaf08d9a69..612b3c4a0603 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -55,6 +55,8 @@ void (*ia64_mark_idle)(int); unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); +unsigned long idle_halt; +EXPORT_SYMBOL(idle_halt); void ia64_do_show_stack (struct unw_frame_info *info, void *arg) -- cgit v1.2.1 From da5e09a1b3e5a9fc0b15a3feb64e921ccc55ba74 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 24 Jun 2008 18:01:09 +0800 Subject: ACPI: Create "idle=nomwait" bootparam "idle=nomwait" disables the use of the MWAIT instruction from both C1 (C1_FFH) and deeper (C2C3_FFH) C-states. When MWAIT is unavailable, the BIOS and OS generally negotiate to use the HALT instruction for C1, and use IO accesses for deeper C-states. This option is useful for power and performance comparisons, and also to work around BIOS bugs where broken MWAIT support is advertised.
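Both options are ordinary kernel command-line parameters; a hypothetical boot entry for such a comparison run would look like: kernel /vmlinuz root=/dev/sda1 ro idle=nomwait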
http://bugzilla.kernel.org/show_bug.cgi?id=10807 http://bugzilla.kernel.org/show_bug.cgi?id=10914 Signed-off-by: Zhao Yakui Signed-off-by: Li Shaohua Signed-off-by: Len Brown Signed-off-by: Andi Kleen --- arch/ia64/kernel/process.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 612b3c4a0603..3ab8373103ec 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -57,6 +57,8 @@ unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); +unsigned long idle_nomwait; +EXPORT_SYMBOL(idle_nomwait); void ia64_do_show_stack (struct unw_frame_info *info, void *arg) -- cgit v1.2.1 From 2f73ccab5628b4f8e8f4b93fea8082dd31a87a10 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 17 Jul 2008 18:09:12 +0200 Subject: fix build error of arch/ia64/kvm/* Fix calls of smp_call_function*() in arch/ia64/kvm for recent API changes. CC [M] arch/ia64/kvm/kvm-ia64.o arch/ia64/kvm/kvm-ia64.c: In function 'handle_global_purge': arch/ia64/kvm/kvm-ia64.c:398: error: too many arguments to function 'smp_call_function_single' arch/ia64/kvm/kvm-ia64.c: In function 'kvm_vcpu_kick': arch/ia64/kvm/kvm-ia64.c:1696: error: too many arguments to function 'smp_call_function_single' Signed-off-by: Takashi Iwai Acked-by: Xiantao Zhang Signed-off-by: Linus Torvalds --- arch/ia64/kvm/kvm-ia64.c | 4 ++-- arch/ia64/kvm/kvm_fw.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 318b81100623..68c978be9a51 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -395,7 +395,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (kvm->vcpus[i]->cpu != -1) { call_data.vcpu = kvm->vcpus[i]; smp_call_function_single(kvm->vcpus[i]->cpu, - vcpu_global_purge, &call_data, 0, 1); + vcpu_global_purge, &call_data, 1); } else printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); @@ -1693,7 +1693,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) wake_up_interruptible(&vcpu->wq); if (vcpu->guest_mode) - smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); + smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); } int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig) diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c index 091f936c4485..0c69d9ec92d4 100644 --- a/arch/ia64/kvm/kvm_fw.c +++ b/arch/ia64/kvm/kvm_fw.c @@ -130,7 +130,7 @@ static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) args.cache_type = gr29; args.operation = gr30; smp_call_function(remote_pal_cache_flush, - (void *)&args, 1, 1); + (void *)&args, 1); if (args.status != 0) printk(KERN_ERR"pal_cache_flush error!," "status:0x%lx\n", args.status); -- cgit v1.2.1
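Taken together, the IPI-helper patches in this series boil down to the following signature changes (a summary sketch assembled from the diffs above):

	/* before */ smp_call_function_single(cpu, func, info, nonatomic, wait);
	/* after  */ smp_call_function_single(cpu, func, info, wait);

	/* before */ smp_call_function(func, info, nonatomic, wait);
	/* after  */ smp_call_function(func, info, wait);

	/* before */ on_each_cpu(func, info, retry, wait);
	/* after  */ on_each_cpu(func, info, wait);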