Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--  arch/parisc/kernel/irq.c  18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ac2c822928c7..29e70e16ede8 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -112,7 +112,7 @@ void cpu_end_irq(unsigned int irq)
 }
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu_dest;
@@ -120,23 +120,25 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision. The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(&irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 	/* whatever mask they set, we just allow one CPU */
 	cpu_dest = first_cpu(*dest);
-	*dest = cpumask_of_cpu(cpu_dest);
-	return 0;
+	return cpu_dest;
 }
 static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-	if (cpu_check_affinity(irq, dest))
+	int cpu_dest;
+
+	cpu_dest = cpu_check_affinity(irq, dest);
+	if (cpu_dest < 0)
 		return;
-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(&irq_desc[irq].affinity, &cpumask_of_cpu(cpu_dest));
 }
 #endif
@@ -295,7 +297,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +354,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, &irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
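
For context, the conversion this patch performs is from treating irq_desc[irq].affinity as a plain cpumask_t value (direct struct assignment, CPU_MASK_ALL, cpumask_of_cpu()) to going through the struct cpumask accessor API (cpumask_setall(), cpumask_copy(), cpumask_of()), and from having cpu_check_affinity() rewrite the caller's mask to having it return the chosen CPU or a negative error. The snippet below is not part of the patch; it is a minimal sketch of the same idiom using the kernel's <linux/cpumask.h> API of that era, and the helper name is made up for illustration.

#include <linux/cpumask.h>

/*
 * example_clamp_affinity() is a hypothetical helper, for illustration
 * only: reduce a requested affinity mask to a single CPU, the same
 * pattern the patch applies in cpu_check_affinity()/cpu_set_affinity_irq().
 */
static void example_clamp_affinity(struct cpumask *affinity,
				   const struct cpumask *requested)
{
	int cpu = cpumask_first(requested);	/* first CPU set in the request */

	if (cpu >= nr_cpu_ids) {
		/* empty request: fall back to "all CPUs", as the
		 * per-CPU IRQ case does with cpumask_setall() */
		cpumask_setall(affinity);
		return;
	}

	/* old style would be:  *affinity = cpumask_of_cpu(cpu);
	 * new style copies through the API from a const single-CPU mask */
	cpumask_copy(affinity, cpumask_of(cpu));
}

In the patch itself the same split shows up as cpu_check_affinity() returning cpu_dest (or -EINVAL for per-CPU IRQs) and cpu_set_affinity_irq() doing the cpumask_copy() only when that return value is non-negative.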