Field | Value | Date
---|---|---
author | Rusty Russell <rusty@rustcorp.com.au> | 2009-01-01 10:12:26 +1030
committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-01-01 10:12:26 +1030
commit | d036e67b40f52bdd95392390108defbac7e53837 (patch) |
tree | 4a00537671036c955c98891af9f4729332b35c50 /kernel/irq/proc.c |
parent | 6b954823c24f04ed026a8517f6bab5abda279db8 (diff) |
cpumask: convert kernel/irq
Impact: Reduce stack usage, use new cpumask API. ALPHA mod!
Main change is that irq_default_affinity becomes a cpumask_var_t, so
treat it as a pointer (this affects alpha).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
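For readers unfamiliar with the cpumask_var_t conversion this commit relies on, the sketch below is purely illustrative and not part of the patch: it shows the two definitions of cpumask_var_t, simplified from <linux/cpumask.h>, and the allocate/use/free pattern that the rewritten default_affinity_write() follows. The example_use() helper is a hypothetical caller added only for illustration.

```c
/*
 * Illustrative sketch, not part of this commit: why a cpumask_var_t is
 * "treated as a pointer".  Simplified from <linux/cpumask.h>.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Large NR_CPUS: the mask lives in separately allocated storage. */
typedef struct cpumask *cpumask_var_t;
#else
/* Small NR_CPUS: a one-element array, which still decays to a pointer. */
typedef struct cpumask cpumask_var_t[1];
#endif

/*
 * In both configurations the variable is passed to the cpumask helpers
 * without taking its address, and set-up/tear-down goes through
 * alloc_cpumask_var()/free_cpumask_var(), which do nothing in the
 * on-stack case.  example_use() is a hypothetical caller showing the
 * pattern used by the patch below.
 */
static int example_use(void)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);	/* tmp, not &tmp */
	/* ... use tmp ... */

	free_cpumask_var(tmp);
	return 0;
}
```

This is also why alpha is affected: code that previously assigned to irq_default_affinity as a plain struct now has to go through cpumask_copy() on the pointer, exactly as the hunk in default_affinity_write() does.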
Diffstat (limited to 'kernel/irq/proc.c')
-rw-r--r-- | kernel/irq/proc.c | 32 |
1 file changed, 21 insertions, 11 deletions
```diff
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..2abd3a7716ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
```
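As a usage note, not part of the patch: default_affinity_write() backs the /proc/irq/default_smp_affinity file, so the new allocate/parse/validate path runs whenever a process writes a hexadecimal CPU mask there. Below is a minimal user-space sketch, assuming a Linux system that exposes that file and root privileges; the program and the chosen mask value are illustrative only.

```c
/* Hypothetical example program: set the default IRQ affinity mask. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/irq/default_smp_affinity";
	const char mask[] = "1";	/* hex bitmask: CPU 0 only */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel parses this with cpumask_parse_user(); a mask that
	 * selects no online CPU is rejected with -EINVAL by the code above. */
	if (write(fd, mask, strlen(mask)) != (ssize_t)strlen(mask))
		perror("write");
	close(fd);
	return 0;
}
```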