path: root/kernel/irq
author		Thomas Gleixner <tglx@linutronix.de>	2016-07-04 17:39:23 +0900
committer	Thomas Gleixner <tglx@linutronix.de>	2016-07-04 12:25:13 +0200
commit		9c2555835bb3d34dfac52a0be943dcc4bedd650f (patch)
tree		1a5a265c2f5c61b478bb1ed5df4ce9d8d15889e7 /kernel/irq
parent		b6140914fd079e43ea75a53429b47128584f033a (diff)
genirq: Introduce IRQD_AFFINITY_MANAGED flag
Interrupts marked with this flag are excluded from user space interrupt
affinity changes. Contrary to the IRQ_NO_BALANCING flag, the kernel internal
affinity mechanism is not blocked.

This flag will be used for multi-queue device interrupts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: linux-nvme@lists.infradead.org
Cc: axboe@fb.com
Cc: agordeev@redhat.com
Link: http://lkml.kernel.org/r/1467621574-8277-3-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
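The flag is tested with the irqd_affinity_is_managed() helper used in the
manage.c hunk below. Its definition lives in include/linux/irq.h, which is
outside the kernel/irq diffstat shown here; the following is only a sketch of
that side of the change, assuming it follows the pattern of the existing
IRQD_* state flags and their inline test helpers.

/*
 * Sketch of the include/linux/irq.h side of this change (not part of the
 * kernel/irq diffstat below); the bit position and helper shape are
 * assumptions modeled on the existing IRQD_* state bits.
 */
enum {
	/* ... existing IRQD_* state bits ... */
	IRQD_AFFINITY_MANAGED		= (1 << 21),
};

static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

With this in place, user space writes to /proc/irq/<N>/smp_affinity fail the
irq_can_set_affinity_usr() check added to proc.c below, while kernel internal
callers of irq_set_affinity() remain unaffected.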
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/internals.h	2
-rw-r--r--	kernel/irq/manage.c	21
-rw-r--r--	kernel/irq/proc.c	2
3 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 09be2c903c6d..b15aa3b617a2 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ef0bc02c3a70..30658e9827f0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
!desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
- return 0;
- return 1;
+ return false;
+ return true;
}
/**
@@ -134,6 +134,21 @@ int irq_can_set_affinity(unsigned int irq)
}
/**
+ * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
+ * @irq: Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return __irq_can_set_affinity(desc) &&
+ !irqd_affinity_is_managed(&desc->irq_data);
+}
+
+/**
* irq_set_thread_affinity - Notify irq threads to adjust affinity
* @desc: irq descriptor which has affitnity changed
*
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4e1b94726818..40bdcdc1f700 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
cpumask_var_t new_value;
int err;
- if (!irq_can_set_affinity(irq) || no_irq_affinity)
+ if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))