author	Thomas Gleixner <tglx@linutronix.de>	2017-06-20 01:37:39 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2017-06-22 18:21:21 +0200
commit	415fcf1a2293046e0c1f4ab8558a87bad66652b1 (patch)
tree	0e23019f68c72279cdf746716d082f2d39921118 /kernel/irq/cpuhotplug.c
parent	0d3f54257dc300f2db480d6a46b34bdb87f18c1b (diff)
genirq/cpuhotplug: Use effective affinity mask
If the architecture supports the effective affinity mask, migrating
interrupts away which are not targeted by the effective mask is
pointless.

They can stay in the user or system supplied affinity mask, but won't
be targeted at any given point as the affinity setter functions need
to validate against the online cpu mask anyway.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.328488490@linutronix.de
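For context: the new helper in the hunk below relies on irq_data_get_effective_affinity_mask(), whose behaviour depends on CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK. A minimal sketch of the resulting semantics follows; sketch_fixup_mask() is a hypothetical name used only for illustration, while the real accessor performs this fallback internally (which is what the "/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK ... */" comment in the patch refers to).

#include <linux/cpumask.h>
#include <linux/irq.h>

/*
 * Hypothetical sketch of which mask the hot-unplug fixup consults.
 * With CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK the irqchip maintains a
 * separate mask recording what it actually programmed into the
 * hardware; without it, the effective-mask accessor resolves to the
 * general affinity mask, so the new irq_needs_fixup() check degrades
 * gracefully into the old cpumask_test_cpu() test it replaces.
 */
static const struct cpumask *sketch_fixup_mask(struct irq_data *d)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/* The mask the irqchip actually programmed into the hardware */
	return irq_data_get_effective_affinity_mask(d);
#else
	/* Fallback: the user or system supplied affinity mask */
	return irq_data_get_affinity_mask(d);
#endif
}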
Diffstat (limited to 'kernel/irq/cpuhotplug.c')
-rw-r--r--	kernel/irq/cpuhotplug.c	14	+++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index e09cb91a7c8b..0b093db3336b 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -14,6 +14,14 @@
 
 #include "internals.h"
 
+/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
+static inline bool irq_needs_fixup(struct irq_data *d)
+{
+	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+
+	return cpumask_test_cpu(smp_processor_id(), m);
+}
+
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -42,9 +50,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	 * Note: Do not check desc->action as this might be a chained
 	 * interrupt.
 	 */
-	affinity = irq_data_get_affinity_mask(d);
-	if (irqd_is_per_cpu(d) || !irqd_is_started(d) ||
-	    !cpumask_test_cpu(smp_processor_id(), affinity)) {
+	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
 		/*
 		 * If an irq move is pending, abort it if the dying CPU is
 		 * the sole target.
@@ -69,6 +75,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	 */
 	if (irq_fixup_move_pending(desc, true))
 		affinity = irq_desc_get_pending_mask(desc);
+	else
+		affinity = irq_data_get_affinity_mask(d);
 
 	/* Mask the chip for interrupts which cannot move in process context */
 	if (maskchip && chip->irq_mask)
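The effective mask is only as precise as what the irqchip records. The sketch below shows how a set_affinity callback might do that; example_irq_set_affinity() is a made-up name and the hardware programming is elided, but irq_data_update_effective_affinity(), cpumask_any_and(), cpumask_of() and IRQ_SET_MASK_OK are the real kernel interfaces. Recording a single effective target is what lets irq_needs_fixup() skip CPUs that are only nominally present in the affinity mask.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>

/*
 * Hypothetical sketch: hardware that can route an interrupt to exactly
 * one CPU picks a single online CPU from the requested mask and records
 * it as the effective affinity.  On CPU hot-unplug, irq_needs_fixup()
 * then reports true only on that CPU; other CPUs in the user-supplied
 * mask do not trigger a pointless migration.
 */
static int example_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Actual hardware programming elided from this sketch */

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK;
}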