path: root/kernel/irq/spurious.c
author    Thomas Gleixner <tglx@linutronix.de>    2011-02-07 10:34:30 +0100
committer Thomas Gleixner <tglx@linutronix.de>    2011-02-19 12:58:09 +0100
commit    fe200ae48ef5c79bf7941fe8046ff9505c570ff6 (patch)
tree      767d2cf011437a266a655ce2ec39360cb85f7f28 /kernel/irq/spurious.c
parent    d05c65fff0ef672be75429266751f0e015b54d94 (diff)
download  blackbird-op-linux-fe200ae48ef5c79bf7941fe8046ff9505c570ff6.tar.gz
          blackbird-op-linux-fe200ae48ef5c79bf7941fe8046ff9505c570ff6.zip
genirq: Mark polled irqs and defer the real handler
With the chip.end() function gone we might run into a situation where
a poll call runs and the real interrupt comes in, sees IRQ_INPROGRESS
and disables the line. That might be a perfectly working line, which
will then be masked forever.

So mark the interrupt polled while the poll runs. When the real
handler sees IRQ_INPROGRESS it checks the poll flag and waits for the
polling to complete. Add the necessary sanity checks to avoid
deadlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
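The other half of this change lives in the flow handlers, which fall outside the diffstat shown below: when a handler finds IRQ_INPROGRESS set, it has to distinguish a genuine nested interrupt from the spurious poller. A rough sketch of what that call site looks like, assuming a small helper (the name irq_check_poll and the exact call site are illustrative here, since that part of the patch is not shown on this page):

/*
 * Sketch only: decide whether an in-progress interrupt is being
 * polled. If so, wait for the poll to finish instead of treating
 * it as a nested interrupt and disabling the line.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->status & IRQ_POLL_INPROGRESS))
                return false;           /* genuine reentry, not a poll */
        return irq_wait_for_poll(desc);
}

/* In a flow handler, with desc->lock held: */
if (unlikely(desc->status & IRQ_INPROGRESS)) {
        if (!irq_check_poll(desc))
                goto out_unlock;        /* not polled, or the line went away while waiting */
}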
Diffstat (limited to 'kernel/irq/spurious.c')
-rw-r--r--    kernel/irq/spurious.c    51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 56ff8fffb8b0..f749d29bfd81 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -25,12 +25,44 @@ static int irq_poll_cpu;
 static atomic_t irq_poll_active;
 
 /*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+                      "irq poll in progress on cpu %d for irq %d\n",
+                      smp_processor_id(), desc->irq_data.irq))
+                return false;
+
+#ifdef CONFIG_SMP
+        do {
+                raw_spin_unlock(&desc->lock);
+                while (desc->status & IRQ_INPROGRESS)
+                        cpu_relax();
+                raw_spin_lock(&desc->lock);
+        } while (desc->status & IRQ_INPROGRESS);
+        /* Might have been disabled in meantime */
+        return !(desc->status & IRQ_DISABLED) && desc->action;
+#else
+        return false;
+#endif
+}
+
+/*
  * Recovery handler for misrouted interrupts.
  */
 static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
         struct irqaction *action;
-        int ok = 0, work = 0;
+        int ok = 0;
 
         raw_spin_lock(&desc->lock);
@@ -64,10 +96,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
                 goto out;
         }
 
-        /* Honour the normal IRQ locking */
-        desc->status |= IRQ_INPROGRESS;
+        /* Honour the normal IRQ locking and mark it poll in progress */
+        desc->status |= IRQ_INPROGRESS | IRQ_POLL_INPROGRESS;
         do {
-                work++;
                 desc->status &= ~IRQ_PENDING;
                 raw_spin_unlock(&desc->lock);
                 if (handle_IRQ_event(irq, action) != IRQ_NONE)
@@ -76,14 +107,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
                 action = desc->action;
         } while ((desc->status & IRQ_PENDING) && action);
 
-        desc->status &= ~IRQ_INPROGRESS;
-        /*
-         * If we did actual work for the real IRQ line we must let the
-         * IRQ controller clean up too
-         */
-        if (work > 1)
-                irq_end(irq, desc);
-
+        desc->status &= ~(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS);
 out:
         raw_spin_unlock(&desc->lock);
         return ok;
@@ -238,6 +262,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
                     irqreturn_t action_ret)
 {
+        if (desc->status & IRQ_POLL_INPROGRESS)
+                return;
+
         if (unlikely(action_ret != IRQ_HANDLED)) {
                 /*
                  * If we are seeing only the odd spurious IRQ caused by
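The wait loop added in irq_wait_for_poll() is an instance of a generic pattern: drop the lock so the poller can make progress, spin until the busy flag clears, retake the lock, and re-check the flag under the lock, since it can be set again before the lock is reacquired. A minimal user-space model of that pattern, using pthreads and C11 atomics (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool poll_in_progress;    /* models IRQ_INPROGRESS set by the poller */
static atomic_bool line_disabled;       /* models IRQ_DISABLED */

/* Called with `lock` held; returns with `lock` held.
 * True means the line is still live and the caller may run the handler. */
static bool wait_for_poller(void)
{
        do {
                pthread_mutex_unlock(&lock);
                /* Spin without the lock so the poller can finish. */
                while (atomic_load(&poll_in_progress))
                        ;               /* cpu_relax() in the kernel version */
                pthread_mutex_lock(&lock);
                /* The flag can be set again before we reacquire the
                 * lock, so re-check it with the lock held. */
        } while (atomic_load(&poll_in_progress));

        /* The poller may have disabled the line in the meantime. */
        return !atomic_load(&line_disabled);
}

static void *poller(void *arg)
{
        (void)arg;
        atomic_store(&poll_in_progress, true);
        usleep(100000);                 /* pretend to poll the handler */
        atomic_store(&poll_in_progress, false);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, poller, NULL);
        usleep(10000);                  /* let the poll get started */

        pthread_mutex_lock(&lock);      /* the "real interrupt" arrives */
        printf("line still live: %s\n", wait_for_poller() ? "yes" : "no");
        pthread_mutex_unlock(&lock);

        return pthread_join(t, NULL);
}

The last hunk completes the picture: while a descriptor is marked IRQ_POLL_INPROGRESS, note_interrupt() returns early, so handler invocations made on behalf of the poll are excluded from the spurious-interrupt accounting.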