author | Thomas Gleixner <tglx@linutronix.de> | 2011-02-02 21:41:27 +0000 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-03-28 16:55:11 +0200 |
commit | 6829310548a76d343205029bb41c14e75bf6a7fb (patch) | |
tree | a12c2f6d6fb9b25e6c289e463fa690d6bdbcb501 /arch/arm/mach-ns9xxx | |
parent | f9ba4475f95b135e6f68e74d59bba92fd35ca835 (diff) | |
arm: Ns9xxx: Remove private irq flow handler
handle_prio_irq is almost identical to handle_fasteoi_irq (a simplified
sketch of the generic flow follows the list below). The subtle
differences are:
1) The handler checks for IRQ_DISABLED after the device handler has
been called and masks the interrupt if the flag is set.
2) When the handler sees IRQ_DISABLED on entry it masks the interrupt
in the same way as handle_fasteoi_irq, but does not set the
IRQ_PENDING flag.
3) Instead of gracefully handling a recursive interrupt it crashes the
kernel.
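For reference, a simplified sketch of what the generic handle_fasteoi_irq
flow of that kernel generation does. This is not verbatim kernel code
(debug hooks and some error handling are omitted); flag and field names
follow the pre-cleanup irq_desc conventions visible in the diff below.

```c
/*
 * Simplified sketch of the generic handle_fasteoi_irq flow of that era,
 * for comparison with the private handler removed below. Not verbatim
 * kernel code.
 */
static void fasteoi_flow_sketch(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Recursive entry is handled gracefully instead of BUG()ing. */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Disabled or no handler: mark pending and mask, so the generic
	 * code can resend the interrupt later if necessary. */
	if (unlikely(!desc->action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock(&desc->lock);

	handle_IRQ_event(irq, desc->action);

	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	/* EOI is signalled unconditionally, matching the private
	 * handler's unconditional ack that unmasks lower prio irqs. */
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
```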
#1 is only relevant when a device handler calls disable_irq_nosync(),
and there it does not matter whether we mask the interrupt right away
or not. We handle lazy masking for disable_irq anyway, so there is no
real reason to have this extra mask in place.
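A minimal sketch of the lazy-disable behaviour referred to here
(simplified, not the exact generic implementation): disabling only marks
the descriptor; the chip's mask callback is applied by the flow handler
the next time the interrupt actually fires.

```c
/*
 * Lazy masking, simplified: disable only sets a flag, the hardware mask
 * is applied when the line fires while marked disabled. Not the exact
 * generic code.
 */
static void lazy_disable_sketch(struct irq_desc *desc)
{
	desc->status |= IRQ_DISABLED;		/* no chip->irq_mask() call here */
}

static void flow_handler_disabled_path_sketch(struct irq_desc *desc)
{
	if (desc->status & IRQ_DISABLED) {
		desc->status |= IRQ_PENDING;	/* remember for a possible resend */
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	}
}
```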
#2 will prevent the resend of a pending interrupt, which can result in
lost interrupts for edge type interrupts. For level type interrupts
the resend is a noop in the generic code. According to the
datasheet all interrupts are level type, so marking them as such
will result in the exact same behaviour as the private
handle_prio_irq implementation.
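A simplified sketch of the generic resend logic this refers to (not the
verbatim kernel code): for level type interrupts the resend is a noop
because the still-asserted line refires once it is unmasked; only edge
type interrupts that went pending while disabled need a retrigger.

```c
/*
 * Simplified sketch of the generic resend check: level type interrupts
 * need no software help, edge type interrupts that were marked pending
 * while disabled are retriggered. Not verbatim kernel code.
 */
static void resend_sketch(struct irq_desc *desc)
{
	if (desc->status & IRQ_LEVEL)
		return;				/* line still asserted, refires on unmask */

	if (desc->status & IRQ_PENDING) {
		desc->status &= ~IRQ_PENDING;
		/* hardware retrigger where the chip supports it,
		 * otherwise the core falls back to a software resend */
		desc->irq_data.chip->irq_retrigger(&desc->irq_data);
	}
}
```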
#3 is just stupid. Crashing the kernel instead of handling a problem
gracefully is just wrong. With the current semantics (all handlers
run with interrupts disabled) this is even more wrong.
Rename ack to eoi, remove the unused mask_ack, switch to
handle_fasteoi_irq and remove the private function.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
LKML-Reference: <20110202212552.299898447@linutronix.de>
Diffstat (limited to 'arch/arm/mach-ns9xxx')
-rw-r--r-- | arch/arm/mach-ns9xxx/irq.c | 58 |
1 file changed, 4 insertions(+), 54 deletions(-)
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index 389fa5c669de..bf0fd48cbd80 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -31,17 +31,11 @@ static void ns9xxx_mask_irq(struct irq_data *d)
 	__raw_writel(ic, SYS_IC(prio / 4));
 }
 
-static void ns9xxx_ack_irq(struct irq_data *d)
+static void ns9xxx_eoi_irq(struct irq_data *d)
 {
 	__raw_writel(0, SYS_ISRADDR);
 }
 
-static void ns9xxx_maskack_irq(struct irq_data *d)
-{
-	ns9xxx_mask_irq(d);
-	ns9xxx_ack_irq(d);
-}
-
 static void ns9xxx_unmask_irq(struct irq_data *d)
 {
 	/* XXX: better use cpp symbols */
@@ -52,56 +46,11 @@ static void ns9xxx_unmask_irq(struct irq_data *d)
 }
 
 static struct irq_chip ns9xxx_chip = {
-	.irq_ack = ns9xxx_ack_irq,
+	.irq_eoi = ns9xxx_eoi_irq,
 	.irq_mask = ns9xxx_mask_irq,
-	.irq_mask_ack = ns9xxx_maskack_irq,
 	.irq_unmask = ns9xxx_unmask_irq,
 };
 
-#if 0
-#define handle_irq handle_level_irq
-#else
-static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
-{
-	struct irqaction *action;
-	irqreturn_t action_ret;
-
-	raw_spin_lock(&desc->lock);
-
-	BUG_ON(desc->status & IRQ_INPROGRESS);
-
-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
-		goto out_mask;
-
-	desc->status |= IRQ_INPROGRESS;
-	raw_spin_unlock(&desc->lock);
-
-	action_ret = handle_IRQ_event(irq, action);
-
-	/* XXX: There is no direct way to access noirqdebug, so check
-	 * unconditionally for spurious irqs...
-	 * Maybe this function should go to kernel/irq/chip.c? */
-	note_interrupt(irq, desc, action_ret);
-
-	raw_spin_lock(&desc->lock);
-	desc->status &= ~IRQ_INPROGRESS;
-
-	if (desc->status & IRQ_DISABLED)
-out_mask:
-		desc->irq_data.chip->irq_mask(&desc->irq_data);
-
-	/* ack unconditionally to unmask lower prio irqs */
-	desc->irq_data.chip->irq_ack(&desc->irq_data);
-
-	raw_spin_unlock(&desc->lock);
-}
-#define handle_irq handle_prio_irq
-#endif
-
 void __init ns9xxx_init_irq(void)
 {
 	int i;
@@ -119,7 +68,8 @@ void __init ns9xxx_init_irq(void)
 
 	for (i = 0; i <= 31; ++i) {
 		set_irq_chip(i, &ns9xxx_chip);
-		set_irq_handler(i, handle_irq);
+		set_irq_handler(i, handle_fasteoi_irq);
 		set_irq_flags(i, IRQF_VALID);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }