path: root/include/linux/irq.h
author     Marc Zyngier <marc.zyngier@arm.com>    2011-09-23 17:03:06 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2011-10-03 15:35:26 +0200
commit     31d9d9b6d83030f748d013e61502fa5477e2ac0e (patch)
tree       503670b94d594c09daa83c047b426e7b5328aa76 /include/linux/irq.h
parent     60f96b41f71d2a13d1c0a457b8b77958f77142d1 (diff)
download   blackbird-op-linux-31d9d9b6d83030f748d013e61502fa5477e2ac0e.tar.gz
           blackbird-op-linux-31d9d9b6d83030f748d013e61502fa5477e2ac0e.zip
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per-CPU interrupts (PPIs), which
are usually used to connect local timers to each core. Each CPU has its
own private interface to the GIC, and only sees the PPIs that are
directly connected to it.

While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.

For these devices, request_irq() is not the right API, as it assumes
that an IRQ number is visible to a number of CPUs (through the affinity
setting), but makes it very awkward to express that an IRQ number can be
handled by all CPUs, and yet be a different interrupt line on each CPU,
requiring a different dev_id cookie to be passed back to the handler.

The *_percpu_irq() functions are designed to overcome these limitations,
by providing a per-cpu dev_id vector:

int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);

The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs

Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.

Based on an initial patch by Thomas Gleixner.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
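For illustration only (not part of this patch), a local-timer style driver
might use the new API roughly as follows; my_timer_dev, my_timer_handler,
my_timer_irq and my_timer_probe() are hypothetical placeholders:

/* Hypothetical sketch of the per-cpu dev_id API; not from this patch. */
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct my_timer_dev {
	unsigned int ticks;		/* per-CPU device state */
};

static DEFINE_PER_CPU(struct my_timer_dev, my_timer_dev);

/* The core hands the handler this CPU's instance of the per-cpu dev_id. */
static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	struct my_timer_dev *dev = dev_id;

	dev->ticks++;
	return IRQ_HANDLED;
}

static int __init my_timer_probe(unsigned int my_timer_irq)
{
	int err;

	/* A single request covers all CPUs; dev_id is a per-cpu pointer. */
	err = request_percpu_irq(my_timer_irq, my_timer_handler,
				 "my_timer", &my_timer_dev);
	if (err)
		return err;

	/*
	 * Delivery still has to be enabled per core. This call only covers
	 * the CPU running probe; secondary CPUs would do the same from
	 * their local bring-up path.
	 */
	enable_percpu_irq(my_timer_irq);
	return 0;
}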
Diffstat (limited to 'include/linux/irq.h')
-rw-r--r--  include/linux/irq.h  16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 73e31abeba1c..59e49c80cc2c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -88,12 +89,13 @@ enum {
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -367,6 +369,8 @@ enum {
struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -394,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
@@ -422,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
+extern int irq_set_percpu_devid(unsigned int irq);
+
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);
@@ -483,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+ irq_set_status_flags(irq,
+ IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+ IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
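For reference, a sketch (not part of this patch) of how an irqchip setup
path might wire a PPI to the per-cpu dev_id infrastructure added above;
my_setup_ppi, chip and ppi_irq are hypothetical placeholders:

/* Hypothetical irqchip-side setup for a per-cpu dev_id interrupt. */
static void __init my_setup_ppi(unsigned int ppi_irq, struct irq_chip *chip)
{
	/*
	 * Mark the descriptor with the per-cpu dev_id flags (see
	 * irq_set_percpu_devid_flags() above) so that request_percpu_irq()
	 * will accept this IRQ.
	 */
	irq_set_percpu_devid(ppi_irq);

	/* Use the per-cpu dev_id flow handler added by this patch. */
	irq_set_chip_and_handler(ppi_irq, chip, handle_percpu_devid_irq);
}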