Diffstat (limited to 'drivers/irqchip')
34 files changed, 2943 insertions, 400 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 80e10f4e213a..6d397732138d 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -315,6 +315,18 @@ config INGENIC_IRQ depends on MACH_INGENIC default y +config INGENIC_TCU_IRQ + bool "Ingenic JZ47xx TCU interrupt controller" + default MACH_INGENIC + depends on MIPS || COMPILE_TEST + select MFD_SYSCON + select GENERIC_IRQ_CHIP + help + Support for interrupts in the Timer/Counter Unit (TCU) of the Ingenic + JZ47xx SoCs. + + If unsure, say N. + config RENESAS_H8300H_INTC bool select IRQ_DOMAIN @@ -358,6 +370,10 @@ config MVEBU_PIC config MVEBU_SEI bool +config LS_EXTIRQ + def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE + select MFD_SYSCON + config LS_SCFG_MSI def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE depends on PCI && PCI_MSI @@ -422,7 +438,7 @@ config CSKY_MPINTC help Say yes here to enable C-SKY SMP interrupt controller driver used for C-SKY SMP system. - In fact it's not mmio map in hw and it use ld/st to visit the + In fact it's not mmio map in hardware and it uses ld/st to visit the controller's register inside CPU. config CSKY_APB_INTC @@ -430,7 +446,7 @@ config CSKY_APB_INTC depends on CSKY help Say yes here to enable C-SKY APB interrupt controller driver used - by C-SKY single core SOC system. It use mmio map apb-bus to visit + by C-SKY single core SOC system. It uses mmio map apb-bus to visit the controller's register. config IMX_IRQSTEER @@ -441,6 +457,12 @@ config IMX_IRQSTEER help Support for the i.MX IRQSTEER interrupt multiplexer/remapper. +config IMX_INTMUX + def_bool y if ARCH_MXC + select IRQ_DOMAIN + help + Support for the i.MX INTMUX interrupt multiplexer. + config LS1X_IRQ bool "Loongson-1 Interrupt Controller" depends on MACH_LOONGSON32 @@ -471,11 +493,10 @@ config TI_SCI_INTA_IRQCHIP If you wish to use interrupt aggregator irq resources managed by the TI System Controller, say Y here. Otherwise, say N. -endmenu - config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV + select IRQ_DOMAIN_HIERARCHY help This enables support for the PLIC chip found in SiFive (and potentially other) RISC-V systems. The PLIC controls devices @@ -484,3 +505,12 @@ config SIFIVE_PLIC interrupt sources are subordinate to the PLIC. If you don't know what to do here, say Y. + +config EXYNOS_IRQ_COMBINER + bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST + depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST + help + Say yes here to add support for the IRQ combiner devices embedded + in Samsung Exynos chips. 
+ +endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 8d0fcec6ab23..eae0d78cbf22 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o -obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o @@ -75,6 +75,7 @@ obj-$(CONFIG_RENESAS_H8300H_INTC) += irq-renesas-h8300h.o obj-$(CONFIG_RENESAS_H8S_INTC) += irq-renesas-h8s.o obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o +obj-$(CONFIG_INGENIC_TCU_IRQ) += irq-ingenic-tcu.o obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o obj-$(CONFIG_MSCC_OCELOT_IRQ) += irq-mscc-ocelot.o @@ -83,9 +84,10 @@ obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o +obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o -obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o @@ -98,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o +obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o obj-$(CONFIG_MADERA_IRQ) += irq-madera.o obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c index 1a57cee3efab..0b0a73739756 100644 --- a/drivers/irqchip/irq-al-fic.c +++ b/drivers/irqchip/irq-al-fic.c @@ -15,6 +15,7 @@ /* FIC Registers */ #define AL_FIC_CAUSE 0x00 +#define AL_FIC_SET_CAUSE 0x08 #define AL_FIC_MASK 0x10 #define AL_FIC_CONTROL 0x28 @@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc) chained_irq_exit(irqchip, desc); } +static int al_fic_irq_retrigger(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct al_fic *fic = gc->private; + + writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE); + + return 1; +} + static int al_fic_register(struct device_node *node, struct al_fic *fic) { @@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node, gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit; gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit; gc->chip_types->chip.irq_set_type = al_fic_irq_set_type; + gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger; gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE; gc->private = fic; diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c new file mode 100644 index 000000000000..c90a3346b985 --- /dev/null +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller + * Copyright 2019 IBM Corporation 
+ * + * Eddie James <eajames@linux.ibm.com> + */ + +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +#define ASPEED_SCU_IC_REG 0x018 +#define ASPEED_SCU_IC_SHIFT 0 +#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT) +#define ASPEED_SCU_IC_NUM_IRQS 7 +#define ASPEED_SCU_IC_STATUS_SHIFT 16 + +#define ASPEED_AST2600_SCU_IC0_REG 0x560 +#define ASPEED_AST2600_SCU_IC0_SHIFT 0 +#define ASPEED_AST2600_SCU_IC0_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT) +#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6 + +#define ASPEED_AST2600_SCU_IC1_REG 0x570 +#define ASPEED_AST2600_SCU_IC1_SHIFT 4 +#define ASPEED_AST2600_SCU_IC1_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT) +#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2 + +struct aspeed_scu_ic { + unsigned long irq_enable; + unsigned long irq_shift; + unsigned int num_irqs; + unsigned int reg; + struct regmap *scu; + struct irq_domain *irq_domain; +}; + +static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) +{ + unsigned int irq; + unsigned int sts; + unsigned long bit; + unsigned long enabled; + unsigned long max; + unsigned long status; + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; + + chained_irq_enter(chip, desc); + + /* + * The SCU IC has just one register to control its operation and read + * status. The interrupt enable bits occupy the lower 16 bits of the + * register, while the interrupt status bits occupy the upper 16 bits. + * The status bit for a given interrupt is always 16 bits shifted from + * the enable bit for the same interrupt. + * Therefore, perform the IRQ operations in the enable bit space by + * shifting the status down to get the mapping and then back up to + * clear the bit. + */ + regmap_read(scu_ic->scu, scu_ic->reg, &sts); + enabled = sts & scu_ic->irq_enable; + status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled; + + bit = scu_ic->irq_shift; + max = scu_ic->num_irqs + bit; + + for_each_set_bit_from(bit, &status, max) { + irq = irq_find_mapping(scu_ic->irq_domain, + bit - scu_ic->irq_shift); + generic_handle_irq(irq); + + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, + BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_scu_ic_irq_mask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the mask + * operation from clearing the status bits, they should be under the + * mask and written with 0. + */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0); +} + +static void aspeed_scu_ic_irq_unmask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + unsigned int mask = bit | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the unmask + * operation from clearing the status bits, they should be under the + * mask and written with 0. 
+ */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit); +} + +static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + return -EINVAL; +} + +static struct irq_chip aspeed_scu_ic_chip = { + .name = "aspeed-scu-ic", + .irq_mask = aspeed_scu_ic_irq_mask, + .irq_unmask = aspeed_scu_ic_irq_unmask, + .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, +}; + +static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops aspeed_scu_ic_domain_ops = { + .map = aspeed_scu_ic_map, +}; + +static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, + struct device_node *node) +{ + int irq; + int rc = 0; + + if (!node->parent) { + rc = -ENODEV; + goto err; + } + + scu_ic->scu = syscon_node_to_regmap(node->parent); + if (IS_ERR(scu_ic->scu)) { + rc = PTR_ERR(scu_ic->scu); + goto err; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq < 0) { + rc = irq; + goto err; + } + + scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs, + &aspeed_scu_ic_domain_ops, + scu_ic); + if (!scu_ic->irq_domain) { + rc = -ENOMEM; + goto err; + } + + irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler, + scu_ic); + + return 0; + +err: + kfree(scu_ic); + + return rc; +} + +static int __init aspeed_scu_ic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE; + scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT; + scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS; + scu_ic->reg = ASPEED_SCU_IC_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", + aspeed_ast2600_scu_ic0_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", + aspeed_ast2600_scu_ic1_of_init); diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 6acad2ea0fb3..29333497ba10 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void) static const struct of_device_id aic5_irq_fixups[] __initconst = { { 
.compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, + { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup }, { /* sentinel */ }, }; @@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node, return aic5_of_init(node, parent, NR_SAMA5D4_IRQS); } IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init); + +#define NR_SAM9X60_IRQS 50 + +static int __init sam9x60_aic5_of_init(struct device_node *node, + struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAM9X60_IRQS); +} +IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init); diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index fc75c61233aa..cbf01afcd2a6 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> +#include <linux/syscore_ops.h> #define IRQS_PER_WORD 32 #define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4) @@ -39,6 +40,11 @@ struct bcm7038_l1_chip { unsigned int n_words; struct irq_domain *domain; struct bcm7038_l1_cpu *cpus[NR_CPUS]; +#ifdef CONFIG_PM_SLEEP + struct list_head list; + u32 wake_mask[MAX_WORDS]; +#endif + u32 irq_fwd_mask[MAX_WORDS]; u8 affinity[MAX_WORDS * IRQS_PER_WORD]; }; @@ -249,6 +255,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, resource_size_t sz; struct bcm7038_l1_cpu *cpu; unsigned int i, n_words, parent_irq; + int ret; if (of_address_to_resource(dn, idx, &res)) return -EINVAL; @@ -262,6 +269,14 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, else if (intc->n_words != n_words) return -EINVAL; + ret = of_property_read_u32_array(dn , "brcm,int-fwd-mask", + intc->irq_fwd_mask, n_words); + if (ret != 0 && ret != -EINVAL) { + /* property exists but has the wrong number of words */ + pr_err("invalid brcm,int-fwd-mask property\n"); + return -EINVAL; + } + cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), GFP_KERNEL); if (!cpu) @@ -272,8 +287,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, return -ENOMEM; for (i = 0; i < n_words; i++) { - l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i)); - cpu->mask_cache[i] = 0xffffffff; + l1_writel(~intc->irq_fwd_mask[i], + cpu->map_base + reg_mask_set(intc, i)); + l1_writel(intc->irq_fwd_mask[i], + cpu->map_base + reg_mask_clr(intc, i)); + cpu->mask_cache[i] = ~intc->irq_fwd_mask[i]; } parent_irq = irq_of_parse_and_map(dn, idx); @@ -281,12 +299,89 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, pr_err("failed to map parent interrupt %d\n", parent_irq); return -EINVAL; } + + if (of_property_read_bool(dn, "brcm,irq-can-wake")) + enable_irq_wake(parent_irq); + irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, intc); return 0; } +#ifdef CONFIG_PM_SLEEP +/* + * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is + * used because the struct chip_type suspend/resume hooks are not called + * unless chip_type is hooked onto a generic_chip. Since this driver does + * not use generic_chip, we need to manually hook our resume/suspend to + * syscore_ops. 
+ */ +static LIST_HEAD(bcm7038_l1_intcs_list); +static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock); + +static int bcm7038_l1_suspend(void) +{ + struct bcm7038_l1_chip *intc; + int boot_cpu, word; + u32 val; + + /* Wakeup interrupt should only come from the boot cpu */ + boot_cpu = cpu_logical_map(0); + + list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) { + for (word = 0; word < intc->n_words; word++) { + val = intc->wake_mask[word] | intc->irq_fwd_mask[word]; + l1_writel(~val, + intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word)); + l1_writel(val, + intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word)); + } + } + + return 0; +} + +static void bcm7038_l1_resume(void) +{ + struct bcm7038_l1_chip *intc; + int boot_cpu, word; + + boot_cpu = cpu_logical_map(0); + + list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) { + for (word = 0; word < intc->n_words; word++) { + l1_writel(intc->cpus[boot_cpu]->mask_cache[word], + intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word)); + l1_writel(~intc->cpus[boot_cpu]->mask_cache[word], + intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word)); + } + } +} + +static struct syscore_ops bcm7038_l1_syscore_ops = { + .suspend = bcm7038_l1_suspend, + .resume = bcm7038_l1_resume, +}; + +static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + + raw_spin_lock_irqsave(&intc->lock, flags); + if (on) + intc->wake_mask[word] |= mask; + else + intc->wake_mask[word] &= ~mask; + raw_spin_unlock_irqrestore(&intc->lock, flags); + + return 0; +} +#endif + static struct irq_chip bcm7038_l1_irq_chip = { .name = "bcm7038-l1", .irq_mask = bcm7038_l1_mask, @@ -295,11 +390,21 @@ static struct irq_chip bcm7038_l1_irq_chip = { #ifdef CONFIG_SMP .irq_cpu_offline = bcm7038_l1_cpu_offline, #endif +#ifdef CONFIG_PM_SLEEP + .irq_set_wake = bcm7038_l1_set_wake, +#endif }; static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq) { + struct bcm7038_l1_chip *intc = d->host_data; + u32 mask = BIT(hw_irq % IRQS_PER_WORD); + u32 word = hw_irq / IRQS_PER_WORD; + + if (intc->irq_fwd_mask[word] & mask) + return -EPERM; + irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); irq_set_chip_data(virq, d->host_data); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); @@ -340,6 +445,16 @@ int __init bcm7038_l1_of_init(struct device_node *dn, goto out_unmap; } +#ifdef CONFIG_PM_SLEEP + /* Add bcm7038_l1_chip into a list */ + raw_spin_lock(&bcm7038_l1_intcs_lock); + list_add_tail(&intc->list, &bcm7038_l1_intcs_list); + raw_spin_unlock(&bcm7038_l1_intcs_lock); + + if (list_is_singular(&bcm7038_l1_intcs_list)) + register_syscore_ops(&bcm7038_l1_syscore_ops); +#endif + pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n", dn, IRQS_PER_WORD * intc->n_words); diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index b0a8215a13fc..82520006195d 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -41,6 +41,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { + if (quirks->compatible) + continue; if (quirks->iidr != (quirks->mask & iidr)) continue; if (quirks->init(data)) @@ -63,7 +65,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type, * for "irq", depending on "type". 
*/ raw_spin_lock_irqsave(&irq_controller_lock, flags); - val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); + val = oldval = readl_relaxed(base + confoff); if (type & IRQ_TYPE_LEVEL_MASK) val &= ~confmask; else if (type & IRQ_TYPE_EDGE_BOTH) @@ -83,14 +85,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type, * does not allow us to set the configuration or we are in a * non-secure mode, and hence it may not be catastrophic. */ - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); - if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) { - if (WARN_ON(irq >= 32)) - ret = -EINVAL; - else - pr_warn("GIC: PPI%d is secure or misconfigured\n", - irq - 16); - } + writel_relaxed(val, base + confoff); + if (readl_relaxed(base + confoff) != val) + ret = -EINVAL; + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); if (sync_access) @@ -132,26 +130,31 @@ void gic_dist_config(void __iomem *base, int gic_irqs, sync_access(); } -void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) +void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)) { int i; /* * Deal with the banked PPI and SGI interrupts - disable all - * PPI interrupts, ensure all SGI interrupts are enabled. - * Make sure everything is deactivated. + * private interrupts. Make sure everything is deactivated. */ - writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); - writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); - writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); + for (i = 0; i < nr; i += 32) { + writel_relaxed(GICD_INT_EN_CLR_X32, + base + GIC_DIST_ACTIVE_CLEAR + i / 8); + writel_relaxed(GICD_INT_EN_CLR_X32, + base + GIC_DIST_ENABLE_CLEAR + i / 8); + } /* * Set priority on PPI and SGI interrupts */ - for (i = 0; i < 32; i += 4) + for (i = 0; i < nr; i += 4) writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i * 4 / 4); + /* Ensure all SGI interrupts are now enabled */ + writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); + if (sync_access) sync_access(); } diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 5a46b6b57750..ccba8b0fe0f5 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -22,7 +22,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type, void __iomem *base, void (*sync_access)(void)); void gic_dist_config(void __iomem *base, int gic_irqs, void (*sync_access)(void)); -void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); +void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)); void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data); void gic_enable_of_quirks(const struct device_node *np, diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 7338f90b2f9e..fbec07d634ad 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -17,6 +17,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> +#include <linux/pci.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -525,7 +526,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header, spi_start, nr_spis); } - fwnode = irq_domain_alloc_fwnode((void *)m->base_address); + fwnode = irq_domain_alloc_fwnode(&res.start); if (!fwnode) { pr_err("Unable to allocate GICv2m domain token\n"); return -EINVAL; diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 
229d586c3d7a..87711e0f8014 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -5,6 +5,7 @@ */ #include <linux/acpi_iort.h> +#include <linux/pci.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 1b5c3672aea2..83b1186ffcad 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -6,6 +6,7 @@ #include <linux/acpi.h> #include <linux/acpi_iort.h> +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/cpu.h> #include <linux/crash_dump.h> @@ -102,24 +103,36 @@ struct its_node { struct its_collection *collections; struct fwnode_handle *fwnode_handle; u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; u64 cbaser_save; u32 ctlr_save; + u32 mpidr; struct list_head its_device_list; u64 flags; unsigned long list_nr; - u32 ite_size; - u32 device_ids; int numa_node; unsigned int msi_domain_flags; u32 pre_its_base; /* for Socionext Synquacer */ - bool is_v4; int vlpi_redist_offset; }; +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + #define ITS_ITT_ALIGN SZ_256 /* The maximum number of VPEID bits supported by VLPI commands */ -#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) /* Convert page order to size in bytes */ @@ -130,7 +143,7 @@ struct event_lpi_map { u16 *col_map; irq_hw_number_t lpi_base; int nr_lpis; - struct mutex vlpi_lock; + raw_spinlock_t vlpi_lock; struct its_vm *vm; struct its_vlpi_map *vlpi_maps; int nr_vlpis; @@ -175,6 +188,28 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (vm->vlpi_count[its->list_nr]) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + return d->hwirq - its_dev->event_map.lpi_base; +} + static struct its_collection *dev_event_to_col(struct its_device *its_dev, u32 event) { @@ -183,6 +218,38 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev, return its->collections + its_dev->event_map.col_map[event]; } +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int irq_to_cpuid(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + return 
map->vpe->col_idx; + + return its_dev->event_map.col_map[its_get_event_id(d)]; +} + static struct its_collection *valid_col(struct its_collection *col) { if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) @@ -282,6 +349,10 @@ struct its_cmd_desc { u16 seq_num; u16 its_list; } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; }; }; @@ -289,7 +360,10 @@ struct its_cmd_desc { * The ITS command block, which is what the ITS actually parses. */ struct its_cmd_block { - u64 raw_cmd[4]; + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; }; #define ITS_CMD_QUEUE_SZ SZ_64K @@ -395,13 +469,45 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); } +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ - cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); - cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); - cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); - cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); } static struct its_collection *its_build_mapd_cmd(struct its_node *its, @@ -555,7 +661,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_INVALL); - its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); its_fixup_cmd(cmd); @@ -578,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, struct its_cmd_block *cmd, struct its_cmd_desc *desc) { - unsigned long vpt_addr; + unsigned long vpt_addr, vconf_addr; u64 target; - - vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); - target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + bool alloc; its_encode_cmd(cmd, GITS_CMD_VMAPP); its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_target(cmd, target); its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); 
+ + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* We can only signal PTZ when alloc==1. Why do we have two bits? */ + its_encode_ptz(cmd, alloc); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmapp_cmd.vpe); @@ -602,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, { u32 db; - if (desc->its_vmapti_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -625,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, { u32 db; - if (desc->its_vmovi_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -655,11 +787,85 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); its_encode_target(cmd, target); + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmovp_cmd.vpe); } +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -937,7 +1143,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col) static void its_send_vmapti(struct its_device *dev, u32 id) { - struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); struct its_cmd_desc desc; desc.its_vmapti_cmd.vpe = map->vpe; @@ -951,7 +1157,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id) static void its_send_vmovi(struct 
its_device *dev, u32 id) { - struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); struct its_cmd_desc desc; desc.its_vmovi_cmd.vpe = map->vpe; @@ -976,17 +1182,15 @@ static void its_send_vmapp(struct its_node *its, static void its_send_vmovp(struct its_vpe *vpe) { - struct its_cmd_desc desc; + struct its_cmd_desc desc = {}; struct its_node *its; unsigned long flags; int col_id = vpe->col_idx; desc.its_vmovp_cmd.vpe = vpe; - desc.its_vmovp_cmd.its_list = (u16)its_list_map; if (!its_list_map) { its = list_first_entry(&its_nodes, struct its_node, entry); - desc.its_vmovp_cmd.seq_num = 0; desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); return; @@ -1003,10 +1207,11 @@ static void its_send_vmovp(struct its_vpe *vpe) raw_spin_lock_irqsave(&vmovp_lock, flags); desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); /* Emit VMOVPs */ list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; if (!vpe->its_vm->vlpi_count[its->list_nr]) @@ -1027,29 +1232,68 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); } -/* - * irqchip functions - assumes MSI, mostly. - */ +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; -static inline u32 its_get_event_id(struct irq_data *d) + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - return d->hwirq - its_dev->event_map.lpi_base; + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); } +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. + */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. 
+ */ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { + struct its_vlpi_map *map = get_vlpi_map(d); irq_hw_number_t hwirq; void *va; u8 *cfg; - if (irqd_is_forwarded_to_vcpu(d)) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); - struct its_vlpi_map *map; - - va = page_address(its_dev->event_map.vm->vprop_page); - map = &its_dev->event_map.vlpi_maps[event]; + if (map) { + va = page_address(map->vm->vprop_page); hwirq = map->vintid; /* Remember the updated property */ @@ -1075,23 +1319,70 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) dsb(ishst); } +static void wait_for_syncr(void __iomem *rdbase) +{ + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + void __iomem *rdbase; + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + /* Target the redistributor this LPI is currently routed to */ + rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + + wait_for_syncr(rdbase); +} + static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); lpi_write_config(d, clr, set); - its_send_inv(its_dev, its_get_event_id(d)); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); } static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. 
+ */ + if (is_v4_1(its_dev->its)) + return; - if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) return; - its_dev->event_map.vlpi_maps[event].db_enabled = enable; + map->db_enabled = enable; /* * More fun with the architecture: @@ -1193,10 +1484,17 @@ static int its_irq_set_irqchip_state(struct irq_data *d, if (which != IRQCHIP_STATE_PENDING) return -EINVAL; - if (state) - its_send_int(its_dev, event); - else - its_send_clear(its_dev, event); + if (irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } return 0; } @@ -1264,13 +1562,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) if (!info->map) return -EINVAL; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), - GFP_KERNEL); + GFP_ATOMIC); if (!maps) { ret = -ENOMEM; goto out; @@ -1313,29 +1611,30 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) } out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); return ret; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); + struct its_vlpi_map *map; int ret = 0; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm || - !its_dev->event_map.vlpi_maps[event].vm) { + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { ret = -EINVAL; goto out; } /* Copy our mapping information to the incoming request */ - *info->map = its_dev->event_map.vlpi_maps[event]; + *info->map = *map; out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); return ret; } @@ -1345,7 +1644,7 @@ static int its_vlpi_unmap(struct irq_data *d) u32 event = its_get_event_id(d); int ret = 0; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { ret = -EINVAL; @@ -1375,7 +1674,7 @@ static int its_vlpi_unmap(struct irq_data *d) } out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); return ret; } @@ -1401,7 +1700,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) struct its_cmd_info *info = vcpu_info; /* Need a v4 ITS */ - if (!its_dev->its->is_v4) + if (!is_v4(its_dev->its)) return -EINVAL; /* Unmap request? 
*/ @@ -1907,9 +2206,9 @@ static bool its_parse_indirect_baser(struct its_node *its, if (new_order >= MAX_ORDER) { new_order = MAX_ORDER - 1; ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); - pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", &its->phys_base, its_base_type_string[type], - its->device_ids, ids); + device_ids(its), ids); } *order = new_order; @@ -1917,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its, return indirect; } +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this matches + * our own affinity. + */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + static void its_free_tables(struct its_node *its) { int i; @@ -1955,10 +2313,21 @@ static int its_alloc_tables(struct its_node *its) case GITS_BASER_TYPE_DEVICE: indirect = its_parse_indirect_baser(its, baser, psz, &order, - its->device_ids); + device_ids(its)); break; case GITS_BASER_TYPE_VCPU: + if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + if ((sibling = find_sibling_its(its))) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + indirect = its_parse_indirect_baser(its, baser, psz, &order, ITS_MAX_VPEID_BITS); @@ -1980,6 +2349,287 @@ static int its_alloc_tables(struct its_node *its) return 0; } +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! 
*/ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. + */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + /* fall through */ + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + 
return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. + */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + /* fall through */ + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. 
+ */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + static int its_alloc_collections(struct its_node *its) { int i; @@ -2071,18 +2721,20 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) { u32 count = 1000000; /* 1s! */ bool clean; u64 val; - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); val &= ~GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); do { - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); clean = !(val & GICR_VPENDBASER_Dirty); if (!clean) { count--; @@ -2091,6 +2743,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) } } while (!clean && count); + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + return val; } @@ -2179,7 +2836,7 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); - if (gic_rdists->has_vlpis) { + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); /* @@ -2192,15 +2849,24 @@ static void its_cpu_init_lpis(void) val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", smp_processor_id(), val); - gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); /* * Also clear Valid bit of GICR_VPENDBASER, in case some * ancient programming gets left in and has possibility of * corrupting memory. */ - val = its_clear_vpend_valid(vlpi_base); - WARN_ON(val & GICR_VPENDBASER_Dirty); + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... 
+ */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; } /* Make sure the GIC has seen the above */ @@ -2346,7 +3012,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id) /* Don't allow device id that exceeds ITS hardware limit */ if (!baser) - return (ilog2(dev_id) < its->device_ids); + return (ilog2(dev_id) < device_ids(its)); return its_alloc_table_entry(its, baser, dev_id); } @@ -2354,6 +3020,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id) static bool its_alloc_vpe_table(u32 vpe_id) { struct its_node *its; + int cpu; /* * Make sure the L2 tables are allocated on *all* v4 ITSs. We @@ -2365,7 +3032,7 @@ static bool its_alloc_vpe_table(u32 vpe_id) list_for_each_entry(its, &its_nodes, entry) { struct its_baser *baser; - if (!its->is_v4) + if (!is_v4(its)) continue; baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); @@ -2376,6 +3043,19 @@ static bool its_alloc_vpe_table(u32 vpe_id) return false; } + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + return true; } @@ -2404,7 +3084,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, * sized as a power of two (and you need at least one bit...). */ nr_ites = max(2, nvecs); - sz = nr_ites * its->ite_size; + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); if (alloc_lpis) { @@ -2435,7 +3115,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->event_map.col_map = col_map; dev->event_map.lpi_base = lpi_base; dev->event_map.nr_lpis = nr_lpis; - mutex_init(&dev->event_map.vlpi_lock); + raw_spin_lock_init(&dev->event_map.vlpi_lock); dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); @@ -2456,6 +3136,7 @@ static void its_free_device(struct its_device *its_dev) raw_spin_lock_irqsave(&its_dev->its->lock, flags); list_del(&its_dev->entry); raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); kfree(its_dev->itt); kfree(its_dev); } @@ -2464,6 +3145,7 @@ static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number { int idx; + /* Find a free LPI region in lpi_map and allocate them. 
*/ idx = bitmap_find_free_region(dev->event_map.lpi_map, dev->event_map.nr_lpis, get_count_order(nvecs)); @@ -2471,7 +3153,6 @@ static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number return -ENOSPC; *hwirq = dev->event_map.lpi_base + idx; - set_bit(idx, dev->event_map.lpi_map); return 0; } @@ -2641,14 +3322,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct its_node *its = its_dev->its; int i; + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + for (i = 0; i < nr_irqs; i++) { struct irq_data *data = irq_domain_get_irq_data(domain, virq + i); - u32 event = its_get_event_id(data); - - /* Mark interrupt index as unused */ - clear_bit(event, its_dev->event_map.lpi_map); - /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } @@ -2665,7 +3345,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, its_lpi_free(its_dev->event_map.lpi_map, its_dev->event_map.lpi_base, its_dev->event_map.nr_lpis); - kfree(its_dev->event_map.col_map); /* Unmap device/itt */ its_send_mapd(its_dev, 0); @@ -2687,7 +3366,7 @@ static const struct irq_domain_ops its_domain_ops = { /* * This is insane. * - * If a GICv4 doesn't implement Direct LPIs (which is extremely + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely * likely), the only way to perform an invalidate is to use a fake * device to issue an INV command, implying that the LPI has first * been mapped to some event on that device. Since this is not exactly @@ -2695,9 +3374,20 @@ static const struct irq_domain_ops its_domain_ops = { * only issue an UNMAP if we're short on available slots. * * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). */ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already unmapped? */ if (vpe->vpe_proxy_event == -1) return; @@ -2720,6 +3410,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (!gic_rdists->has_direct_lpi) { unsigned long flags; @@ -2731,6 +3425,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already mapped? 
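 * (vpe_proxy_event == -1 is the "no slot on the proxy device" marker
 * that the init path sets up; any other value means this vPE's
 * doorbell already owns an event on the proxy and can be invalidated
 * through it, so there is nothing left to do here.)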
*/ if (vpe->vpe_proxy_event != -1) return; @@ -2753,13 +3451,16 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) unsigned long flags; struct its_collection *target_col; + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (gic_rdists->has_direct_lpi) { void __iomem *rdbase; rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); + wait_for_syncr(rdbase); return; } @@ -2780,7 +3481,7 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int cpu = cpumask_first(mask_val); + int from, cpu = cpumask_first(mask_val); /* * Changing affinity is mega expensive, so let's be as lazy as @@ -2788,14 +3489,24 @@ static int its_vpe_set_affinity(struct irq_data *d, * into the proxy device, we need to move the doorbell * interrupt to its new location. */ - if (vpe->col_idx != cpu) { - int from = vpe->col_idx; + if (vpe->col_idx == cpu) + goto out; - vpe->col_idx = cpu; - its_send_vmovp(vpe); - its_vpe_db_proxy_move(vpe, from, cpu); - } + from = vpe->col_idx; + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. + */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; @@ -2812,7 +3523,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; val |= GICR_VPROPBASER_RaWb; val |= GICR_VPROPBASER_InnerShareable; - gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); @@ -2830,7 +3541,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) val |= GICR_VPENDBASER_PendingLast; val |= vpe->idai ? 
GICR_VPENDBASER_IDAI : 0; val |= GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); } static void its_vpe_deschedule(struct its_vpe *vpe) @@ -2838,16 +3549,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe) void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); u64 val; - val = its_clear_vpend_valid(vlpi_base); + val = its_clear_vpend_valid(vlpi_base, 0, 0); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { - pr_err_ratelimited("ITS virtual pending table not cleaning\n"); - vpe->idai = false; - vpe->pending_last = true; - } else { - vpe->idai = !!(val & GICR_VPENDBASER_IDAI); - vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); - } + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); } static void its_vpe_invall(struct its_vpe *vpe) @@ -2855,7 +3560,7 @@ static void its_vpe_invall(struct its_vpe *vpe) struct its_node *its; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) @@ -2913,10 +3618,10 @@ static void its_vpe_send_inv(struct irq_data *d) if (gic_rdists->has_direct_lpi) { void __iomem *rdbase; + /* Target the redistributor this VPE is currently known on */ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; - gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); + gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); } else { its_vpe_send_cmd(vpe, its_send_inv); } @@ -2958,8 +3663,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); } else { gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); + wait_for_syncr(rdbase); } } else { if (state) @@ -2981,6 +3685,139 @@ static struct irq_chip its_vpe_irq_chip = { .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its = NULL; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. + */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? 
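/* (Worked example, values assumed rather than taken from this patch:
 * scheduling vpe_id 5 with both groups enabled builds
 *	val = GICR_VPENDBASER_Valid |
 *	      GICR_VPENDBASER_4_1_VGRP0EN |
 *	      GICR_VPENDBASER_4_1_VGRP1EN |
 *	      FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, 5);
 * note that no pending-table address is programmed: on v4.1 the RD
 * locates the vPT from the VPEID via the tables set up at boot.)
 */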
GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + */ + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. + */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + u64 val; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + static int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); @@ -3016,7 +3853,10 @@ static int its_vpe_init(struct its_vpe *vpe) vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - vpe->vpe_proxy_event = -1; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; return 0; } @@ -3058,6 +3898,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; struct page *vprop_page; @@ -3085,6 +3926,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); @@ -3095,7 +3939,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; irq_domain_set_hwirq_and_chip(domain, virq + i, i, - &its_vpe_irq_chip, vm->vpes[i]); + irqchip, vm->vpes[i]); set_bit(i, bitmap); } @@ -3124,7 +3968,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, vpe->col_idx = cpumask_first(cpu_online_mask); list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if 
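/* (Sketch of how the v4.1 scheduling entry points above are reached;
 * hypothetical caller, not part of this patch:
 *
 *	struct its_cmd_info info = {
 *		.cmd_type = DESCHEDULE_VPE,
 *		.req_db   = true,
 *	};
 *	irq_set_vcpu_affinity(vpe_irq, &info);
 *
 * which lands in its_vpe_4_1_set_vcpu_affinity() and from there in
 * its_vpe_4_1_deschedule(), requesting a doorbell on blocking.)
 */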
(!is_v4(its)) continue; its_send_vmapp(its, vpe, true); @@ -3150,7 +3994,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, return; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; its_send_vmapp(its, vpe, false); @@ -3201,8 +4045,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) { struct its_node *its = data; - /* erratum 22375: only alloc 8MB table size */ - its->device_ids = 0x14; /* 20 bits, 8MB */ + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; return true; @@ -3222,7 +4067,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) struct its_node *its = data; /* On QDF2400, the size of the ITE is 16Bytes */ - its->ite_size = 16; + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); return true; } @@ -3256,8 +4102,10 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) its->get_msi_base = its_irq_get_msi_base_pre_its; ids = ilog2(pre_its_window[1]) - 2; - if (its->device_ids > ids) - its->device_ids = ids; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } /* the pre-ITS breaks isolation, so disable MSI remapping */ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; @@ -3490,7 +4338,7 @@ static int its_init_vpe_domain(void) } /* Use the last possible DevID */ - devid = GENMASK(its->device_ids - 1, 0); + devid = GENMASK(device_ids(its) - 1, 0); vpe_proxy.dev = its_create_device(its, devid, entries, false); if (!vpe_proxy.dev) { kfree(vpe_proxy.vpes); @@ -3588,12 +4436,10 @@ static int __init its_probe_one(struct resource *res, INIT_LIST_HEAD(&its->entry); INIT_LIST_HEAD(&its->its_device_list); typer = gic_read_typer(its_base + GITS_TYPER); + its->typer = typer; its->base = its_base; its->phys_base = res->start; - its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); - its->device_ids = GITS_TYPER_DEVBITS(typer); - its->is_v4 = !!(typer & GITS_TYPER_VLPIS); - if (its->is_v4) { + if (is_v4(its)) { if (!(typer & GITS_TYPER_VMOVP)) { err = its_compute_its_list_map(res, its_base); if (err < 0) @@ -3606,6 +4452,14 @@ static int __init its_probe_one(struct resource *res, } else { pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &res->start, its->mpidr, svpet); + } } its->numa_node = numa_node; @@ -3660,7 +4514,7 @@ static int __init its_probe_one(struct resource *res, gits_write_cwriter(0, its->base + GITS_CWRITER); ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr |= GITS_CTLR_ENABLE; - if (its->is_v4) + if (is_v4(its)) ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); @@ -3921,7 +4775,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; res.flags = IORESOURCE_MEM; - dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); + dom_handle = irq_domain_alloc_fwnode(&res.start); if (!dom_handle) { pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", &res.start); @@ -3966,6 +4820,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, bool 
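/* (Arithmetic behind the Cavium quirk above, for reference: DEVBITS
 * forced to 20 bits means a flat device table of 2^20 entries; at the
 * usual 8 bytes per entry that is the 8MB the comment mentions. The
 * quirks now patch their limits straight into the cached its->typer,
 * which device_ids() and the ITT-entry-size computation decode.)
 */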
has_v4 = false; int err; + gic_rdists = rdists; + its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) @@ -3978,14 +4834,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, return -ENXIO; } - gic_rdists = rdists; - err = allocate_lpi_tables(); if (err) return err; list_for_each_entry(its, &its_nodes, entry) - has_v4 |= its->is_v4; + has_v4 |= is_v4(its); if (has_v4 & rdists->has_vlpis) { if (its_init_vpe_domain() || diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 96d927f0f91a..c1f7af9d9ae7 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -51,13 +51,17 @@ struct gic_chip_data { u32 nr_redist_regions; u64 flags; bool has_rss; - unsigned int irq_nr; - struct partition_desc *ppi_descs[16]; + unsigned int ppi_nr; + struct partition_desc **ppi_descs; }; static struct gic_chip_data gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) +#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) +#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) + /* * The behaviours of RPR and PMR registers differ depending on the value of * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the @@ -83,8 +87,17 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); +/* + * Global static key controlling whether an update to PMR allowing more + * interrupts requires to be propagated to the redistributor (DSB SY). + * And this needs to be exported for modules to be able to enable + * interrupts... + */ +DEFINE_STATIC_KEY_FALSE(gic_pmr_sync); +EXPORT_SYMBOL(gic_pmr_sync); + /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ -static refcount_t ppi_nmi_refs[16]; +static refcount_t *ppi_nmi_refs; static struct gic_kvm_info gic_v3_kvm_info; static DEFINE_PER_CPU(bool, has_rss); @@ -97,6 +110,38 @@ static DEFINE_PER_CPU(bool, has_rss); /* Our default, arbitrary priority value. Linux only uses one anyway. */ #define DEFAULT_PMR_VALUE 0xf0 +enum gic_intid_range { + PPI_RANGE, + SPI_RANGE, + EPPI_RANGE, + ESPI_RANGE, + LPI_RANGE, + __INVALID_RANGE__ +}; + +static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) +{ + switch (hwirq) { + case 16 ... 31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): + return EPPI_RANGE; + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): + return ESPI_RANGE; + case 8192 ... 
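/* (Range reference, using the architectural base INTIDs, which are
 * assumed here rather than spelled out in this hunk: PPIs are 16-31,
 * SPIs 32-1019, EPPIs 1056-1119 (EPPI_BASE_INTID + 0..63), ESPIs
 * 4096-5119 (ESPI_BASE_INTID + 0..1023), and LPIs run from 8192 up
 * to GENMASK(23, 0) = 16777215. So e.g. hwirq 1060 is EPPI_RANGE.)
 */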
GENMASK(23, 0): + return LPI_RANGE; + default: + return __INVALID_RANGE__; + } +} + +static enum gic_intid_range get_intid_range(struct irq_data *d) +{ + return __get_intid_range(d->hwirq); +} + static inline unsigned int gic_irq(struct irq_data *d) { return d->hwirq; @@ -104,18 +149,26 @@ static inline unsigned int gic_irq(struct irq_data *d) static inline int gic_irq_in_rdist(struct irq_data *d) { - return gic_irq(d) < 32; + enum gic_intid_range range = get_intid_range(d); + return range == PPI_RANGE || range == EPPI_RANGE; } static inline void __iomem *gic_dist_base(struct irq_data *d) { - if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ + switch (get_intid_range(d)) { + case PPI_RANGE: + case EPPI_RANGE: + /* SGI+PPI -> SGI_base for this CPU */ return gic_data_rdist_sgi_base(); - if (d->hwirq <= 1023) /* SPI -> dist_base */ + case SPI_RANGE: + case ESPI_RANGE: + /* SPI -> dist_base */ return gic_data.dist_base; - return NULL; + default: + return NULL; + } } static void gic_do_wait_for_rwp(void __iomem *base) @@ -130,7 +183,7 @@ static void gic_do_wait_for_rwp(void __iomem *base) } cpu_relax(); udelay(1); - }; + } } /* Wait for completion of a distributor change */ @@ -187,7 +240,7 @@ static void gic_enable_redist(bool enable) break; cpu_relax(); udelay(1); - }; + } if (!count) pr_err_ratelimited("redistributor failed to %s...\n", enable ? "wakeup" : "sleep"); @@ -196,24 +249,79 @@ static void gic_enable_redist(bool enable) /* * Routines to disable, enable, EOI and route interrupts */ +static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) +{ + switch (get_intid_range(d)) { + case PPI_RANGE: + case SPI_RANGE: + *index = d->hwirq; + return offset; + case EPPI_RANGE: + /* + * Contrary to the ESPI range, the EPPI range is contiguous + * to the PPI range in the registers, so let's adjust the + * displacement accordingly. Consistency is overrated. 
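 * The first EPPI (hwirq == EPPI_BASE_INTID, 1056 per the spec) thus
 * lands at *index = 32, directly after the 32 SGIs/PPIs in every
 * banked register.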
+ */ + *index = d->hwirq - EPPI_BASE_INTID + 32; + return offset; + case ESPI_RANGE: + *index = d->hwirq - ESPI_BASE_INTID; + switch (offset) { + case GICD_ISENABLER: + return GICD_ISENABLERnE; + case GICD_ICENABLER: + return GICD_ICENABLERnE; + case GICD_ISPENDR: + return GICD_ISPENDRnE; + case GICD_ICPENDR: + return GICD_ICPENDRnE; + case GICD_ISACTIVER: + return GICD_ISACTIVERnE; + case GICD_ICACTIVER: + return GICD_ICACTIVERnE; + case GICD_IPRIORITYR: + return GICD_IPRIORITYRnE; + case GICD_ICFGR: + return GICD_ICFGRnE; + case GICD_IROUTER: + return GICD_IROUTERnE; + default: + break; + } + break; + default: + break; + } + + WARN_ON(1); + *index = d->hwirq; + return offset; +} + static int gic_peek_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); if (gic_irq_in_rdist(d)) base = gic_data_rdist_sgi_base(); else base = gic_data.dist_base; - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); } static void gic_poke_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); void (*rwp_wait)(void); void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); if (gic_irq_in_rdist(d)) { base = gic_data_rdist_sgi_base(); @@ -223,7 +331,7 @@ static void gic_poke_irq(struct irq_data *d, u32 offset) rwp_wait = gic_dist_wait_for_rwp; } - writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); + writel_relaxed(mask, base + offset + (index / 32) * 4); rwp_wait(); } @@ -263,7 +371,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, { u32 reg; - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + if (d->hwirq >= 8192) /* PPI/SPI only */ return -EINVAL; switch (which) { @@ -290,7 +398,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, static int gic_irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *val) { - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + if (d->hwirq >= 8192) /* PPI/SPI only */ return -EINVAL; switch (which) { @@ -316,8 +424,23 @@ static int gic_irq_get_irqchip_state(struct irq_data *d, static void gic_irq_set_prio(struct irq_data *d, u8 prio) { void __iomem *base = gic_dist_base(d); + u32 offset, index; - writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d)); + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static u32 gic_get_ppi_index(struct irq_data *d) +{ + switch (get_intid_range(d)) { + case PPI_RANGE: + return d->hwirq - 16; + case EPPI_RANGE: + return d->hwirq - EPPI_BASE_INTID + 16; + default: + unreachable(); + } } static int gic_irq_nmi_setup(struct irq_data *d) @@ -340,10 +463,12 @@ static int gic_irq_nmi_setup(struct irq_data *d) return -EINVAL; /* desc lock should already be held */ - if (gic_irq(d) < 32) { + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + /* Setting up PPI as NMI, only switch handler for first NMI */ - if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) { - refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1); + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { + refcount_set(&ppi_nmi_refs[idx], 1); desc->handle_irq = handle_percpu_devid_fasteoi_nmi; } } else { @@ -375,9 +500,11 @@ static void gic_irq_nmi_teardown(struct irq_data *d) return; /* desc lock should already be held */ - if (gic_irq(d) < 
32) { + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + /* Tearing down NMI, only switch handler for last NMI */ - if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16])) + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) desc->handle_irq = handle_percpu_devid_irq; } else { desc->handle_irq = handle_fasteoi_irq; @@ -404,17 +531,22 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) static int gic_set_type(struct irq_data *d, unsigned int type) { + enum gic_intid_range range; unsigned int irq = gic_irq(d); void (*rwp_wait)(void); void __iomem *base; + u32 offset, index; + int ret; /* Interrupt configuration for SGIs can't be changed */ if (irq < 16) return -EINVAL; + range = get_intid_range(d); + /* SPIs have restrictions on the supported types */ - if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && - type != IRQ_TYPE_EDGE_RISING) + if ((range == SPI_RANGE || range == ESPI_RANGE) && + type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) return -EINVAL; if (gic_irq_in_rdist(d)) { @@ -425,7 +557,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type) rwp_wait = gic_dist_wait_for_rwp; } - return gic_configure_irq(irq, type, base, rwp_wait); + offset = convert_offset_index(d, GICD_ICFGR, &index); + + ret = gic_configure_irq(index, type, base + offset, rwp_wait); + if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); + ret = 0; + } + + return ret; } static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) @@ -500,7 +641,12 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs gic_arch_enable_irqs(); } - if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { + /* Check for special IDs first */ + if ((irqnr >= 1020 && irqnr <= 1023)) + return; + + /* Treat anything but SGIs in a uniform way */ + if (likely(irqnr > 15)) { int err; if (static_branch_likely(&supports_deactivate_key)) @@ -588,10 +734,26 @@ static void __init gic_dist_init(void) * do the right thing if the kernel is running in secure mode, * but that's not the intended use case anyway. */ - for (i = 32; i < gic_data.irq_nr; i += 32) + for (i = 32; i < GIC_LINE_NR; i += 32) writel_relaxed(~0, base + GICD_IGROUPR + i / 8); - gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); + /* Extended SPI range, not handled by the GICv2/GICv3 common code */ + for (i = 0; i < GIC_ESPI_NR; i += 32) { + writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); + writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); + } + + for (i = 0; i < GIC_ESPI_NR; i += 32) + writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); + + for (i = 0; i < GIC_ESPI_NR; i += 16) + writel_relaxed(0, base + GICD_ICFGRnE + i / 4); + + for (i = 0; i < GIC_ESPI_NR; i += 4) + writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); + + /* Now do the common stuff, and wait for the distributor to drain */ + gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); /* Enable distributor with ARE, Group1 */ writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, @@ -602,8 +764,11 @@ static void __init gic_dist_init(void) * enabled. 
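 * (Both ranges are covered below: GICD_IROUTER + i * 8 for the first
 * 1020 interrupt lines, and GICD_IROUTERnE + i * 8 for the extended
 * SPIs, each getting the boot CPU's affinity value.)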
*/ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); - for (i = 32; i < gic_data.irq_nr; i++) + for (i = 32; i < GIC_LINE_NR; i++) gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); } static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) @@ -689,22 +854,41 @@ static int gic_populate_rdist(void) return -ENODEV; } -static int __gic_update_vlpi_properties(struct redist_region *region, - void __iomem *ptr) +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) { u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); - gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + gic_data.rdists.has_rvpeid); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); return 1; } -static void gic_update_vlpi_properties(void) +static void gic_update_rdist_properties(void) { - gic_iterate_rdists(__gic_update_vlpi_properties); - pr_info("%sVLPI support, %sdirect LPI support\n", + gic_data.ppi_nr = UINT_MAX; + gic_iterate_rdists(__gic_update_rdist_properties); + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) + gic_data.ppi_nr = 0; + pr_info("%d PPIs implemented\n", gic_data.ppi_nr); + pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n", !gic_data.rdists.has_vlpis ? "no " : "", - !gic_data.rdists.has_direct_lpi ? "no " : ""); + !gic_data.rdists.has_direct_lpi ? "no " : "", + !gic_data.rdists.has_rvpeid ? 
"no " : ""); } /* Check whether it's single security state view */ @@ -845,6 +1029,7 @@ static int gic_dist_supports_lpis(void) static void gic_cpu_init(void) { void __iomem *rbase; + int i; /* Register ourselves with the rest of the world */ if (gic_populate_rdist()) @@ -852,12 +1037,18 @@ static void gic_cpu_init(void) gic_enable_redist(true); + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + rbase = gic_data_rdist_sgi_base(); /* Configure SGIs/PPIs as non-secure Group-1 */ - writel_relaxed(~0, rbase + GICR_IGROUPR0); + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); - gic_cpu_config(rbase, gic_redist_wait_for_rwp); + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); /* initialise system registers */ gic_cpu_sys_reg_init(); @@ -961,6 +1152,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; + u32 offset, index; void __iomem *reg; int enabled; u64 val; @@ -981,7 +1173,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (enabled) gic_mask_irq(d); - reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); + offset = convert_offset_index(d, GICD_IROUTER, &index); + reg = gic_dist_base(d) + offset + (index * 8); val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); gic_write_irouter(val, reg); @@ -1065,8 +1258,6 @@ static struct irq_chip gic_eoimode1_chip = { IRQCHIP_MASK_ON_SUSPEND, }; -#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) - static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -1075,36 +1266,32 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, if (static_branch_likely(&supports_deactivate_key)) chip = &gic_eoimode1_chip; - /* SGIs are private to the core kernel */ - if (hw < 16) - return -EPERM; - /* Nothing here */ - if (hw >= gic_data.irq_nr && hw < 8192) - return -EPERM; - /* Off limits */ - if (hw >= GIC_ID_NR) - return -EPERM; - - /* PPIs */ - if (hw < 32) { + switch (__get_intid_range(hw)) { + case PPI_RANGE: + case EPPI_RANGE: irq_set_percpu_devid(irq); irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); irq_set_status_flags(irq, IRQ_NOAUTOEN); - } - /* SPIs */ - if (hw >= 32 && hw < gic_data.irq_nr) { + break; + + case SPI_RANGE: + case ESPI_RANGE: irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); - } - /* LPIs */ - if (hw >= 8192 && hw < GIC_ID_NR) { + break; + + case LPI_RANGE: if (!gic_dist_supports_lpis()) return -EPERM; irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; } return 0; @@ -1126,12 +1313,24 @@ static int gic_irq_domain_translate(struct irq_domain *d, *hwirq = fwspec->param[1] + 32; break; case 1: /* PPI */ - case GIC_IRQ_TYPE_PARTITION: *hwirq = fwspec->param[1] + 16; break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; case GIC_IRQ_TYPE_LPI: /* LPI */ *hwirq = fwspec->param[1]; break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; 
default: return -EINVAL; } @@ -1211,7 +1410,8 @@ static int gic_irq_domain_select(struct irq_domain *d, * then we need to match the partition domain. */ if (fwspec->param_count >= 4 && - fwspec->param[0] == 1 && fwspec->param[3] != 0) + fwspec->param[0] == 1 && fwspec->param[3] != 0 && + gic_data.ppi_descs) return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); return d == gic_data.domain; @@ -1232,6 +1432,9 @@ static int partition_domain_translate(struct irq_domain *d, struct device_node *np; int ret; + if (!gic_data.ppi_descs) + return -ENOMEM; + np = of_find_node_by_phandle(fwspec->param[3]); if (WARN_ON(!np)) return -EINVAL; @@ -1261,13 +1464,78 @@ static bool gic_enable_quirk_msm8996(void *data) return true; } +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... */ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + } +}; + static void gic_enable_nmi_support(void) { int i; - for (i = 0; i < 16; i++) + if (!gic_prio_masking_enabled()) + return; + + if (gic_has_group0() && !gic_dist_security_disabled()) { + pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) refcount_set(&ppi_nmi_refs[i], 0); + /* + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) + static_branch_enable(&gic_pmr_sync); + + pr_info("%s ICC_PMR_EL1 synchronisation\n", + static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing"); + static_branch_enable(&supports_pseudo_nmis); if (static_branch_likely(&supports_deactivate_key)) @@ -1283,7 +1551,6 @@ static int __init gic_init_bases(void __iomem *dist_base, struct fwnode_handle *handle) { u32 typer; - int gic_irqs; int err; if (!is_hyp_mode_available()) @@ -1300,19 +1567,23 @@ static int __init gic_init_bases(void __iomem *dist_base, /* * Find out how many interrupts are supported. 
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) */ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); gic_data.rdists.gicd_typer = typer; - gic_irqs = GICD_TYPER_IRQS(typer); - if (gic_irqs > 1020) - gic_irqs = 1020; - gic_data.irq_nr = gic_irqs; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; @@ -1333,7 +1604,7 @@ static int __init gic_init_bases(void __iomem *dist_base, set_handle_irq(gic_handle_irq); - gic_update_vlpi_properties(); + gic_update_rdist_properties(); gic_smp_init(); gic_dist_init(); @@ -1348,12 +1619,7 @@ static int __init gic_init_bases(void __iomem *dist_base, gicv2m_init(handle, gic_data.domain); } - if (gic_prio_masking_enabled()) { - if (!gic_has_group0() || gic_dist_security_disabled()) - gic_enable_nmi_support(); - else - pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); - } + gic_enable_nmi_support(); return 0; @@ -1386,6 +1652,10 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) if (!parts_node) return; + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + return; + nr_parts = of_get_child_count(parts_node); if (!nr_parts) @@ -1437,7 +1707,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) part_idx++; } - for (i = 0; i < 16; i++) { + for (i = 0; i < gic_data.ppi_nr; i++) { unsigned int irq; struct partition_desc *desc; struct irq_fwspec ppi_fwspec = { @@ -1490,16 +1760,6 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) gic_set_kvm_info(&gic_v3_kvm_info); } -static const struct gic_quirk gic_quirks[] = { - { - .desc = "GICv3: Qualcomm MSM8996 broken firmware", - .compatible = "qcom,msm8996-gic-v3", - .init = gic_enable_quirk_msm8996, - }, - { - } -}; - static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; @@ -1579,6 +1839,7 @@ static struct struct redist_region *redist_regs; u32 nr_redist_regions; bool single_redist; + int enabled_rdists; u32 maint_irq; int maint_irq_mode; phys_addr_t vcpu_base; @@ -1673,8 +1934,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, * If GICC is enabled and has valid gicr base address, then it means * GICR base is presented via GICC */ - if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; return 0; + } /* * It's perfectly valid firmware can pass disabled GICC entry, driver @@ -1704,8 +1967,10 @@ static int __init gic_acpi_count_gicr_regions(void) count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_match_gicc, 0); - if (count > 0) + if (count > 0) { acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } return count; } @@ -1845,7 +2110,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) if (err) goto out_redist_unmap; - 
domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); + domain_handle = irq_domain_alloc_fwnode(&dist->base_address); if (!domain_handle) { err = -ENOMEM; goto out_redist_unmap; diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 563e87ed0766..45969927cc81 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) int its_schedule_vpe(struct its_vpe *vpe, bool on) { struct its_cmd_info info; + int ret; WARN_ON(preemptible()); info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; - return its_send_vpe_cmd(vpe, &info); + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->resident = on; + + return ret; } int its_invall_vpe(struct its_vpe *vpe) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index e45f45e68720..30ab623343d3 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -291,6 +291,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); unsigned int gicirq = gic_irq(d); + int ret; /* Interrupt configuration for SGIs can't be changed */ if (gicirq < 16) @@ -301,7 +302,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - return gic_configure_irq(gicirq, type, base, NULL); + ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL); + if (ret && gicirq < 32) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16); + ret = 0; + } + + return ret; } static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) @@ -535,7 +543,7 @@ static int gic_cpu_init(struct gic_chip_data *gic) gic_cpu_map[i] &= ~cpu_mask; } - gic_cpu_config(dist_base, NULL); + gic_cpu_config(dist_base, 32, NULL); writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); gic_cpu_if_up(gic); @@ -1627,7 +1635,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, /* * Initialize GIC instance zero (no multi-GIC support). 
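 * (As in the GICv3/ITS hunks above, the domain token is now built
 * from the firmware-provided physical base address,
 * &dist->base_address, rather than the ioremapped virtual address
 * that was passed before.)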
*/ - domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base); + domain_handle = irq_domain_alloc_fwnode(&dist->base_address); if (!domain_handle) { pr_err("Unable to allocate domain handle\n"); gic_teardown(gic); diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index cf705827599c..130caa1c9d93 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -130,7 +130,12 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) raw_spin_lock(&irq_controller_lock); - ret = gic_configure_irq(irq, type, base, NULL); + ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL); + if (ret && irq < 32) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16); + ret = 0; + } raw_spin_unlock(&irq_controller_lock); @@ -268,7 +273,7 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc) if (i != cpu) hip04_cpu_map[i] &= ~cpu_mask; - gic_cpu_config(dist_base, NULL); + gic_cpu_config(dist_base, 32, NULL); writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); writel_relaxed(1, base + GIC_CPU_CTRL); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index d00489a4b54f..698d07f48fed 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -362,10 +362,8 @@ static int pdc_intc_probe(struct platform_device *pdev) } for (i = 0; i < priv->nr_perips; ++i) { irq = platform_get_irq(pdev, 1 + i); - if (irq < 0) { - dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i); + if (irq < 0) return irq; - } priv->perip_irqs[i] = irq; } /* check if too many were provided */ @@ -376,10 +374,8 @@ static int pdc_intc_probe(struct platform_device *pdev) /* Get syswake IRQ number */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "cannot find syswake IRQ\n"); + if (irq < 0) return irq; - } priv->syswake_irq = irq; /* Set up an IRQ domain */ diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c new file mode 100644 index 000000000000..c27577c81126 --- /dev/null +++ b/drivers/irqchip/irq-imx-intmux.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2017 NXP + +/* INTMUX Block Diagram + * + * ________________ + * interrupt source # 0 +---->| | + * | | | + * interrupt source # 1 +++-->| | + * ... | | | channel # 0 |--------->interrupt out # 0 + * ... | | | | + * ... | | | | + * interrupt source # X-1 +++-->|________________| + * | | | + * | | | + * | | | ________________ + * +---->| | + * | | | | | + * | +-->| | + * | | | | channel # 1 |--------->interrupt out # 1 + * | | +>| | + * | | | | | + * | | | |________________| + * | | | + * | | | + * | | | ... + * | | | ... + * | | | + * | | | ________________ + * +---->| | + * | | | | + * +-->| | + * | | channel # N |--------->interrupt out # N + * +>| | + * | | + * |________________| + * + * + * N: Interrupt Channel Instance Number (N=7) + * X: Interrupt Source Number for each channel (X=32) + * + * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 + * interrupt sources and generates 1 interrupt output. 
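 * Worked register-map example (channel and bit values assumed):
 * source #5 on channel #1 is enabled by setting bit 5 of
 * CHANIER(1) = 0x10 + 0x40 * 1 = 0x50, and its pending state is read
 * from bit 5 of CHANIPR(1) = 0x60.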
+ * + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/spinlock.h> + +#define CHANIER(n) (0x10 + (0x40 * n)) +#define CHANIPR(n) (0x20 + (0x40 * n)) + +#define CHAN_MAX_NUM 0x8 + +struct intmux_irqchip_data { + int chanidx; + int irq; + struct irq_domain *domain; +}; + +struct intmux_data { + raw_spinlock_t lock; + void __iomem *regs; + struct clk *ipg_clk; + int channum; + struct intmux_irqchip_data irqchip_data[]; +}; + +static void imx_intmux_irq_mask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* disable the interrupt source of this channel */ + val &= ~BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static void imx_intmux_irq_unmask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* enable the interrupt source of this channel */ + val |= BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static struct irq_chip imx_intmux_irq_chip = { + .name = "intmux", + .irq_mask = imx_intmux_irq_mask, + .irq_unmask = imx_intmux_irq_unmask, +}; + +static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, h->host_data); + irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); + + return 0; +} + +static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + + /* + * two cells needed in interrupt specifier: + * the 1st cell: hw interrupt number + * the 2nd cell: channel index + */ + if (WARN_ON(intsize != 2)) + return -EINVAL; + + if (WARN_ON(intspec[1] >= data->channum)) + return -EINVAL; + + *out_hwirq = intspec[0]; + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + +static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return false; + + return irqchip_data->chanidx == fwspec->param[1]; +} + +static const struct irq_domain_ops imx_intmux_domain_ops = { + .map = imx_intmux_irq_map, + .xlate = imx_intmux_irq_xlate, + .select = imx_intmux_irq_select, +}; + +static void imx_intmux_irq_handler(struct irq_desc *desc) +{ + struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); + int idx = irqchip_data->chanidx; + struct intmux_data *data = 
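/* (Note on the container_of() idiom here and in the mask/unmask paths
 * above: the handler data is &data->irqchip_data[idx], so pointing
 * container_of() at the idx-th array element walks back to the
 * enclosing intmux_data.)
 */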
container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long irqstat; + int pos, virq; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + + /* read the interrupt source pending status of this channel */ + irqstat = readl_relaxed(data->regs + CHANIPR(idx)); + + for_each_set_bit(pos, &irqstat, 32) { + virq = irq_find_mapping(irqchip_data->domain, pos); + if (virq) + generic_handle_irq(virq); + } + + chained_irq_exit(irq_desc_get_chip(desc), desc); +} + +static int imx_intmux_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct irq_domain *domain; + struct intmux_data *data; + int channum; + int i, ret; + + channum = platform_irq_count(pdev); + if (channum == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (channum > CHAN_MAX_NUM) { + dev_err(&pdev->dev, "supports up to %d multiplex channels\n", + CHAN_MAX_NUM); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, sizeof(*data) + + channum * sizeof(data->irqchip_data[0]), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->regs)) { + dev_err(&pdev->dev, "failed to initialize reg\n"); + return PTR_ERR(data->regs); + } + + data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(data->ipg_clk)) { + ret = PTR_ERR(data->ipg_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); + return ret; + } + + data->channum = channum; + raw_spin_lock_init(&data->lock); + + ret = clk_prepare_enable(data->ipg_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + for (i = 0; i < channum; i++) { + data->irqchip_data[i].chanidx = i; + + data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); + if (data->irqchip_data[i].irq <= 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "failed to get irq\n"); + goto out; + } + + domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, + &data->irqchip_data[i]); + if (!domain) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + goto out; + } + data->irqchip_data[i].domain = domain; + + /* disable all interrupt sources of this channel firstly */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + imx_intmux_irq_handler, + &data->irqchip_data[i]); + } + + platform_set_drvdata(pdev, data); + + return 0; +out: + clk_disable_unprepare(data->ipg_clk); + return ret; +} + +static int imx_intmux_remove(struct platform_device *pdev) +{ + struct intmux_data *data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < data->channum; i++) { + /* disable all interrupt sources of this channel */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + NULL, NULL); + + irq_domain_remove(data->irqchip_data[i].domain); + } + + clk_disable_unprepare(data->ipg_clk); + + return 0; +} + +static const struct of_device_id imx_intmux_id[] = { + { .compatible = "fsl,imx-intmux", }, + { /* sentinel */ }, +}; + +static struct platform_driver imx_intmux_driver = { + .driver = { + .name = "imx-intmux", + .of_match_table = imx_intmux_id, + }, + .probe = imx_intmux_probe, + .remove = imx_intmux_remove, +}; +builtin_platform_driver(imx_intmux_driver); diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c new file mode 100644 index 000000000000..6d05cefe9d79 --- /dev/null +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -0,0 +1,182 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * JZ47xx SoCs TCU IRQ driver + * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/mfd/ingenic-tcu.h> +#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +struct ingenic_tcu { + struct regmap *map; + struct clk *clk; + struct irq_domain *domain; + unsigned int nb_parent_irqs; + u32 parent_irqs[3]; +}; + +static void ingenic_tcu_intc_cascade(struct irq_desc *desc) +{ + struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data); + struct irq_domain *domain = irq_desc_get_handler_data(desc); + struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); + struct regmap *map = gc->private; + uint32_t irq_reg, irq_mask; + unsigned int i; + + regmap_read(map, TCU_REG_TFR, &irq_reg); + regmap_read(map, TCU_REG_TMR, &irq_mask); + + chained_irq_enter(irq_chip, desc); + + irq_reg &= ~irq_mask; + + for_each_set_bit(i, (unsigned long *)&irq_reg, 32) + generic_handle_irq(irq_linear_revmap(domain, i)); + + chained_irq_exit(irq_chip, desc); +} + +static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + irq_gc_lock(gc); + regmap_write(map, ct->regs.ack, mask); + regmap_write(map, ct->regs.enable, mask); + *ct->mask_cache |= mask; + irq_gc_unlock(gc); +} + +static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + irq_gc_lock(gc); + regmap_write(map, ct->regs.disable, mask); + *ct->mask_cache &= ~mask; + irq_gc_unlock(gc); +} + +static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + irq_gc_lock(gc); + regmap_write(map, ct->regs.ack, mask); + regmap_write(map, ct->regs.disable, mask); + irq_gc_unlock(gc); +} + +static int __init ingenic_tcu_irq_init(struct device_node *np, + struct device_node *parent) +{ + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + struct ingenic_tcu *tcu; + struct regmap *map; + unsigned int i; + int ret, irqs; + + map = device_node_to_regmap(np); + if (IS_ERR(map)) + return PTR_ERR(map); + + tcu = kzalloc(sizeof(*tcu), GFP_KERNEL); + if (!tcu) + return -ENOMEM; + + tcu->map = map; + + irqs = of_property_count_elems_of_size(np, "interrupts", sizeof(u32)); + if (irqs < 0 || irqs > ARRAY_SIZE(tcu->parent_irqs)) { + pr_crit("%s: Invalid 'interrupts' property\n", __func__); + ret = -EINVAL; + goto err_free_tcu; + } + + tcu->nb_parent_irqs = irqs; + + tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, + NULL); + if (!tcu->domain) { + ret = -ENOMEM; + goto err_free_tcu; + } + + ret = irq_alloc_domain_generic_chips(tcu->domain, 32, 1, "TCU", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (ret) { + pr_crit("%s: Invalid 'interrupts' property\n", __func__); + goto out_domain_remove; + } + + gc = irq_get_domain_generic_chip(tcu->domain, 0); + ct = gc->chip_types; + + gc->wake_enabled = IRQ_MSK(32); + gc->private = tcu->map; + + ct->regs.disable = 
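/* (The TCU exposes set/clear register pairs instead of a
 * read-modify-write mask: writing 1 to TMSR masks a timer IRQ,
 * writing 1 to TMCR unmasks it, and TFCR acks the flag, hence the
 * three ->regs slots programmed here. In passing: the pr_crit() after
 * irq_alloc_domain_generic_chips() above reuses the "Invalid
 * 'interrupts' property" text although that failure is really an
 * allocation error.)
 */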
TCU_REG_TMSR; + ct->regs.enable = TCU_REG_TMCR; + ct->regs.ack = TCU_REG_TFCR; + ct->chip.irq_unmask = ingenic_tcu_gc_unmask_enable_reg; + ct->chip.irq_mask = ingenic_tcu_gc_mask_disable_reg; + ct->chip.irq_mask_ack = ingenic_tcu_gc_mask_disable_reg_and_ack; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; + + /* Mask all IRQs by default */ + regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32)); + + /* + * On JZ4740, timer 0 and timer 1 have their own interrupt line; + * timers 2-7 share one interrupt. + * On SoCs >= JZ4770, timer 5 has its own interrupt line; + * timers 0-4 and 6-7 share one single interrupt. + * + * To keep things simple, we just register the same handler to + * all parent interrupts. The handler will properly detect which + * channel fired the interrupt. + */ + for (i = 0; i < irqs; i++) { + tcu->parent_irqs[i] = irq_of_parse_and_map(np, i); + if (!tcu->parent_irqs[i]) { + ret = -EINVAL; + goto out_unmap_irqs; + } + + irq_set_chained_handler_and_data(tcu->parent_irqs[i], + ingenic_tcu_intc_cascade, + tcu->domain); + } + + return 0; + +out_unmap_irqs: + for (; i > 0; i--) + irq_dispose_mapping(tcu->parent_irqs[i - 1]); +out_domain_remove: + irq_domain_remove(tcu->domain); +err_free_tcu: + kfree(tcu); + return ret; +} +IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index f126255b3260..c5589ee0dfb3 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> - * JZ4740 platform IRQ support + * Ingenic XBurst platform IRQ support */ #include <linux/errno.h> @@ -10,7 +10,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/irqchip.h> -#include <linux/irqchip/ingenic.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/timex.h> @@ -18,10 +17,10 @@ #include <linux/delay.h> #include <asm/io.h> -#include <asm/mach-jz4740/irq.h> struct ingenic_intc_data { void __iomem *base; + struct irq_domain *domain; unsigned num_chips; }; @@ -35,41 +34,30 @@ struct ingenic_intc_data { static irqreturn_t intc_cascade(int irq, void *data) { struct ingenic_intc_data *intc = irq_get_handler_data(irq); - uint32_t irq_reg; + struct irq_domain *domain = intc->domain; + struct irq_chip_generic *gc; + uint32_t pending; unsigned i; for (i = 0; i < intc->num_chips; i++) { - irq_reg = readl(intc->base + (i * CHIP_SIZE) + - JZ_REG_INTC_PENDING); - if (!irq_reg) + gc = irq_get_domain_generic_chip(domain, i * 32); + + pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING); + if (!pending) continue; - generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE); + while (pending) { + int bit = __fls(pending); + + irq = irq_linear_revmap(domain, bit + (i * 32)); + generic_handle_irq(irq); + pending &= ~BIT(bit); + } } return IRQ_HANDLED; } -static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask) -{ - struct irq_chip_regs *regs = &gc->chip_types->regs; - - writel(mask, gc->reg_base + regs->enable); - writel(~mask, gc->reg_base + regs->disable); -} - -void ingenic_intc_irq_suspend(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - intc_irq_set_mask(gc, gc->wake_active); -} - -void 
ingenic_intc_irq_resume(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - intc_irq_set_mask(gc, gc->mask_cache); -} - static struct irqaction intc_cascade_action = { .handler = intc_cascade, .name = "SoC intc cascade interrupt", @@ -108,17 +96,26 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - for (i = 0; i < num_chips; i++) { - /* Mask all irqs */ - writel(0xffffffff, intc->base + (i * CHIP_SIZE) + - JZ_REG_INTC_SET_MASK); + domain = irq_domain_add_linear(node, num_chips * 32, + &irq_generic_chip_ops, NULL); + if (!domain) { + err = -ENOMEM; + goto out_unmap_base; + } - gc = irq_alloc_generic_chip("INTC", 1, - JZ4740_IRQ_BASE + (i * 32), - intc->base + (i * CHIP_SIZE), - handle_level_irq); + intc->domain = domain; + + err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (err) + goto out_domain_remove; + + for (i = 0; i < num_chips; i++) { + gc = irq_get_domain_generic_chip(domain, i * 32); gc->wake_enabled = IRQ_MSK(32); + gc->reg_base = intc->base + (i * CHIP_SIZE); ct = gc->chip_types; ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; @@ -127,21 +124,19 @@ static int __init ingenic_intc_of_init(struct device_node *node, ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct->chip.irq_set_wake = irq_gc_set_wake; - ct->chip.irq_suspend = ingenic_intc_irq_suspend; - ct->chip.irq_resume = ingenic_intc_irq_resume; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND; - irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, - IRQ_NOPROBE | IRQ_LEVEL); + /* Mask all irqs */ + irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK); } - domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0, - &irq_domain_simple_ops, NULL); - if (!domain) - pr_warn("unable to register IRQ domain\n"); - setup_irq(parent_irq, &intc_cascade_action); return 0; +out_domain_remove: + irq_domain_remove(domain); +out_unmap_base: + iounmap(intc->base); out_unmap_irq: irq_dispose_mapping(parent_irq); out_free: diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c index 6751c35b7e1d..37e0749215c7 100644 --- a/drivers/irqchip/irq-ixp4xx.c +++ b/drivers/irqchip/irq-ixp4xx.c @@ -319,7 +319,7 @@ void __init ixp4xx_irq_init(resource_size_t irqbase, pr_crit("IXP4XX: could not ioremap interrupt controller\n"); return; } - fwnode = irq_domain_alloc_fwnode(base); + fwnode = irq_domain_alloc_fwnode(&irqbase); if (!fwnode) { pr_crit("IXP4XX: no domain handle\n"); return; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index efbcf8435185..8118ebe80b09 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -164,10 +164,8 @@ static int keystone_irq_probe(struct platform_device *pdev) } kirq->irq = platform_get_irq(pdev, 0); - if (kirq->irq < 0) { - dev_err(dev, "no irq resource %d\n", kirq->irq); + if (kirq->irq < 0) return kirq->irq; - } kirq->dev = dev; kirq->mask = ~0x0; diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c new file mode 100644 index 000000000000..4d1179fed77c --- /dev/null +++ b/drivers/irqchip/irq-ls-extirq.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "irq-ls-extirq: " fmt + +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include 
<dt-bindings/interrupt-controller/arm-gic.h> + +#define MAXIRQ 12 +#define LS1021A_SCFGREVCR 0x200 + +struct ls_extirq_data { + struct regmap *syscon; + u32 intpcr; + bool bit_reverse; + u32 nirq; + struct irq_fwspec map[MAXIRQ]; +}; + +static int +ls_extirq_set_type(struct irq_data *data, unsigned int type) +{ + struct ls_extirq_data *priv = data->chip_data; + irq_hw_number_t hwirq = data->hwirq; + u32 value, mask; + + if (priv->bit_reverse) + mask = 1U << (31 - hwirq); + else + mask = 1U << hwirq; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type = IRQ_TYPE_LEVEL_HIGH; + value = mask; + break; + case IRQ_TYPE_EDGE_FALLING: + type = IRQ_TYPE_EDGE_RISING; + value = mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_EDGE_RISING: + value = 0; + break; + default: + return -EINVAL; + } + regmap_update_bits(priv->syscon, priv->intpcr, mask, value); + + return irq_chip_set_type_parent(data, type); +} + +static struct irq_chip ls_extirq_chip = { + .name = "ls-extirq", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = ls_extirq_set_type, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static int +ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct ls_extirq_data *priv = domain->host_data; + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + + if (fwspec->param_count != 2) + return -EINVAL; + + hwirq = fwspec->param[0]; + if (hwirq >= priv->nirq) + return -EINVAL; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip, + priv); + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]); +} + +static const struct irq_domain_ops extirq_domain_ops = { + .xlate = irq_domain_xlate_twocell, + .alloc = ls_extirq_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int +ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) +{ + const __be32 *map; + u32 mapsize; + int ret; + + map = of_get_property(node, "interrupt-map", &mapsize); + if (!map) + return -ENOENT; + if (mapsize % sizeof(*map)) + return -EINVAL; + mapsize /= sizeof(*map); + + while (mapsize) { + struct device_node *ipar; + u32 hwirq, intsize, j; + + if (mapsize < 3) + return -EINVAL; + hwirq = be32_to_cpup(map); + if (hwirq >= MAXIRQ) + return -EINVAL; + priv->nirq = max(priv->nirq, hwirq + 1); + + ipar = of_find_node_by_phandle(be32_to_cpup(map + 2)); + map += 3; + mapsize -= 3; + if (!ipar) + return -EINVAL; + priv->map[hwirq].fwnode = &ipar->fwnode; + ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize); + if (ret) + return ret; + + if (intsize > mapsize) + return -EINVAL; + + priv->map[hwirq].param_count = intsize; + for (j = 0; j < intsize; ++j) + priv->map[hwirq].param[j] = be32_to_cpup(map++); + mapsize -= intsize; + } + return 0; +} + +static int __init +ls_extirq_of_init(struct device_node *node, struct device_node *parent) +{ + + struct irq_domain *domain, *parent_domain; + struct ls_extirq_data *priv; + int ret; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Cannot find parent domain\n"); + return -ENODEV; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->syscon = syscon_node_to_regmap(node->parent); + if (IS_ERR(priv->syscon)) { + ret = PTR_ERR(priv->syscon); + pr_err("Failed to lookup parent regmap\n"); + goto out; + } + ret = 
of_property_read_u32(node, "reg", &priv->intpcr); + if (ret) { + pr_err("Missing INTPCR offset value\n"); + goto out; + } + + ret = ls_extirq_parse_map(priv, node); + if (ret) + goto out; + + if (of_device_is_compatible(node, "fsl,ls1021a-extirq")) { + u32 revcr; + + ret = regmap_read(priv->syscon, LS1021A_SCFGREVCR, &revcr); + if (ret) + goto out; + priv->bit_reverse = (revcr != 0); + } + + domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node, + &extirq_domain_ops, priv); + if (!domain) + ret = -ENOMEM; + +out: + if (ret) + kfree(priv); + return ret; +} + +IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3f09f658e8e2..6b566bba263b 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + .suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index dcdc23b9dce6..ccc7f823911b 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -24,34 +24,101 @@ #define REG_PIN_47_SEL 0x08 #define REG_FILTER_SEL 0x0c -#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x))) -#define REG_EDGE_POL_EDGE(x) BIT(x) -#define REG_EDGE_POL_LOW(x) BIT(16 + (x)) +/* use for A1 like chips */ +#define REG_PIN_A1_SEL 0x04 + +/* + * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by + * bits 24 to 31. Tests on the actual HW show that these bits are + * stuck at 0. Bits 8 to 15 are responsive and have the expected + * effect. 
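+ * (For example, with the SM1 parameters defined below, where
+ * edge_both_offset is 8, both-edge mode for channel 3 is selected
+ * by setting BIT(8 + 3) in REG_EDGE_POL.)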
+ */ +#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x)) +#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x)) +#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x)) +#define REG_EDGE_POL_MASK(params, x) ( \ + REG_EDGE_POL_EDGE(params, x) | \ + REG_EDGE_POL_LOW(params, x) | \ + REG_BOTH_EDGE(params, x)) #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) +struct meson_gpio_irq_controller; +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl); +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq); +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); + +struct irq_ctl_ops { + void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); + void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); +}; + struct meson_gpio_irq_params { unsigned int nr_hwirq; + bool support_edge_both; + unsigned int edge_both_offset; + unsigned int edge_single_offset; + unsigned int pol_low_offset; + unsigned int pin_sel_mask; + struct irq_ctl_ops ops; }; +#define INIT_MESON_COMMON(irqs, init, sel) \ + .nr_hwirq = irqs, \ + .ops = { \ + .gpio_irq_init = init, \ + .gpio_irq_sel_pin = sel, \ + }, + +#define INIT_MESON8_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ + meson8_gpio_irq_sel_pin) \ + .edge_single_offset = 0, \ + .pol_low_offset = 16, \ + .pin_sel_mask = 0xff, \ + +#define INIT_MESON_A1_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin) \ + .support_edge_both = true, \ + .edge_both_offset = 16, \ + .edge_single_offset = 8, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0x7f, \ + static const struct meson_gpio_irq_params meson8_params = { - .nr_hwirq = 134, + INIT_MESON8_COMMON_DATA(134) }; static const struct meson_gpio_irq_params meson8b_params = { - .nr_hwirq = 119, + INIT_MESON8_COMMON_DATA(119) }; static const struct meson_gpio_irq_params gxbb_params = { - .nr_hwirq = 133, + INIT_MESON8_COMMON_DATA(133) }; static const struct meson_gpio_irq_params gxl_params = { - .nr_hwirq = 110, + INIT_MESON8_COMMON_DATA(110) }; static const struct meson_gpio_irq_params axg_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) +}; + +static const struct meson_gpio_irq_params sm1_params = { + INIT_MESON8_COMMON_DATA(100) + .support_edge_both = true, + .edge_both_offset = 8, +}; + +static const struct meson_gpio_irq_params a1_params = { + INIT_MESON_A1_COMMON_DATA(62) }; static const struct of_device_id meson_irq_gpio_matches[] = { @@ -61,11 +128,13 @@ static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, + { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, + { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, { } }; struct meson_gpio_irq_controller { - unsigned int nr_hwirq; + const struct meson_gpio_irq_params *params; void __iomem *base; u32 channel_irqs[NUM_CHANNEL]; DECLARE_BITMAP(channel_map, NUM_CHANNEL); @@ -83,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller 
*ctl,
 	writel_relaxed(tmp, ctl->base + reg);
 }
 
-static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+				    unsigned int channel, unsigned long hwirq)
 {
-	return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+	unsigned int reg_offset;
+	unsigned int bit_offset;
+
+	reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+	bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+	meson_gpio_irq_update_bits(ctl, reg_offset,
+				   ctl->params->pin_sel_mask << bit_offset,
+				   hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+				      unsigned int channel,
+				      unsigned long hwirq)
+{
+	unsigned int reg_offset;
+	unsigned int bit_offset;
+
+	bit_offset = ((channel % 2) == 0) ? 0 : 16;
+	reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+	meson_gpio_irq_update_bits(ctl, reg_offset,
+				   ctl->params->pin_sel_mask << bit_offset,
+				   hwirq << bit_offset);
+}
+
+/* For A1 and later chips there is a switch to enable/disable the irq */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+	meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
 }
 
 static int
@@ -93,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 			       unsigned long hwirq,
 			       u32 **channel_hwirq)
 {
-	unsigned int reg, idx;
+	unsigned int idx;
 
 	spin_lock(&ctl->lock);
 
@@ -112,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 	 * Setup the mux of the channel to route the signal of the pad
 	 * to the appropriate input of the GIC
 	 */
-	reg = meson_gpio_irq_channel_to_reg(idx);
-	meson_gpio_irq_update_bits(ctl, reg,
-				   0xff << REG_PIN_SEL_SHIFT(idx),
-				   hwirq << REG_PIN_SEL_SHIFT(idx));
+	ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
 
 	/*
 	 * Get the hwirq number assigned to this channel through
@@ -156,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
 {
 	u32 val = 0;
 	unsigned int idx;
+	const struct meson_gpio_irq_params *params;
 
+	params = ctl->params;
 	idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
 
 	/*
@@ -168,19 +270,27 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
 	 */
 	type &= IRQ_TYPE_SENSE_MASK;
 
-	if (type == IRQ_TYPE_EDGE_BOTH)
-		return -EINVAL;
+	/*
+	 * Newer controllers support the EDGE_BOTH trigger.
This setting takes + * precedence over the other edge/polarity settings + */ + if (type == IRQ_TYPE_EDGE_BOTH) { + if (!params->support_edge_both) + return -EINVAL; - if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_EDGE(idx); + val |= REG_BOTH_EDGE(params, idx); + } else { + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + val |= REG_EDGE_POL_EDGE(params, idx); - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_LOW(idx); + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) + val |= REG_EDGE_POL_LOW(params, idx); + } spin_lock(&ctl->lock); meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, - REG_EDGE_POL_MASK(idx), val); + REG_EDGE_POL_MASK(params, idx), val); spin_unlock(&ctl->lock); @@ -199,7 +309,7 @@ static unsigned int meson_gpio_irq_type_output(unsigned int type) */ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) type |= IRQ_TYPE_LEVEL_HIGH; - else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + else type |= IRQ_TYPE_EDGE_RISING; return type; @@ -328,15 +438,13 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl) { const struct of_device_id *match; - const struct meson_gpio_irq_params *params; int ret; match = of_match_node(meson_irq_gpio_matches, node); if (!match) return -ENODEV; - params = match->data; - ctl->nr_hwirq = params->nr_hwirq; + ctl->params = match->data; ret = of_property_read_variable_u32_array(node, "amlogic,channel-interrupts", @@ -348,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node, return ret; } + ctl->params->ops.gpio_irq_init(ctl); + return 0; } @@ -385,7 +495,8 @@ static int __init meson_gpio_irq_of_init(struct device_node *node, if (ret) goto free_channel_irqs; - domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq, + domain = irq_domain_create_hierarchy(parent_domain, 0, + ctl->params->nr_hwirq, of_node_to_fwnode(node), &meson_gpio_irq_domain_ops, ctl); @@ -396,7 +507,7 @@ static int __init meson_gpio_irq_of_init(struct device_node *node, } pr_info("%d to %d gpio interrupt mux initialized\n", - ctl->nr_hwirq, NUM_CHANNEL); + ctl->params->nr_hwirq, NUM_CHANNEL); return 0; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index f3985469c221..d70507133c1d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node, __sync(); } - mips_gic_base = ioremap_nocache(gic_base, gic_len); + mips_gic_base = ioremap(gic_base, gic_len); gicconfig = read_gic_config(); gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 14618dc0bd39..4a74ac7b7c42 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/ioport.h> @@ -43,6 +44,7 @@ struct icu_chip_data { unsigned int conf_enable; unsigned int conf_disable; unsigned int conf_mask; + unsigned int conf2_mask; unsigned int clr_mfp_irq_base; unsigned int clr_mfp_hwirq; struct irq_domain *domain; @@ -52,9 +54,11 @@ struct mmp_intc_conf { unsigned int conf_enable; unsigned int conf_disable; unsigned int conf_mask; + unsigned int conf2_mask; }; static void __iomem *mmp_icu_base; +static void __iomem *mmp_icu2_base; static struct 
icu_chip_data icu_data[MAX_ICU_NR]; static int max_icu_nr; @@ -97,6 +101,16 @@ static void icu_mask_irq(struct irq_data *d) r &= ~data->conf_mask; r |= data->conf_disable; writel_relaxed(r, mmp_icu_base + (hwirq << 2)); + + if (data->conf2_mask) { + /* + * ICU1 (above) only controls PJ4 MP1; if using SMP, + * we need to also mask the MP2 and MM cores via ICU2. + */ + r = readl_relaxed(mmp_icu2_base + (hwirq << 2)); + r &= ~data->conf2_mask; + writel_relaxed(r, mmp_icu2_base + (hwirq << 2)); + } } else { r = readl_relaxed(data->reg_mask) | (1 << hwirq); writel_relaxed(r, data->reg_mask); @@ -132,11 +146,14 @@ struct irq_chip icu_irq_chip = { static void icu_mux_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain *domain; struct icu_chip_data *data; int i; unsigned long mask, status, n; + chained_irq_enter(chip, desc); + for (i = 1; i < max_icu_nr; i++) { if (irq == icu_data[i].cascade_irq) { domain = icu_data[i].domain; @@ -146,7 +163,7 @@ static void icu_mux_irq_demux(struct irq_desc *desc) } if (i >= max_icu_nr) { pr_err("Spurious irq %d in MMP INTC\n", irq); - return; + goto out; } mask = readl_relaxed(data->reg_mask); @@ -158,6 +175,9 @@ static void icu_mux_irq_demux(struct irq_desc *desc) generic_handle_irq(icu_data[i].virq_base + n); } } + +out: + chained_irq_exit(chip, desc); } static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, @@ -194,6 +214,14 @@ static const struct mmp_intc_conf mmp2_conf = { MMP2_ICU_INT_ROUTE_PJ4_FIQ, }; +static struct mmp_intc_conf mmp3_conf = { + .conf_enable = 0x20, + .conf_disable = 0x0, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, + .conf2_mask = 0xf0, +}; + static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) { int hwirq; @@ -395,7 +423,6 @@ static int __init mmp_of_init(struct device_node *node, icu_data[0].conf_enable = mmp_conf.conf_enable; icu_data[0].conf_disable = mmp_conf.conf_disable; icu_data[0].conf_mask = mmp_conf.conf_mask; - irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp_handle_irq); max_icu_nr = 1; return 0; @@ -414,19 +441,50 @@ static int __init mmp2_of_init(struct device_node *node, icu_data[0].conf_enable = mmp2_conf.conf_enable; icu_data[0].conf_disable = mmp2_conf.conf_disable; icu_data[0].conf_mask = mmp2_conf.conf_mask; - irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp2_handle_irq); max_icu_nr = 1; return 0; } IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init); +static int __init mmp3_of_init(struct device_node *node, + struct device_node *parent) +{ + int ret; + + mmp_icu2_base = of_iomap(node, 1); + if (!mmp_icu2_base) { + pr_err("Failed to get interrupt controller register #2\n"); + return -ENODEV; + } + + ret = mmp_init_bases(node); + if (ret < 0) { + iounmap(mmp_icu2_base); + return ret; + } + + icu_data[0].conf_enable = mmp3_conf.conf_enable; + icu_data[0].conf_disable = mmp3_conf.conf_disable; + icu_data[0].conf_mask = mmp3_conf.conf_mask; + icu_data[0].conf2_mask = mmp3_conf.conf2_mask; + + if (!parent) { + /* This is the main interrupt controller. 
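+		 * A cascaded instance would have a parent and must not
+		 * claim the global handler.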
+		 */
+		set_handle_irq(mmp2_handle_irq);
+	}
+
+	max_icu_nr = 1;
+	return 0;
+}
+IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);
+
 static int __init mmp2_mux_of_init(struct device_node *node,
 				   struct device_node *parent)
 {
-	struct resource res;
 	int i, ret, irq, j = 0;
 	u32 nr_irqs, mfp_irq;
+	u32 reg[4];
 
 	if (!parent)
 		return -ENODEV;
@@ -438,18 +496,22 @@ static int __init mmp2_mux_of_init(struct device_node *node,
 		pr_err("Not found mrvl,intc-nr-irqs property\n");
 		return -EINVAL;
 	}
-	ret = of_address_to_resource(node, 0, &res);
-	if (ret < 0) {
-		pr_err("Not found reg property\n");
-		return -EINVAL;
-	}
-	icu_data[i].reg_status = mmp_icu_base + res.start;
-	ret = of_address_to_resource(node, 1, &res);
+
+	/*
+	 * For historical reasons, the "reg" property of the
+	 * mrvl,mmp2-mux-intc is not a regular "reg" property containing
+	 * addresses on the parent bus, but offsets from the intc's base.
+	 * That is why we can't use of_address_to_resource() here.
+	 */
+	ret = of_property_read_variable_u32_array(node, "reg", reg,
+						  ARRAY_SIZE(reg),
+						  ARRAY_SIZE(reg));
 	if (ret < 0) {
 		pr_err("Not found reg property\n");
 		return -EINVAL;
 	}
-	icu_data[i].reg_mask = mmp_icu_base + res.start;
+	icu_data[i].reg_status = mmp_icu_base + reg[0];
+	icu_data[i].reg_mask = mmp_icu_base + reg[2];
 	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
 	if (!icu_data[i].cascade_irq)
 		return -EINVAL;
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index a166d30deea2..f747e2209ea9 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
 	handle_IRQ(irq, regs);
 }
 
-static int nvic_irq_domain_translate(struct irq_domain *d,
-				     struct irq_fwspec *fwspec,
-				     unsigned long *hwirq, unsigned int *type)
-{
-	if (WARN_ON(fwspec->param_count < 1))
-		return -EINVAL;
-	*hwirq = fwspec->param[0];
-	*type = IRQ_TYPE_NONE;
-	return 0;
-}
-
 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 				unsigned int nr_irqs, void *arg)
 {
@@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	unsigned int type = IRQ_TYPE_NONE;
 	struct irq_fwspec *fwspec = arg;
 
-	ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
 	if (ret)
 		return ret;
 
@@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops nvic_irq_domain_ops = {
-	.translate = nvic_irq_domain_translate,
+	.translate = irq_domain_translate_onecell,
 	.alloc = nvic_irq_domain_alloc,
 	.free = irq_domain_free_irqs_top,
 };
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index f82bc60a6793..6e5e3172796b 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 			goto err0;
 		}
 
-		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+		i->iomem = devm_ioremap(dev, io[k]->start,
 						resource_size(io[k]));
 		if (!i->iomem) {
 			dev_err(dev, "failed to remap IOMEM\n");
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index cf755964f2f8..aa4af886e43a 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
 	}
 }
 
-static void plic_irq_enable(struct irq_data *d)
+static void
plic_irq_unmask(struct irq_data *d) { unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d), cpu_online_mask); @@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d) plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); } -static void plic_irq_disable(struct irq_data *d) +static void plic_irq_mask(struct irq_data *d) { plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); } @@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - if (!irqd_irq_disabled(d)) { - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); - } + plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); + plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d, } #endif +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); +} + static struct irq_chip plic_chip = { .name = "SiFive PLIC", - /* - * There is no need to mask/unmask PLIC interrupts. They are "masked" - * by reading claim and "unmasked" when writing it back. - */ - .irq_enable = plic_irq_enable, - .irq_disable = plic_irq_disable, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, + .irq_eoi = plic_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = plic_set_affinity, #endif @@ -152,15 +154,37 @@ static struct irq_chip plic_chip = { static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq); - irq_set_chip_data(irq, NULL); + irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); irq_set_noprobe(irq); return 0; } +static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type; + struct irq_fwspec *fwspec = arg; + + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = plic_irqdomain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + static const struct irq_domain_ops plic_irqdomain_ops = { - .map = plic_irqdomain_map, - .xlate = irq_domain_xlate_onecell, + .translate = irq_domain_translate_onecell, + .alloc = plic_irq_domain_alloc, + .free = irq_domain_free_irqs_top, }; static struct irq_domain *plic_irqdomain; @@ -179,7 +203,7 @@ static void plic_handle_irq(struct pt_regs *regs) WARN_ON_ONCE(!handler->present); - csr_clear(sie, SIE_SEIE); + csr_clear(CSR_IE, IE_EIE); while ((hwirq = readl(claim))) { int irq = irq_find_mapping(plic_irqdomain, hwirq); @@ -188,9 +212,8 @@ static void plic_handle_irq(struct pt_regs *regs) hwirq); else generic_handle_irq(irq); - writel(hwirq, claim); } - csr_set(sie, SIE_SEIE); + csr_set(CSR_IE, IE_EIE); } /* @@ -244,14 +267,18 @@ static int __init plic_init(struct device_node *node, struct plic_handler *handler; irq_hw_number_t hwirq; int cpu, hartid; + u32 threshold = 0; if (of_irq_parse_one(node, i, &parent)) { pr_err("failed to parse parent for context %d.\n", i); continue; } - /* skip context holes */ - if (parent.args[0] == -1) + /* + * Skip contexts other than external interrupts for our + * privilege level. 
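+		 * Each PLIC context is hard-wired to one (hart, privilege
+		 * mode) pair, and the DT describes that pairing in the
+		 * context's parent interrupt specifier; RV_IRQ_EXT is the
+		 * external-interrupt cause for the mode this kernel runs in.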
+ */ + if (parent.args[0] != RV_IRQ_EXT) continue; hartid = plic_find_hart_id(parent.np); @@ -266,10 +293,16 @@ static int __init plic_init(struct device_node *node, continue; } + /* + * When running in M-mode we need to ignore the S-mode handler. + * Here we assume it always comes later, but that might be a + * little fragile. + */ handler = per_cpu_ptr(&plic_handlers, cpu); if (handler->present) { pr_warn("handler already present for context %d.\n", i); - continue; + threshold = 0xffffffff; + goto done; } handler->present = true; @@ -279,8 +312,9 @@ static int __init plic_init(struct device_node *node, handler->enable_base = plic_regs + ENABLE_BASE + i * ENABLE_PER_HART; +done: /* priority must be > threshold to trigger an interrupt */ - writel(0, handler->hart_base + CONTEXT_THRESHOLD); + writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); for (hwirq = 1; hwirq <= nr_irqs; hwirq++) plic_toggle(handler, hwirq, 0); nr_handlers++; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index ef4d625d2d80..8f6e6b08eadf 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -246,8 +246,8 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d /* No free bits available. Allocate a new vint */ vint_desc = ti_sci_inta_alloc_parent_irq(domain); if (IS_ERR(vint_desc)) { - mutex_unlock(&inta->vint_mutex); - return ERR_PTR(PTR_ERR(vint_desc)); + event_desc = ERR_CAST(vint_desc); + goto unlock; } free_bit = find_first_zero_bit(vint_desc->event_map, @@ -259,6 +259,7 @@ alloc_event: if (IS_ERR(event_desc)) clear_bit(free_bit, vint_desc->event_map); +unlock: mutex_unlock(&inta->vint_mutex); return event_desc; } diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index ed7b4f47ff3f..89121b39be26 100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -166,7 +166,6 @@ static int uniphier_aidet_probe(struct platform_device *pdev) struct device_node *parent_np; struct irq_domain *parent_domain; struct uniphier_aidet_priv *priv; - struct resource *res; parent_np = of_irq_find_parent(dev->of_node); if (!parent_np) @@ -181,8 +180,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->reg_base = devm_ioremap_resource(dev, res); + priv->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c index 5a7efeb3892d..84163f1ebfcf 100644 --- a/drivers/irqchip/irq-zevio.c +++ b/drivers/irqchip/irq-zevio.c @@ -51,7 +51,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs) while (readl(zevio_irq_io + IO_STATUS)) { irqnr = readl(zevio_irq_io + IO_CURRENT); handle_domain_irq(zevio_irq_domain, irqnr, regs); - }; + } } static void __init zevio_init_irq_base(void __iomem *base) diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index d88e993aa66d..abfe59284ff2 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -248,10 +248,8 @@ static int __init combiner_probe(struct platform_device *pdev) return err; combiner->parent_irq = platform_get_irq(pdev, 0); - if (combiner->parent_irq <= 0) { - dev_err(&pdev->dev, "Error getting IRQ resource\n"); + if (combiner->parent_irq <= 0) return -EPROBE_DEFER; - } combiner->domain = 
irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs, &domain_ops, combiner); diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index faa7d61b9d6c..6ae9e1f0819d 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. */ #include <linux/err.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> @@ -13,12 +14,13 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/soc/qcom/irq.h> #include <linux/spinlock.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> -#define PDC_MAX_IRQS 126 +#define PDC_MAX_IRQS 168 +#define PDC_MAX_GPIO_IRQS 256 #define CLEAR_INTR(reg, intr) (reg & ~(1 << intr)) #define ENABLE_INTR(reg, intr) (reg | (1 << intr)) @@ -26,6 +28,8 @@ #define IRQ_ENABLE_BANK 0x10 #define IRQ_i_CFG 0x110 +#define PDC_NO_PARENT_IRQ ~0UL + struct pdc_pin_region { u32 pin_base; u32 parent_base; @@ -47,6 +51,26 @@ static u32 pdc_reg_read(int reg, u32 i) return readl_relaxed(pdc_base + reg + i * sizeof(u32)); } +static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return 0; + + return irq_chip_get_parent_state(d, which, state); +} + +static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool value) +{ + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return 0; + + return irq_chip_set_parent_state(d, which, value); +} + static void pdc_enable_intr(struct irq_data *d, bool on) { int pin_out = d->hwirq; @@ -63,15 +87,37 @@ static void pdc_enable_intr(struct irq_data *d, bool on) raw_spin_unlock(&pdc_lock); } -static void qcom_pdc_gic_mask(struct irq_data *d) +static void qcom_pdc_gic_disable(struct irq_data *d) { + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return; + pdc_enable_intr(d, false); + irq_chip_disable_parent(d); +} + +static void qcom_pdc_gic_enable(struct irq_data *d) +{ + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return; + + pdc_enable_intr(d, true); + irq_chip_enable_parent(d); +} + +static void qcom_pdc_gic_mask(struct irq_data *d) +{ + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return; + irq_chip_mask_parent(d); } static void qcom_pdc_gic_unmask(struct irq_data *d) { - pdc_enable_intr(d, true); + if (d->hwirq == GPIO_NO_WAKE_IRQ) + return; + irq_chip_unmask_parent(d); } @@ -114,6 +160,9 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) int pin_out = d->hwirq; enum pdc_irq_config_bits pdc_type; + if (pin_out == GPIO_NO_WAKE_IRQ) + return 0; + switch (type) { case IRQ_TYPE_EDGE_RISING: pdc_type = PDC_EDGE_RISING; @@ -148,6 +197,10 @@ static struct irq_chip qcom_pdc_gic_chip = { .irq_eoi = irq_chip_eoi_parent, .irq_mask = qcom_pdc_gic_mask, .irq_unmask = qcom_pdc_gic_unmask, + .irq_disable = qcom_pdc_gic_disable, + .irq_enable = qcom_pdc_gic_enable, + .irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state, + .irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = qcom_pdc_gic_set_type, .flags = IRQCHIP_MASK_ON_SUSPEND | @@ -169,8 +222,7 @@ static irq_hw_number_t get_parent_hwirq(int pin) return (region->parent_base + pin - region->pin_base); } - WARN_ON(1); - return ~0UL; + return 
PDC_NO_PARENT_IRQ;
 }
 
 static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
@@ -199,17 +251,17 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
 
 	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
 	if (ret)
-		return -EINVAL;
-
-	parent_hwirq = get_parent_hwirq(hwirq);
-	if (parent_hwirq == ~0UL)
-		return -EINVAL;
+		return ret;
 
 	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
 					    &qcom_pdc_gic_chip, NULL);
 	if (ret)
 		return ret;
 
+	parent_hwirq = get_parent_hwirq(hwirq);
+	if (parent_hwirq == PDC_NO_PARENT_IRQ)
+		return 0;
+
 	if (type & IRQ_TYPE_EDGE_BOTH)
 		type = IRQ_TYPE_EDGE_RISING;
 
@@ -232,6 +284,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
 	.free = irq_domain_free_irqs_common,
 };
 
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+			       unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq, parent_hwirq;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &qcom_pdc_gic_chip, NULL);
+	if (ret)
+		return ret;
+
+	if (hwirq == GPIO_NO_WAKE_IRQ)
+		return 0;
+
+	parent_hwirq = get_parent_hwirq(hwirq);
+	if (parent_hwirq == PDC_NO_PARENT_IRQ)
+		return 0;
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		type = IRQ_TYPE_EDGE_RISING;
+
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		type = IRQ_TYPE_LEVEL_HIGH;
+
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	parent_fwspec.param_count = 3;
+	parent_fwspec.param[0] = 0;
+	parent_fwspec.param[1] = parent_hwirq;
+	parent_fwspec.param[2] = type;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+				       struct irq_fwspec *fwspec,
+				       enum irq_domain_bus_token bus_token)
+{
+	return bus_token == DOMAIN_BUS_WAKEUP;
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+	.select = qcom_pdc_gpio_domain_select,
+	.alloc = qcom_pdc_gpio_alloc,
+	.free = irq_domain_free_irqs_common,
+};
+
 static int pdc_setup_pin_mapping(struct device_node *np)
 {
 	int ret, n;
@@ -270,7 +376,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
 
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
-	struct irq_domain *parent_domain, *pdc_domain;
+	struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
 	int ret;
 
 	pdc_base = of_iomap(node, 0);
@@ -301,12 +407,27 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 		goto fail;
 	}
 
+	pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+					IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
+					PDC_MAX_GPIO_IRQS,
+					of_fwnode_handle(node),
+					&qcom_pdc_gpio_ops, NULL);
+	if (!pdc_gpio_domain) {
+		pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+		ret = -ENOMEM;
+		goto remove;
+	}
+
+	irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
 	return 0;
 
+remove:
+	irq_domain_remove(pdc_domain);
 fail:
 	kfree(pdc_region);
 	iounmap(pdc_base);
 	return ret;
 }
 
-IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
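
The GPIO wakeup domain added above is matched by bus token alone: qcom_pdc_gpio_domain_select() accepts any lookup whose bus token is DOMAIN_BUS_WAKEUP, regardless of the fwnode. A minimal sketch (not part of the commit) of how a consumer such as a GPIO controller driver might locate that domain, assuming the "wakeup-parent" phandle property used by the Qualcomm pinctrl bindings; the function name is illustrative:

/* Sketch: look up the PDC wakeup domain from a consumer driver. */
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *example_find_wakeup_domain(struct device_node *np)
{
	struct device_node *parent;
	struct irq_domain *domain;

	/* "wakeup-parent" points at the PDC node in the consumer's DT. */
	parent = of_parse_phandle(np, "wakeup-parent", 0);
	if (!parent)
		return NULL;

	/*
	 * Matched by qcom_pdc_gpio_domain_select(): any request with
	 * bus token DOMAIN_BUS_WAKEUP selects the PDC's GPIO domain.
	 */
	domain = irq_find_matching_host(parent, DOMAIN_BUS_WAKEUP);
	of_node_put(parent);

	return domain;
}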