Diffstat (limited to 'arch/mips/cavium-octeon/octeon-irq.c')
-rw-r--r-- | arch/mips/cavium-octeon/octeon-irq.c | 842
1 file changed, 679 insertions, 163 deletions
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 274cd4fad30c..02b15eed4bcd 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -16,12 +16,11 @@ #include <linux/of.h> #include <asm/octeon/octeon.h> - -static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); -static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); +#include <asm/octeon/cvmx-ciu2-defs.h> static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); +static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; @@ -29,8 +28,9 @@ union octeon_ciu_chip_data { void *p; unsigned long l; struct { - unsigned int line:6; - unsigned int bit:6; + unsigned long line:6; + unsigned long bit:6; + unsigned long gpio_line:6; } s; }; @@ -45,7 +45,7 @@ struct octeon_core_chip_data { static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; -static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, +static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, struct irq_chip *chip, irq_flow_handler_t handler) { @@ -56,6 +56,7 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, cd.l = 0; cd.s.line = line; cd.s.bit = bit; + cd.s.gpio_line = gpio_line; irq_set_chip_data(irq, cd.p); octeon_irq_ciu_to_irq[line][bit] = irq; @@ -231,22 +232,31 @@ static void octeon_irq_ciu_enable(struct irq_data *data) unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; + raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); cd.p = irq_data_get_irq_chip_data(data); + raw_spin_lock_irqsave(lock, flags); if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); - set_bit(cd.s.bit, pen); + __set_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - set_bit(cd.s.bit, pen); + __set_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } + raw_spin_unlock_irqrestore(lock, flags); } static void octeon_irq_ciu_enable_local(struct irq_data *data) @@ -254,22 +264,31 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; + raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); cd.p = irq_data_get_irq_chip_data(data); + raw_spin_lock_irqsave(lock, flags); if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); - set_bit(cd.s.bit, pen); + __set_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. 
+ */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); - set_bit(cd.s.bit, pen); + __set_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } + raw_spin_unlock_irqrestore(lock, flags); } static void octeon_irq_ciu_disable_local(struct irq_data *data) @@ -277,22 +296,31 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; + raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); cd.p = irq_data_get_irq_chip_data(data); + raw_spin_lock_irqsave(lock, flags); if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); - clear_bit(cd.s.bit, pen); + __clear_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); - clear_bit(cd.s.bit, pen); + __clear_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } + raw_spin_unlock_irqrestore(lock, flags); } static void octeon_irq_ciu_disable_all(struct irq_data *data) @@ -301,29 +329,30 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data) unsigned long *pen; int cpu; union octeon_ciu_chip_data cd; - - wmb(); /* Make sure flag changes arrive before register updates. */ + raw_spinlock_t *lock; cd.p = irq_data_get_irq_chip_data(data); - if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); + for_each_online_cpu(cpu) { + int coreid = octeon_coreid_for_cpu(cpu); + lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); + if (cd.s.line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); - clear_bit(cd.s.bit, pen); - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); - } - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); - } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); + else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - clear_bit(cd.s.bit, pen); + + raw_spin_lock_irqsave(lock, flags); + __clear_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. 
+ */ + wmb(); + if (cd.s.line == 0) + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); + else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); - } - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -333,27 +362,30 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data) unsigned long *pen; int cpu; union octeon_ciu_chip_data cd; + raw_spinlock_t *lock; cd.p = irq_data_get_irq_chip_data(data); - if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); + for_each_online_cpu(cpu) { + int coreid = octeon_coreid_for_cpu(cpu); + lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); + if (cd.s.line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); - set_bit(cd.s.bit, pen); - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); - } - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); - } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); + else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - set_bit(cd.s.bit, pen); + + raw_spin_lock_irqsave(lock, flags); + __set_bit(cd.s.bit, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. + */ + wmb(); + if (cd.s.line == 0) + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); + else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); - } - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -435,7 +467,7 @@ static void octeon_irq_ciu_ack(struct irq_data *data) u64 mask; union octeon_ciu_chip_data cd; - cd.p = data->chip_data; + cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { @@ -456,9 +488,7 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) u64 mask; union octeon_ciu_chip_data cd; - wmb(); /* Make sure flag changes arrive before register updates. 
*/ - - cd.p = data->chip_data; + cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { @@ -486,7 +516,7 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) u64 mask; union octeon_ciu_chip_data cd; - cd.p = data->chip_data; + cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { @@ -521,7 +551,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data) cfg.s.fil_cnt = 7; cfg.s.fil_sel = 3; - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), cfg.u64); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); } static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) @@ -549,7 +579,7 @@ static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); octeon_irq_ciu_disable_all_v2(data); } @@ -559,7 +589,7 @@ static void octeon_irq_ciu_disable_gpio(struct irq_data *data) union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); octeon_irq_ciu_disable_all(data); } @@ -570,7 +600,7 @@ static void octeon_irq_ciu_gpio_ack(struct irq_data *data) u64 mask; cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit - 16); + mask = 1ull << (cd.s.gpio_line); cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); } @@ -615,8 +645,10 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); unsigned long flags; union octeon_ciu_chip_data cd; + unsigned long *pen; + raw_spinlock_t *lock; - cd.p = data->chip_data; + cd.p = irq_data_get_irq_chip_data(data); /* * For non-v2 CIU, we will allow only single CPU affinity. @@ -629,36 +661,36 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, if (!enable_one) return 0; - if (cd.s.line == 0) { - raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); - unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); - if (cpumask_test_cpu(cpu, dest) && enable_one) { - enable_one = false; - set_bit(cd.s.bit, pen); - } else { - clear_bit(cd.s.bit, pen); - } - cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); + for_each_online_cpu(cpu) { + int coreid = octeon_coreid_for_cpu(cpu); + + lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); + raw_spin_lock_irqsave(lock, flags); + + if (cd.s.line == 0) + pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); + else + pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); + + if (cpumask_test_cpu(cpu, dest) && enable_one) { + enable_one = 0; + __set_bit(cd.s.bit, pen); + } else { + __clear_bit(cd.s.bit, pen); } - raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); - } else { - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); - for_each_online_cpu(cpu) { - int coreid = octeon_coreid_for_cpu(cpu); - unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before + * enabling the irq. 
+ */ + wmb(); - if (cpumask_test_cpu(cpu, dest) && enable_one) { - enable_one = false; - set_bit(cd.s.bit, pen); - } else { - clear_bit(cd.s.bit, pen); - } + if (cd.s.line == 0) + cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); + else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); - } - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); + + raw_spin_unlock_irqrestore(lock, flags); } return 0; } @@ -679,7 +711,7 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, if (!enable_one) return 0; - cd.p = data->chip_data; + cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << cd.s.bit; if (cd.s.line == 0) { @@ -714,14 +746,6 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, #endif /* - * The v1 CIU code already masks things, so supply a dummy version to - * the core chip code. - */ -static void octeon_irq_dummy_mask(struct irq_data *data) -{ -} - -/* * Newer octeon chips have support for lockless CIU operation. */ static struct irq_chip octeon_irq_chip_ciu_v2 = { @@ -742,7 +766,8 @@ static struct irq_chip octeon_irq_chip_ciu = { .irq_enable = octeon_irq_ciu_enable, .irq_disable = octeon_irq_ciu_disable_all, .irq_ack = octeon_irq_ciu_ack, - .irq_mask = octeon_irq_dummy_mask, + .irq_mask = octeon_irq_ciu_disable_local, + .irq_unmask = octeon_irq_ciu_enable, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity, .irq_cpu_offline = octeon_irq_cpu_offline_ciu, @@ -766,6 +791,8 @@ static struct irq_chip octeon_irq_chip_ciu_mbox = { .name = "CIU-M", .irq_enable = octeon_irq_ciu_enable_all, .irq_disable = octeon_irq_ciu_disable_all, + .irq_ack = octeon_irq_ciu_disable_local, + .irq_eoi = octeon_irq_ciu_enable_local, .irq_cpu_online = octeon_irq_ciu_enable_local, .irq_cpu_offline = octeon_irq_ciu_disable_local, @@ -790,7 +817,8 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = { .name = "CIU-GPIO", .irq_enable = octeon_irq_ciu_enable_gpio, .irq_disable = octeon_irq_ciu_disable_gpio, - .irq_mask = octeon_irq_dummy_mask, + .irq_mask = octeon_irq_ciu_disable_local, + .irq_unmask = octeon_irq_ciu_enable, .irq_ack = octeon_irq_ciu_gpio_ack, .irq_set_type = octeon_irq_ciu_gpio_set_type, #ifdef CONFIG_SMP @@ -809,12 +837,18 @@ static void octeon_irq_ciu_wd_enable(struct irq_data *data) unsigned long *pen; int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ int cpu = octeon_cpu_for_coreid(coreid); + raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); + raw_spin_lock_irqsave(lock, flags); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - set_bit(coreid, pen); + __set_bit(coreid, pen); + /* + * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling + * the irq. 
+ */ + wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); - raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } /* @@ -843,7 +877,8 @@ static struct irq_chip octeon_irq_chip_ciu_wd = { .name = "CIU-W", .irq_enable = octeon_irq_ciu_wd_enable, .irq_disable = octeon_irq_ciu_disable_all, - .irq_mask = octeon_irq_dummy_mask, + .irq_mask = octeon_irq_ciu_disable_local, + .irq_unmask = octeon_irq_ciu_enable_local, }; static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit) @@ -976,19 +1011,20 @@ static int octeon_irq_ciu_map(struct irq_domain *d, return -EINVAL; if (octeon_irq_ciu_is_edge(line, bit)) - octeon_irq_set_ciu_mapping(virq, line, bit, + octeon_irq_set_ciu_mapping(virq, line, bit, 0, octeon_irq_ciu_chip, handle_edge_irq); else - octeon_irq_set_ciu_mapping(virq, line, bit, + octeon_irq_set_ciu_mapping(virq, line, bit, 0, octeon_irq_ciu_chip, handle_level_irq); return 0; } -static int octeon_irq_gpio_map(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw) +static int octeon_irq_gpio_map_common(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw, + int line_limit, struct irq_chip *chip) { struct octeon_irq_gpio_domain_data *gpiod = d->host_data; unsigned int line, bit; @@ -999,15 +1035,20 @@ static int octeon_irq_gpio_map(struct irq_domain *d, hw += gpiod->base_hwirq; line = hw >> 6; bit = hw & 63; - if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) + if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; - octeon_irq_set_ciu_mapping(virq, line, bit, - octeon_irq_gpio_chip, - octeon_irq_handle_gpio); + octeon_irq_set_ciu_mapping(virq, line, bit, hw, + chip, octeon_irq_handle_gpio); return 0; } +static int octeon_irq_gpio_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) +{ + return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); +} + static struct irq_domain_ops octeon_irq_domain_ciu_ops = { .map = octeon_irq_ciu_map, .xlate = octeon_irq_ciu_xlat, @@ -1018,13 +1059,12 @@ static struct irq_domain_ops octeon_irq_domain_gpio_ops = { .xlate = octeon_irq_gpio_xlat, }; -static void octeon_irq_ip2_v1(void) +static void octeon_irq_ip2_ciu(void) { const unsigned long core_id = cvmx_get_core_num(); u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); - clear_c0_status(STATUSF_IP2); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[0][bit]; @@ -1035,32 +1075,13 @@ static void octeon_irq_ip2_v1(void) } else { spurious_interrupt(); } - set_c0_status(STATUSF_IP2); } -static void octeon_irq_ip2_v2(void) -{ - const unsigned long core_id = cvmx_get_core_num(); - u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); - - ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); - if (likely(ciu_sum)) { - int bit = fls64(ciu_sum) - 1; - int irq = octeon_irq_ciu_to_irq[0][bit]; - if (likely(irq)) - do_IRQ(irq); - else - spurious_interrupt(); - } else { - spurious_interrupt(); - } -} -static void octeon_irq_ip3_v1(void) +static void octeon_irq_ip3_ciu(void) { u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); - clear_c0_status(STATUSF_IP3); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[1][bit]; @@ -1071,24 +1092,13 @@ static void octeon_irq_ip3_v1(void) } else { spurious_interrupt(); } - set_c0_status(STATUSF_IP3); } -static void octeon_irq_ip3_v2(void) -{ - u64 
ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); +static bool octeon_irq_use_ip4; - ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); - if (likely(ciu_sum)) { - int bit = fls64(ciu_sum) - 1; - int irq = octeon_irq_ciu_to_irq[1][bit]; - if (likely(irq)) - do_IRQ(irq); - else - spurious_interrupt(); - } else { - spurious_interrupt(); - } +static void __cpuinit octeon_irq_local_enable_ip4(void *arg) +{ + set_c0_status(STATUSF_IP4); } static void octeon_irq_ip4_mask(void) @@ -1103,6 +1113,13 @@ static void (*octeon_irq_ip4)(void); void __cpuinitdata (*octeon_irq_setup_secondary)(void); +void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) +{ + octeon_irq_ip4 = h; + octeon_irq_use_ip4 = true; + on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); +} + static void __cpuinit octeon_irq_percpu_enable(void) { irq_cpu_online(); @@ -1111,6 +1128,12 @@ static void __cpuinit octeon_irq_percpu_enable(void) static void __cpuinit octeon_irq_init_ciu_percpu(void) { int coreid = cvmx_get_core_num(); + + + __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; + __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; + wmb(); + raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock)); /* * Disable All CIU Interrupts. The ones we need will be * enabled later. Read the SUM register so we know the write @@ -1123,12 +1146,30 @@ static void __cpuinit octeon_irq_init_ciu_percpu(void) cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); } -static void __cpuinit octeon_irq_setup_secondary_ciu(void) +static void octeon_irq_init_ciu2_percpu(void) { + u64 regx, ipx; + int coreid = cvmx_get_core_num(); + u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); - __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; - __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; + /* + * Disable All CIU2 Interrupts. The ones we need will be + * enabled later. Read the SUM register so we know the write + * completed. + * + * There are 9 registers and 3 IPX levels with strides 0x1000 + * and 0x200 respectivly. Use loops to clear them. 
+ */ + for (regx = 0; regx <= 0x8000; regx += 0x1000) { + for (ipx = 0; ipx <= 0x400; ipx += 0x200) + cvmx_write_csr(base + regx + ipx, 0); + } + cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); +} + +static void __cpuinit octeon_irq_setup_secondary_ciu(void) +{ octeon_irq_init_ciu_percpu(); octeon_irq_percpu_enable(); @@ -1137,6 +1178,19 @@ static void __cpuinit octeon_irq_setup_secondary_ciu(void) clear_c0_status(STATUSF_IP4); } +static void octeon_irq_setup_secondary_ciu2(void) +{ + octeon_irq_init_ciu2_percpu(); + octeon_irq_percpu_enable(); + + /* Enable the CIU lines */ + set_c0_status(STATUSF_IP3 | STATUSF_IP2); + if (octeon_irq_use_ip4) + set_c0_status(STATUSF_IP4); + else + clear_c0_status(STATUSF_IP4); +} + static void __init octeon_irq_init_ciu(void) { unsigned int i; @@ -1150,19 +1204,17 @@ static void __init octeon_irq_init_ciu(void) octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; + octeon_irq_ip2 = octeon_irq_ip2_ciu; + octeon_irq_ip3 = octeon_irq_ip3_ciu; if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - octeon_irq_ip2 = octeon_irq_ip2_v2; - octeon_irq_ip3 = octeon_irq_ip3_v2; chip = &octeon_irq_chip_ciu_v2; chip_mbox = &octeon_irq_chip_ciu_mbox_v2; chip_wd = &octeon_irq_chip_ciu_wd_v2; octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; } else { - octeon_irq_ip2 = octeon_irq_ip2_v1; - octeon_irq_ip3 = octeon_irq_ip3_v1; chip = &octeon_irq_chip_ciu; chip_mbox = &octeon_irq_chip_ciu_mbox; chip_wd = &octeon_irq_chip_ciu_wd; @@ -1192,6 +1244,7 @@ static void __init octeon_irq_init_ciu(void) ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); if (ciu_node) { ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); + irq_set_default_host(ciu_domain); of_node_put(ciu_node); } else panic("Cannot find device node for cavium,octeon-3860-ciu."); @@ -1200,8 +1253,8 @@ static void __init octeon_irq_init_ciu(void) for (i = 0; i < 16; i++) octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); for (i = 0; i < 4; i++) octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); @@ -1217,7 +1270,7 @@ static void __init octeon_irq_init_ciu(void) /* CIU_1 */ for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); @@ -1226,6 +1279,466 @@ static void __init octeon_irq_init_ciu(void) clear_c0_status(STATUSF_IP4); } +/* + * Watchdog interrupts are special. They are associated with a single + * core, so we hardwire the affinity to that core. 
+ */ +static void octeon_irq_ciu2_wd_enable(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = data->irq - OCTEON_IRQ_WDOG0; + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); + +} + +static void octeon_irq_ciu2_enable(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int cpu = next_cpu_for_irq(data); + int coreid = octeon_coreid_for_cpu(cpu); + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); +} + +static void octeon_irq_ciu2_enable_local(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = cvmx_get_core_num(); + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); + +} + +static void octeon_irq_ciu2_disable_local(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = cvmx_get_core_num(); + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); + +} + +static void octeon_irq_ciu2_ack(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = cvmx_get_core_num(); + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); + +} + +static void octeon_irq_ciu2_disable_all(struct irq_data *data) +{ + int cpu; + u64 mask; + union octeon_ciu_chip_data cd; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd.s.bit); + + for_each_online_cpu(cpu) { + u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + cvmx_write_csr(en_addr, mask); + } +} + +static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) +{ + int cpu; + u64 mask; + + mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); + + for_each_online_cpu(cpu) { + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); + cvmx_write_csr(en_addr, mask); + } +} + +static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) +{ + int cpu; + u64 mask; + + mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); + + for_each_online_cpu(cpu) { + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); + cvmx_write_csr(en_addr, mask); + } +} + +static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = cvmx_get_core_num(); + + mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); + en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); + cvmx_write_csr(en_addr, mask); +} + +static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) +{ + u64 mask; + u64 en_addr; + int coreid = cvmx_get_core_num(); + + mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); + en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); + cvmx_write_csr(en_addr, mask); +} + +#ifdef CONFIG_SMP +static int octeon_irq_ciu2_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) +{ + int cpu; + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); + u64 mask; 
+ union octeon_ciu_chip_data cd; + + if (!enable_one) + return 0; + + cd.p = irq_data_get_irq_chip_data(data); + mask = 1ull << cd.s.bit; + + for_each_online_cpu(cpu) { + u64 en_addr; + if (cpumask_test_cpu(cpu, dest) && enable_one) { + enable_one = false; + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + } else { + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + } + cvmx_write_csr(en_addr, mask); + } + + return 0; +} +#endif + +static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) +{ + octeon_irq_gpio_setup(data); + octeon_irq_ciu2_enable(data); +} + +static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) +{ + union octeon_ciu_chip_data cd; + cd.p = irq_data_get_irq_chip_data(data); + + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + + octeon_irq_ciu2_disable_all(data); +} + +static struct irq_chip octeon_irq_chip_ciu2 = { + .name = "CIU2-E", + .irq_enable = octeon_irq_ciu2_enable, + .irq_disable = octeon_irq_ciu2_disable_all, + .irq_ack = octeon_irq_ciu2_ack, + .irq_mask = octeon_irq_ciu2_disable_local, + .irq_unmask = octeon_irq_ciu2_enable, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu2_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu2_mbox = { + .name = "CIU2-M", + .irq_enable = octeon_irq_ciu2_mbox_enable_all, + .irq_disable = octeon_irq_ciu2_mbox_disable_all, + .irq_ack = octeon_irq_ciu2_mbox_disable_local, + .irq_eoi = octeon_irq_ciu2_mbox_enable_local, + + .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local, + .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local, + .flags = IRQCHIP_ONOFFLINE_ENABLED, +}; + +static struct irq_chip octeon_irq_chip_ciu2_wd = { + .name = "CIU2-W", + .irq_enable = octeon_irq_ciu2_wd_enable, + .irq_disable = octeon_irq_ciu2_disable_all, + .irq_mask = octeon_irq_ciu2_disable_local, + .irq_unmask = octeon_irq_ciu2_enable_local, +}; + +static struct irq_chip octeon_irq_chip_ciu2_gpio = { + .name = "CIU-GPIO", + .irq_enable = octeon_irq_ciu2_enable_gpio, + .irq_disable = octeon_irq_ciu2_disable_gpio, + .irq_ack = octeon_irq_ciu_gpio_ack, + .irq_mask = octeon_irq_ciu2_disable_local, + .irq_unmask = octeon_irq_ciu2_enable, + .irq_set_type = octeon_irq_ciu_gpio_set_type, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu2_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static int octeon_irq_ciu2_xlat(struct irq_domain *d, + struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned int ciu, bit; + + ciu = intspec[0]; + bit = intspec[1]; + + /* Line 7 are the GPIO lines */ + if (ciu > 6 || bit > 63) + return -EINVAL; + + *out_hwirq = (ciu << 6) | bit; + *out_type = 0; + + return 0; +} + +static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit) +{ + bool edge = false; + + if (line == 3) /* MIO */ + switch (bit) { + case 2: /* IPD_DRP */ + case 8 ... 11: /* Timers */ + case 48: /* PTP */ + edge = true; + break; + default: + break; + } + else if (line == 6) /* PKT */ + switch (bit) { + case 52 ... 53: /* ILK_DRP */ + case 8 ... 
12: /* GMX_DRP */ + edge = true; + break; + default: + break; + } + return edge; +} + +static int octeon_irq_ciu2_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) +{ + unsigned int line = hw >> 6; + unsigned int bit = hw & 63; + + if (!octeon_irq_virq_in_range(virq)) + return -EINVAL; + + /* Line 7 are the GPIO lines */ + if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0) + return -EINVAL; + + if (octeon_irq_ciu2_is_edge(line, bit)) + octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu2, + handle_edge_irq); + else + octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu2, + handle_level_irq); + + return 0; +} +static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) +{ + return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); +} + +static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { + .map = octeon_irq_ciu2_map, + .xlate = octeon_irq_ciu2_xlat, +}; + +static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { + .map = octeon_irq_ciu2_gpio_map, + .xlate = octeon_irq_gpio_xlat, +}; + +static void octeon_irq_ciu2(void) +{ + int line; + int bit; + int irq; + u64 src_reg, src, sum; + const unsigned long core_id = cvmx_get_core_num(); + + sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful; + + if (unlikely(!sum)) + goto spurious; + + line = fls64(sum) - 1; + src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line); + src = cvmx_read_csr(src_reg); + + if (unlikely(!src)) + goto spurious; + + bit = fls64(src) - 1; + irq = octeon_irq_ciu_to_irq[line][bit]; + if (unlikely(!irq)) + goto spurious; + + do_IRQ(irq); + goto out; + +spurious: + spurious_interrupt(); +out: + /* CN68XX pass 1.x has an errata that accessing the ACK registers + can stop interrupts from propagating */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); + else + cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id)); + return; +} + +static void octeon_irq_ciu2_mbox(void) +{ + int line; + + const unsigned long core_id = cvmx_get_core_num(); + u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60; + + if (unlikely(!sum)) + goto spurious; + + line = fls64(sum) - 1; + + do_IRQ(OCTEON_IRQ_MBOX0 + line); + goto out; + +spurious: + spurious_interrupt(); +out: + /* CN68XX pass 1.x has an errata that accessing the ACK registers + can stop interrupts from propagating */ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); + else + cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id)); + return; +} + +static void __init octeon_irq_init_ciu2(void) +{ + unsigned int i; + struct device_node *gpio_node; + struct device_node *ciu_node; + struct irq_domain *ciu_domain = NULL; + + octeon_irq_init_ciu2_percpu(); + octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; + + octeon_irq_ip2 = octeon_irq_ciu2; + octeon_irq_ip3 = octeon_irq_ciu2_mbox; + octeon_irq_ip4 = octeon_irq_ip4_mask; + + /* Mips internal */ + octeon_irq_init_core(); + + gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); + if (gpio_node) { + struct octeon_irq_gpio_domain_data *gpiod; + + gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); + if (gpiod) { + /* gpio domain host_data is the base hwirq number. 
*/ + gpiod->base_hwirq = 7 << 6; + irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); + of_node_put(gpio_node); + } else + pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); + } else + pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); + + ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); + if (ciu_node) { + ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); + irq_set_default_host(ciu_domain); + of_node_put(ciu_node); + } else + panic("Cannot find device node for cavium,octeon-6880-ciu2."); + + /* CUI2 */ + for (i = 0; i < 64; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); + + for (i = 0; i < 32; i++) + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, + &octeon_irq_chip_ciu2_wd, handle_level_irq); + + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); + + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); + + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); + + irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); + irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); + irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); + irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); + + /* Enable the CIU lines */ + set_c0_status(STATUSF_IP3 | STATUSF_IP2); + clear_c0_status(STATUSF_IP4); +} + void __init arch_init_irq(void) { #ifdef CONFIG_SMP @@ -1233,7 +1746,10 @@ void __init arch_init_irq(void) cpumask_clear(irq_default_affinity); cpumask_set_cpu(smp_processor_id(), irq_default_affinity); #endif - octeon_irq_init_ciu(); + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + octeon_irq_init_ciu2(); + else + octeon_irq_init_ciu(); } asmlinkage void plat_irq_dispatch(void) |
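The per-interrupt bookkeeping for CIU sources is a pointer-sized union: the patch widens the bitfields to `unsigned long` and adds a `gpio_line` field, so the `line`/`bit`/`gpio_line` triple is packed directly into the value stored by `irq_set_chip_data()` with no separate allocation. The following is a small standalone model of that round trip; the field widths match the union in the patch, while the surrounding test harness and the example values (GPIO line 0 sitting at CIU0 bit 16) are illustrative only.

```c
/*
 * Standalone model of how the driver packs a (line, bit, gpio_line)
 * triple into one pointer-sized word used as irq chip data.
 * Field widths follow the union in the patch; the harness is illustrative.
 */
#include <assert.h>
#include <stdio.h>

union ciu_chip_data {
	void *p;                        /* what irq_set_chip_data() stores      */
	unsigned long l;                /* raw pointer-sized scalar             */
	struct {
		unsigned long line:6;       /* CIU sum/enable register line         */
		unsigned long bit:6;        /* bit within that 64-bit register      */
		unsigned long gpio_line:6;  /* GPIO line, for CIU-GPIO sources      */
	} s;
};

int main(void)
{
	union ciu_chip_data cd;

	cd.l = 0;                       /* clear all fields, as the driver does */
	cd.s.line = 0;
	cd.s.bit = 16;                  /* example: a GPIO source on CIU0       */
	cd.s.gpio_line = 0;

	/* Round-trip through the void * member, as the chip_data accessors do. */
	void *stored = cd.p;
	union ciu_chip_data out = { .p = stored };

	assert(out.s.line == 0 && out.s.bit == 16 && out.s.gpio_line == 0);
	printf("line=%lu bit=%lu gpio_line=%lu\n",
	       (unsigned long)out.s.line,
	       (unsigned long)out.s.bit,
	       (unsigned long)out.s.gpio_line);
	return 0;
}
```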
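The central locking change replaces the two global `octeon_irq_ciu0_lock`/`octeon_irq_ciu1_lock` spinlocks with one `raw_spinlock_t` per CPU guarding that CPU's enable-mask mirrors. Because `octeon_irq_ip2_ciu()` and `octeon_irq_ip3_ciu()` read the mirrors locklessly in the dispatch path, every writer issues a `wmb()` between updating the mirror and writing the enable CSR, so the mask update is visible before the hardware can deliver the interrupt. The enable path, condensed from the hunks above (kernel context assumed; the patch keeps separate line-0/line-1 branches, folded here for brevity):

```c
/* Condensed view of the patched enable path; not a standalone program. */
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);     /* one lock per target CPU     */
	if (cd.s.line == 0)
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
	else
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

	__set_bit(cd.s.bit, pen);               /* non-atomic is fine under lock */

	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu(), which read the
	 * mirror without the lock, before the CSR write enables the irq.
	 */
	wmb();
	if (cd.s.line == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	else
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
```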
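On CN68XX the new CIU2 paths need neither the mirror words nor the per-CPU lock: the controller provides write-1-to-set and write-1-to-clear enable registers, so masking and unmasking are single CSR writes with no read-modify-write. The per-line register banks sit 0x1000 bytes apart, which is why the handlers add `0x1000ull * cd.s.line` to the IP2 WRKQ base. A hypothetical helper distilling that addressing (the `ciu2_set_bit()` name is mine; the macros are the ones used in the patch, kernel context assumed):

```c
/*
 * Illustrative helper: enable or disable one CIU2 source for one core
 * with a single W1S/W1C write, as octeon_irq_ciu2_enable() and
 * octeon_irq_ciu2_disable_local() do in the patch.
 */
static void ciu2_set_bit(int coreid, unsigned int line, unsigned int bit,
			 bool enable)
{
	u64 base = enable ? CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid)
			  : CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid);

	/* Register banks for successive lines are 0x1000 bytes apart. */
	cvmx_write_csr(base + 0x1000ull * line, 1ull << bit);
}
```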