Diffstat (limited to 'arch/mips/netlogic')
-rw-r--r--  arch/mips/netlogic/Kconfig            |  28
-rw-r--r--  arch/mips/netlogic/common/irq.c       | 165
-rw-r--r--  arch/mips/netlogic/common/smp.c       |  89
-rw-r--r--  arch/mips/netlogic/common/smpboot.S   |   6
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c      |  67
-rw-r--r--  arch/mips/netlogic/xlp/setup.c        |  50
-rw-r--r--  arch/mips/netlogic/xlp/wakeup.c       |  83
-rw-r--r--  arch/mips/netlogic/xlr/Makefile       |   4
-rw-r--r--  arch/mips/netlogic/xlr/fmn-config.c   | 290
-rw-r--r--  arch/mips/netlogic/xlr/fmn.c          | 204
-rw-r--r--  arch/mips/netlogic/xlr/setup.c        |  37
-rw-r--r--  arch/mips/netlogic/xlr/wakeup.c       |  23
12 files changed, 830 insertions, 216 deletions
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 8059eb76f8eb..3c05bf9e280a 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -9,6 +9,34 @@ config DT_XLP_EVP
This DTB will be used if the firmware does not pass in a DTB
pointer to the kernel. The corresponding DTS file is at
arch/mips/netlogic/dts/xlp_evp.dts
+
+config NLM_MULTINODE
+ bool "Support for multi-chip boards"
+ depends on NLM_XLP_BOARD
+ default n
+ help
+ Add support for boards with 2 or 4 XLPs connected over ICI.
+
+if NLM_MULTINODE
+choice
+ prompt "Number of XLPs on the board"
+ default NLM_MULTINODE_2
+ help
+ In the multi-node case, specify the number of SoCs on the board.
+
+config NLM_MULTINODE_2
+ bool "Dual-XLP board"
+ help
+	  Support boards with up to two XLPs connected over ICI.
+
+config NLM_MULTINODE_4
+ bool "Quad-XLP board"
+ help
+	  Support boards with up to four XLPs connected over ICI.
+
+endchoice
+
+endif
endif
config NLM_COMMON
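
The node count selected above is typically consumed as a compile-time constant elsewhere in the port. A minimal sketch of how a header could derive such a constant from these options (NLM_NR_NODES appears later in this patch; the exact header and conditional structure here are assumptions, not part of the change):

/* Sketch only: derive the node count from the Kconfig choice above */
#if defined(CONFIG_NLM_MULTINODE_4)
#define NLM_NR_NODES	4
#elif defined(CONFIG_NLM_MULTINODE_2)
#define NLM_NR_NODES	2
#else
#define NLM_NR_NODES	1
#endif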
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce093..00dcc7a2bc5a 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -36,7 +36,6 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/irq.h>
@@ -59,68 +58,70 @@
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
#else
#error "Unknown CPU"
#endif
-/*
- * These are the routines that handle all the low level interrupt stuff.
- * Actions handled here are: initialization of the interrupt map, requesting of
- * interrupt lines by handlers, dispatching if interrupts to handlers, probing
- * for interrupt lines
- */
-/* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+ (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK 0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+ (1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+ void (*extra_ack)(struct irq_data *);
+ struct nlm_soc_info *node;
+ int picirq;
+ int irt;
+ int flags;
+};
static void xlp_pic_enable(struct irq_data *d)
{
unsigned long flags;
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_enable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_disable(struct irq_data *d)
{
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
unsigned long flags;
- int irt;
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_disable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_mask_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+ uint64_t mask = 1ull << pd->picirq;
write_c0_eirr(mask); /* ack by writing EIRR */
}
static void xlp_pic_unmask(struct irq_data *d)
{
- void *hd = irq_data_get_irq_handler_data(d);
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
+ if (!pd)
return;
- if (hd) {
- void (*extra_ack)(void *) = hd;
- extra_ack(d);
- }
+ if (pd->extra_ack)
+ pd->extra_ack(d);
+
/* Ack is a single write, no need to lock */
- nlm_pic_ack(nlm_pic_base, irt);
+ nlm_pic_ack(pd->node->picbase, pd->irt);
}
static struct irq_chip xlp_pic = {
@@ -174,64 +175,108 @@ struct irq_chip nlm_cpu_intr = {
.irq_eoi = cpuintr_ack,
};
-void __init init_nlm_common_irqs(void)
+static void __init nlm_init_percpu_irqs(void)
{
- int i, irq, irt;
+ int i;
for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
-
- for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
- irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
-
#ifdef CONFIG_SMP
irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
nlm_smp_function_ipi_handler);
irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
nlm_smp_resched_ipi_handler);
- nlm_irq_mask |=
- ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
#endif
+}
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+ BUG_ON(pic_data == NULL);
+ pic_data->irt = irt;
+ pic_data->picirq = picirq;
+ pic_data->node = nlm_get_node(node);
+ irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+ irq_set_handler_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = irq_get_handler_data(xirq);
+ pic_data->extra_ack = xack;
+}
- for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) {
- irt = nlm_irq_to_irt(irq);
+static void nlm_init_node_irqs(int node)
+{
+ int i, irt;
+ uint64_t irqmask;
+ struct nlm_soc_info *nodep;
+
+ pr_info("Init IRQ for node %d\n", node);
+ nodep = nlm_get_node(node);
+ irqmask = PERCPU_IRQ_MASK;
+ for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+ irt = nlm_irq_to_irt(i);
if (irt == -1)
continue;
- nlm_irq_mask |= (1ULL << irq);
- nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
+ nlm_setup_pic_irq(node, i, i, irt);
+ /* set interrupts to first cpu in node */
+ nlm_pic_init_irt(nodep->picbase, irt, i,
+ node * NLM_CPUS_PER_NODE);
+ irqmask |= (1ull << i);
}
-
- nlm_irq_mask |= (1ULL << IRQ_TIMER);
+ nodep->irqmask = irqmask;
}
void __init arch_init_irq(void)
{
/* Initialize the irq descriptors */
- init_nlm_common_irqs();
-
- write_c0_eimr(nlm_irq_mask);
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
}
-void __cpuinit nlm_smp_irq_init(void)
+void nlm_smp_irq_init(int hwcpuid)
{
- /* set interrupt mask for non-zero cpus */
- write_c0_eimr(nlm_irq_mask);
+ int node, cpu;
+
+ node = hwcpuid / NLM_CPUS_PER_NODE;
+ cpu = hwcpuid % NLM_CPUS_PER_NODE;
+
+ if (cpu == 0 && node != 0)
+ nlm_init_node_irqs(node);
+ write_c0_eimr(nlm_current_node()->irqmask);
}
asmlinkage void plat_irq_dispatch(void)
{
uint64_t eirr;
- int i;
+ int i, node;
+ node = nlm_nodeid();
eirr = read_c0_eirr() & read_c0_eimr();
- if (eirr & (1 << IRQ_TIMER)) {
- do_IRQ(IRQ_TIMER);
- return;
- }
i = __ilog2_u64(eirr);
if (i == -1)
return;
- do_IRQ(i);
+ /* per-CPU IRQs don't need translation */
+ if (eirr & PERCPU_IRQ_MASK) {
+ do_IRQ(i);
+ return;
+ }
+
+ /* top level irq handling */
+ do_IRQ(nlm_irq_to_xirq(node, i));
}
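
The dispatch path above folds a node number and a per-node interrupt into a flat Linux IRQ through nlm_irq_to_xirq(). The macro itself is not part of this hunk; a plausible sketch, assuming each node is given a contiguous block of interrupt numbers:

/* Sketch only -- the real definition lives in the NetLogic common header */
#define NLM_IRQS_PER_NODE	(NR_IRQS / NLM_NR_NODES)
#define nlm_irq_to_xirq(node, irq)	((node) * NLM_IRQS_PER_NODE + (irq))

Per-CPU interrupts (timer, IPIs, FMN) are registered without translation by nlm_init_percpu_irqs(), which is why plat_irq_dispatch() can hand them straight to do_IRQ() when they match PERCPU_IRQ_MASK.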
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index fab316de57e9..a080d9ee3cd7 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
+ uint64_t picbase;
+
+ cpu = cpu_logical_map(logical_cpu);
+ node = cpu / NLM_CPUS_PER_NODE;
+ picbase = nlm_get_node(node)->picbase;
if (action & SMP_CALL_FUNCTION)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
if (action & SMP_RESCHEDULE_YOURSELF)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
void nlm_early_init_secondary(int cpu)
{
change_c0_config(CONF_CM_CMASK, 0x3);
- write_c0_ebase((uint32_t)nlm_common_ebase);
#ifdef CONFIG_CPU_XLP
- if (hard_smp_processor_id() % 4 == 0)
+ /* mmu init, once per core */
+ if (cpu % NLM_THREADS_PER_CORE == 0)
xlp_mmu_init();
#endif
+ write_c0_ebase(nlm_current_node()->ebase);
}
/*
@@ -108,8 +114,12 @@ void nlm_early_init_secondary(int cpu)
*/
static void __cpuinit nlm_init_secondary(void)
{
- current_cpu_data.core = hard_smp_processor_id() / 4;
- nlm_smp_irq_init();
+ int hwtid;
+
+ hwtid = hard_smp_processor_id();
+ current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+ nlm_percpu_init(hwtid);
+ nlm_smp_irq_init(hwtid);
}
void nlm_prepare_cpus(unsigned int max_cpus)
@@ -120,9 +130,6 @@ void nlm_prepare_cpus(unsigned int max_cpus)
void nlm_smp_finish(void)
{
-#ifdef notyet
- nlm_common_msgring_cpu_init();
-#endif
local_irq_enable();
}
@@ -142,27 +149,27 @@ cpumask_t phys_cpu_present_map;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
- unsigned long gp = (unsigned long)task_thread_info(idle);
- unsigned long sp = (unsigned long)__KSTK_TOS(idle);
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
- nlm_next_sp = sp;
- nlm_next_gp = gp;
+ cpu = cpu_logical_map(logical_cpu);
+ node = cpu / NLM_CPUS_PER_NODE;
+ nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ nlm_next_gp = (unsigned long)task_thread_info(idle);
- /* barrier */
+ /* barrier for sp/gp store above */
__sync();
- nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+ nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
}
void __init nlm_smp_setup(void)
{
unsigned int boot_cpu;
- int num_cpus, i;
+ int num_cpus, i, ncore;
boot_cpu = hard_smp_processor_id();
- cpus_clear(phys_cpu_present_map);
+ cpumask_clear(&phys_cpu_present_map);
- cpu_set(boot_cpu, phys_cpu_present_map);
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
set_cpu_possible(0, true);
@@ -174,7 +181,7 @@ void __init nlm_smp_setup(void)
* it is only set for ASPs (see smpboot.S)
*/
if (nlm_cpu_ready[i]) {
- cpu_set(i, phys_cpu_present_map);
+ cpumask_set_cpu(i, &phys_cpu_present_map);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
set_cpu_possible(num_cpus, true);
@@ -182,20 +189,28 @@ void __init nlm_smp_setup(void)
}
}
+	/* count the cores we have woken up */
+ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+ ncore += hweight32(nlm_get_node(i)->coremask);
+
pr_info("Phys CPU present map: %lx, possible map %lx\n",
- (unsigned long)phys_cpu_present_map.bits[0],
+ (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
- pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+ pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+ nlm_threads_per_core, num_cpus);
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
-static int nlm_parse_cpumask(u32 cpu_mask)
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
uint32_t core0_thr_mask, core_thr_mask;
- int threadmode, i;
+ int threadmode, i, j;
- core0_thr_mask = cpu_mask & 0xf;
+ core0_thr_mask = 0;
+ for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+ if (cpumask_test_cpu(i, wakeup_mask))
+ core0_thr_mask |= (1 << i);
switch (core0_thr_mask) {
case 1:
nlm_threads_per_core = 1;
@@ -214,25 +229,23 @@ static int nlm_parse_cpumask(u32 cpu_mask)
}
/* Verify other cores CPU masks */
- nlm_coremask = 1;
- nlm_cpumask = core0_thr_mask;
- for (i = 1; i < 8; i++) {
- core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
- if (core_thr_mask) {
- if (core_thr_mask != core0_thr_mask)
+ for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+ core_thr_mask = 0;
+ for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+ if (cpumask_test_cpu(i + j, wakeup_mask))
+ core_thr_mask |= (1 << j);
+ if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
goto unsupp;
- nlm_coremask |= 1 << i;
- nlm_cpumask |= core0_thr_mask << (4 * i);
- }
}
return threadmode;
unsupp:
- panic("Unsupported CPU mask %x\n", cpu_mask);
+ panic("Unsupported CPU mask %lx\n",
+ (unsigned long)cpumask_bits(wakeup_mask)[0]);
return 0;
}
-int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
+int __cpuinit nlm_wakeup_secondary_cpus(void)
{
unsigned long reset_vec;
char *reset_data;
@@ -244,7 +257,7 @@ int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
(nlm_reset_entry_end - nlm_reset_entry));
/* verify the mask and setup core config variables */
- threadmode = nlm_parse_cpumask(wakeup_mask);
+ threadmode = nlm_parse_cpumask(&nlm_cpumask);
/* Setup CPU init parameters */
reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
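
Several of the changes above decompose a hardware thread id into a node and a per-node CPU index. A short sketch of that arithmetic, assuming the usual XLP topology of 8 cores x 4 threads per node (the constants come from the NetLogic headers; the values are assumptions here):

/* Sketch: hardware thread id -> (node, per-node cpu), assuming 8 cores x 4 threads */
#define NLM_THREADS_PER_CORE	4
#define NLM_CORES_PER_NODE	8
#define NLM_CPUS_PER_NODE	(NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)	/* 32 */

static inline int hwtid_node(int hwtid)
{
	return hwtid / NLM_CPUS_PER_NODE;	/* e.g. hwtid 37 -> node 1 */
}

static inline int hwtid_cpu_in_node(int hwtid)
{
	return hwtid % NLM_CPUS_PER_NODE;	/* e.g. hwtid 37 -> cpu 5 of node 1 */
}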
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index a13355cc97eb..a0b74874bebe 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -61,7 +61,7 @@
li t0, LSU_DEFEATURE
mfcr t1, t0
- lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
or t1, t1, t2
#ifdef XLP_AX_WORKAROUND
li t2, ~0xe /* S1RCM */
@@ -186,7 +186,7 @@ EXPORT(nlm_boot_siblings)
* jump to the secondary wait function.
*/
mfc0 v0, CP0_EBASE, 1
- andi v0, 0x7f /* v0 <- node/core */
+ andi v0, 0x3ff /* v0 <- node/core */
/* Init MMU in the first thread after changing THREAD_MODE
* register (Ax Errata?)
@@ -263,6 +263,8 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
PTR_L gp, 0(t1)
/* a0 has the processor id */
+ mfc0 a0, CP0_EBASE, 1
+ andi a0, 0x3ff /* a0 <- node/core */
PTR_LA t0, nlm_early_init_secondary
jalr t0
nop
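
Widening the mask from 0x7f to 0x3ff keeps the full CPUNum field of EBASE, so the node bits are no longer lost when computing the processor id. A C-side sketch of the same read, assuming the node/core/thread encoding used elsewhere in this patch:

/* Sketch: read the hardware thread id from EBASE (CPUNum is the low 10 bits) */
static inline unsigned int nlm_hw_threadid(void)
{
	return read_c0_ebase() & 0x3ff;	/* assumed node * 32 + core * 4 + thread */
}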
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 6c65ac701912..529e74742d9f 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -40,23 +40,23 @@
#include <asm/mipsregs.h>
#include <asm/time.h>
+#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/sys.h>
-/* These addresses are computed by the nlm_hal_init() */
-uint64_t nlm_io_base;
-uint64_t nlm_sys_base;
-uint64_t nlm_pic_base;
-
/* Main initialization */
-void nlm_hal_init(void)
+void nlm_node_init(int node)
{
- nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
- nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */
- nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_get_node(node);
+ nodep->sysbase = nlm_get_sys_regbase(node);
+ nodep->picbase = nlm_get_pic_regbase(node);
+ nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ spin_lock_init(&nodep->piclock);
}
int nlm_irq_to_irt(int irq)
@@ -100,52 +100,15 @@ int nlm_irq_to_irt(int irq)
}
}
-int nlm_irt_to_irq(int irt)
-{
- switch (irt) {
- case PIC_IRT_UART_0_INDEX:
- return PIC_UART_0_IRQ;
- case PIC_IRT_UART_1_INDEX:
- return PIC_UART_1_IRQ;
- case PIC_IRT_PCIE_LINK_0_INDEX:
- return PIC_PCIE_LINK_0_IRQ;
- case PIC_IRT_PCIE_LINK_1_INDEX:
- return PIC_PCIE_LINK_1_IRQ;
- case PIC_IRT_PCIE_LINK_2_INDEX:
- return PIC_PCIE_LINK_2_IRQ;
- case PIC_IRT_PCIE_LINK_3_INDEX:
- return PIC_PCIE_LINK_3_IRQ;
- case PIC_IRT_EHCI_0_INDEX:
- return PIC_EHCI_0_IRQ;
- case PIC_IRT_EHCI_1_INDEX:
- return PIC_EHCI_1_IRQ;
- case PIC_IRT_OHCI_0_INDEX:
- return PIC_OHCI_0_IRQ;
- case PIC_IRT_OHCI_1_INDEX:
- return PIC_OHCI_1_IRQ;
- case PIC_IRT_OHCI_2_INDEX:
- return PIC_OHCI_2_IRQ;
- case PIC_IRT_OHCI_3_INDEX:
- return PIC_OHCI_3_IRQ;
- case PIC_IRT_MMC_INDEX:
- return PIC_MMC_IRQ;
- case PIC_IRT_I2C_0_INDEX:
- return PIC_I2C_0_IRQ;
- case PIC_IRT_I2C_1_INDEX:
- return PIC_I2C_1_IRQ;
- default:
- return -1;
- }
-}
-
-unsigned int nlm_get_core_frequency(int core)
+unsigned int nlm_get_core_frequency(int node, int core)
{
unsigned int pll_divf, pll_divr, dfs_div, ext_div;
unsigned int rstval, dfsval, denom;
- uint64_t num;
+ uint64_t num, sysbase;
- rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG);
- dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE);
+ sysbase = nlm_get_node(node)->sysbase;
+ rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+ dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
pll_divf = ((rstval >> 10) & 0x7f) + 1;
pll_divr = ((rstval >> 8) & 0x3) + 1;
ext_div = ((rstval >> 30) & 0x3) + 1;
@@ -159,5 +122,5 @@ unsigned int nlm_get_core_frequency(int core)
unsigned int nlm_get_cpu_frequency(void)
{
- return nlm_get_core_frequency(0);
+ return nlm_get_core_frequency(0, 0);
}
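
nlm_node_init() above populates a per-node descriptor that the rest of the series reads through nlm_get_node()/nlm_current_node(). Collecting the fields touched across this patch, the structure looks roughly like the sketch below (field order, types and the accessor definitions are assumptions; the real ones live in the NetLogic common header):

/* Sketch of the per-node SoC descriptor implied by this patch */
struct nlm_soc_info {
	uint64_t	ebase;		/* exception base for CPUs on this node */
	uint64_t	sysbase;	/* SYS block register base (XLP)        */
	uint64_t	picbase;	/* PIC register base                    */
	uint64_t	irqmask;	/* EIMR value programmed on this node   */
	uint32_t	coremask;	/* cores successfully brought up        */
	spinlock_t	piclock;	/* serializes PIC enable/disable        */
};

extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
#define nlm_get_node(i)		(&nlm_nodes[i])
#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()])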
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index d8997098defd..4894d62043ac 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -52,26 +52,40 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-unsigned long nlm_common_ebase = 0x0;
-
-/* default to uniprocessor */
-uint32_t nlm_coremask = 1, nlm_cpumask = 1;
-int nlm_threads_per_core = 1;
+uint64_t nlm_io_base;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+unsigned int nlm_threads_per_core;
extern u32 __dtb_start[];
static void nlm_linux_exit(void)
{
- nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1);
+ uint64_t sysbase = nlm_get_node(0)->sysbase;
+
+ nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
for ( ; ; )
cpu_wait();
}
void __init plat_mem_setup(void)
{
+ void *fdtp;
+
panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
+
+ /*
+ * If no FDT pointer is passed in, use the built-in FDT.
+ * device_tree_init() does not handle CKSEG0 pointers in
+ * 64-bit, so convert pointer.
+ */
+ fdtp = (void *)(long)fw_arg0;
+ if (!fdtp)
+ fdtp = __dtb_start;
+ fdtp = phys_to_virt(__pa(fdtp));
+ early_init_devtree(fdtp);
}
const char *get_system_type(void)
@@ -94,27 +108,19 @@ void xlp_mmu_init(void)
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
-void __init prom_init(void)
+void nlm_percpu_init(int hwcpuid)
{
- void *fdtp;
+}
+void __init prom_init(void)
+{
+ nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
xlp_mmu_init();
- nlm_hal_init();
-
- /*
- * If no FDT pointer is passed in, use the built-in FDT.
- * device_tree_init() does not handle CKSEG0 pointers in
- * 64-bit, so convert pointer.
- */
- fdtp = (void *)(long)fw_arg0;
- if (!fdtp)
- fdtp = __dtb_start;
- fdtp = phys_to_virt(__pa(fdtp));
- early_init_devtree(fdtp);
+ nlm_node_init(0);
- nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
- nlm_wakeup_secondary_cpus(0xffffffff);
+ cpumask_setall(&nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
/* update TLB size after waking up threads */
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
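
The FDT handling moved into plat_mem_setup() keeps the note about CKSEG0 pointers: a built-in DTB is linked at a CKSEG0 address, which the 64-bit device-tree code does not accept, so the pointer is rebased first. A brief restatement, with the address values given purely as illustrative assumptions:

/* Sketch: rebase a CKSEG0 pointer to an unmapped kernel virtual address */
void *fdtp = __dtb_start;		/* e.g. 0xffffffff802e0000 (CKSEG0)      */
fdtp = phys_to_virt(__pa(fdtp));	/* physical 0x002e0000, XKPHYS-side view */
early_init_devtree(fdtp);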
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 44d923ff3846..cb9010642ac3 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -51,45 +51,72 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-static void xlp_enable_secondary_cores(void)
+static int xlp_wakeup_core(uint64_t sysbase, int core)
{
- uint32_t core, value, coremask, syscoremask;
+ uint32_t coremask, value;
int count;
- /* read cores in reset from SYS block */
- syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
+ coremask = (1 << core);
- /* update user specified */
- nlm_coremask = nlm_coremask & (syscoremask | 1);
+ /* Enable CPU clock */
+ value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
- for (core = 1; core < 8; core++) {
- coremask = 1 << core;
- if ((nlm_coremask & coremask) == 0)
- continue;
+ /* Remove CPU Reset */
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
- /* Enable CPU clock */
- value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL);
- value &= ~coremask;
- nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value);
+ /* Poll for CPU to mark itself coherent */
+ count = 100000;
+ do {
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
+ } while ((value & coremask) != 0 && --count > 0);
- /* Remove CPU Reset */
- value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
- value &= ~coremask;
- nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value);
+ return count != 0;
+}
+
+static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
+{
+ struct nlm_soc_info *nodep;
+ uint64_t syspcibase;
+ uint32_t syscoremask;
+ int core, n, cpu;
+
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ syspcibase = nlm_get_sys_pcibase(n);
+ if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+ break;
+
+ /* read cores in reset from SYS and account for boot cpu */
+ nlm_node_init(n);
+ nodep = nlm_get_node(n);
+ syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
+ if (n == 0)
+ syscoremask |= 1;
+
+ for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+ /* see if the core exists */
+ if ((syscoremask & (1 << core)) == 0)
+ continue;
- /* Poll for CPU to mark itself coherent */
- count = 100000;
- do {
- value = nlm_read_sys_reg(nlm_sys_base,
- SYS_CPU_NONCOHERENT_MODE);
- } while ((value & coremask) != 0 && count-- > 0);
+ /* see if at least the first thread is enabled */
+ cpu = (n * NLM_CORES_PER_NODE + core)
+ * NLM_THREADS_PER_CORE;
+ if (!cpumask_test_cpu(cpu, wakeup_mask))
+ continue;
- if (count == 0)
- pr_err("Failed to enable core %d\n", core);
+ /* wake up the core */
+ if (xlp_wakeup_core(nodep->sysbase, core))
+ nodep->coremask |= 1u << core;
+ else
+ pr_err("Failed to enable core %d\n", core);
+ }
}
}
-void xlp_wakeup_secondary_cpus(void)
+void xlp_wakeup_secondary_cpus()
{
/*
* In case of u-boot, the secondaries are in reset
@@ -98,5 +125,5 @@ void xlp_wakeup_secondary_cpus(void)
xlp_boot_core0_siblings();
/* now get other cores out of reset */
- xlp_enable_secondary_cores();
+ xlp_enable_secondary_cores(&nlm_cpumask);
}
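
In xlp_enable_secondary_cores() above, only the first hardware thread of each core is checked against the wakeup mask. A worked sketch of that index calculation, assuming 8 cores per node and 4 threads per core:

/* Sketch: first hardware thread of (node, core); e.g. node 1, core 2 -> cpu 40 */
static inline int nlm_first_thread(int node, int core)
{
	return (node * NLM_CORES_PER_NODE + core) * NLM_THREADS_PER_CORE;
}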
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index c287dea87570..05902bc6f080 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -1,2 +1,2 @@
-obj-y += setup.o platform.o platform-flash.o
-obj-$(CONFIG_SMP) += wakeup.o
+obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
+obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
new file mode 100644
index 000000000000..bed2cffa1008
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cpu-info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+struct xlr_board_fmn_config xlr_board_fmn_config;
+
+static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
+{
+ int bkt;
+
+ pr_info("Bucket size :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
+ pr_info("\n");
+
+ pr_info("Credits distribution :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ fmn_info->credit_config[(bkt * 8) + 0],
+ fmn_info->credit_config[(bkt * 8) + 1],
+ fmn_info->credit_config[(bkt * 8) + 2],
+ fmn_info->credit_config[(bkt * 8) + 3],
+ fmn_info->credit_config[(bkt * 8) + 4],
+ fmn_info->credit_config[(bkt * 8) + 5],
+ fmn_info->credit_config[(bkt * 8) + 6],
+ fmn_info->credit_config[(bkt * 8) + 7]);
+ pr_info("\n");
+}
+
+static void check_credit_distribution(void)
+{
+ struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
+ int bkt, n, total_credits, ncores;
+
+ ncores = hweight32(nlm_current_node()->coremask);
+ for (bkt = 0; bkt < 128; bkt++) {
+ total_credits = 0;
+ for (n = 0; n < ncores; n++)
+ total_credits += cfg->cpu[n].credit_config[bkt];
+ total_credits += cfg->gmac[0].credit_config[bkt];
+ total_credits += cfg->gmac[1].credit_config[bkt];
+ total_credits += cfg->dma.credit_config[bkt];
+ total_credits += cfg->cmp.credit_config[bkt];
+ total_credits += cfg->sae.credit_config[bkt];
+ total_credits += cfg->xgmac[0].credit_config[bkt];
+ total_credits += cfg->xgmac[1].credit_config[bkt];
+ if (total_credits > cfg->bucket_size[bkt])
+ pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
+ bkt, total_credits, cfg->bucket_size[bkt]);
+ }
+ pr_info("Credit distribution complete.\n");
+}
+
+/**
+ * Configure bucket size and credits for a device. 'size' is the size of
+ * the buckets for the device. This size is distributed among all the CPUs
+ * so that all of them can send messages to the device.
+ *
+ * The device is also given 'cpu_credits' to send messages to the CPUs
+ *
+ * @dev_info: FMN information structure for each device
+ * @start_stn_id: Starting station id of dev_info
+ * @end_stn_id: End station id of dev_info
+ * @num_buckets: Total number of buckets for dev_info
+ * @cpu_credits: Credits allowed to the CPUs for each device pointed to by dev_info
+ * @size: Size of each bucket in the device station
+ */
+static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
+ int end_stn_id, int num_buckets, int cpu_credits, int size)
+{
+ int i, j, num_core, n, credits_per_cpu;
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ dev_info->num_buckets = num_buckets;
+ dev_info->start_stn_id = start_stn_id;
+ dev_info->end_stn_id = end_stn_id;
+
+ n = num_core;
+ if (num_core == 3)
+ n = 4;
+
+ for (i = start_stn_id; i <= end_stn_id; i++) {
+ xlr_board_fmn_config.bucket_size[i] = size;
+
+ /* Dividing device credits equally to cpus */
+ credits_per_cpu = size / n;
+ for (j = 0; j < num_core; j++)
+ cpu[j].credit_config[i] = credits_per_cpu;
+
+ /* credits left to distribute */
+ credits_per_cpu = size - (credits_per_cpu * num_core);
+
+ /* distribute the remaining credits (if any), among cores */
+ for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
+ cpu[j].credit_config[i] += 4;
+ credits_per_cpu -= 4;
+ }
+ }
+
+ /* Distributing cpu per bucket credits to devices */
+ for (i = 0; i < num_core; i++) {
+ for (j = 0; j < FMN_CORE_NBUCKETS; j++)
+ dev_info->credit_config[(i * 8) + j] = cpu_credits;
+ }
+}
+
+/*
+ * Each core has 256 slots and 8 buckets,
+ * Configure the 8 buckets each with 32 slots
+ */
+static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
+{
+ int i, j;
+
+ for (i = 0; i < num_core; i++) {
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
+
+ for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
+ xlr_board_fmn_config.bucket_size[j] = 32;
+ }
+}
+
+/**
+ * Set up the FMN details for each device according to the devices available
+ * in each variant of the XLR/XLS processor
+ */
+void xlr_board_info_setup(void)
+{
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+ struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
+ struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
+ struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
+ struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
+ struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
+ int processor_id, num_core;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ processor_id = read_c0_prid() & 0xff00;
+
+ setup_cpu_fmninfo(cpu, num_core);
+ switch (processor_id) {
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS108:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS208:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS412B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
+ FMN_STNID_XMAC0_15_TX, 8, 0, 32);
+ setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
+ FMN_STNID_XMAC1_15_TX, 8, 0, 32);
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 24, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+ default:
+ pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
+ pr_err("Error: Cannot initialize FMN credits.\n");
+ }
+
+ check_credit_distribution();
+
+#if 0 /* debug */
+ print_credit_config(&cpu[0]);
+ print_credit_config(&gmac[0]);
+#endif
+}
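
As a worked example of the credit setup above (with a core count of 4 assumed), the XLS4xx call setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, FMN_STNID_GMAC0_TX3, 8, 8, 32) behaves as follows:

/* Worked example, assuming num_core == 4:
 *  - every GMAC0 station bucket gets bucket_size = 32 slots
 *  - each core receives size / num_core = 32 / 4 = 8 credits per GMAC0 bucket
 *    (32 divides evenly, so no remainder is redistributed in steps of 4)
 *  - GMAC0 itself gets cpu_credits = 8 credits into each of the 8
 *    per-core receive buckets of every core
 */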
diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c
new file mode 100644
index 000000000000..4d74f03de506
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/common.h>
+
+#define COP2_CC_INIT_CPU_DEST(dest, conf) \
+do { \
+ nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
+ nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
+ nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
+ nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
+ nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
+ nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
+ nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
+ nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
+} while (0)
+
+struct fmn_message_handler {
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
+ void *arg;
+} msg_handlers[128];
+
+/*
+ * FMN interrupt handler. We configure the FMN so that any messages in
+ * any of the CPU buckets will trigger an interrupt on the CPU.
+ * The message can be from any device on the FMN (like NAE/SAE/DMA).
+ * The source station id is used to figure out which of the registered
+ * handlers have to be called.
+ */
+static irqreturn_t fmn_message_handler(int irq, void *data)
+{
+ struct fmn_message_handler *hndlr;
+ int bucket, rv;
+ int size = 0, code = 0, src_stnid = 0;
+ struct nlm_fmn_msg msg;
+ uint32_t mflags, bkt_status;
+
+ mflags = nlm_cop2_enable();
+ /* Disable message ring interrupt */
+ nlm_fmn_setup_intr(irq, 0);
+ while (1) {
+ /* 8 bkts per core, [24:31] each bit represents one bucket
+ * Bit is Zero if bucket is not empty */
+ bkt_status = (nlm_read_c2_status() >> 24) & 0xff;
+ if (bkt_status == 0xff)
+ break;
+ for (bucket = 0; bucket < 8; bucket++) {
+ /* Continue on empty bucket */
+ if (bkt_status & (1 << bucket))
+ continue;
+ rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
+ &msg);
+ if (rv != 0)
+ continue;
+
+ hndlr = &msg_handlers[src_stnid];
+ if (hndlr->action == NULL)
+ pr_warn("No msgring handler for stnid %d\n",
+ src_stnid);
+ else {
+ nlm_cop2_restore(mflags);
+ hndlr->action(bucket, src_stnid, size, code,
+ &msg, hndlr->arg);
+ mflags = nlm_cop2_enable();
+ }
+ }
+ };
+ /* Enable message ring intr, to any thread in core */
+ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(mflags);
+ return IRQ_HANDLED;
+}
+
+struct irqaction fmn_irqaction = {
+ .handler = fmn_message_handler,
+ .flags = IRQF_PERCPU,
+ .name = "fmn",
+};
+
+void xlr_percpu_fmn_init(void)
+{
+ struct xlr_fmn_info *cpu_fmn_info;
+ int *bucket_sizes;
+ uint32_t flags;
+ int id;
+
+ BUG_ON(nlm_thread_id() != 0);
+ id = nlm_core_id();
+
+ bucket_sizes = xlr_board_fmn_config.bucket_size;
+ cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
+ flags = nlm_cop2_enable();
+
+ /* Setup bucket sizes for the core. */
+ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
+ nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
+ nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
+ nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
+ nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
+ nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
+ nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
+ nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
+
+ /*
+ * For sending FMN messages, we need credits on the destination
+ * bucket. Program the credits this core has on the 128 possible
+ * destination buckets.
+ * We cannot use a loop here, because the first argument has
+ * to be a constant integer value.
+ */
+ COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
+
+ /* enable FMN interrupts on this CPU */
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(flags);
+}
+
+
+/*
+ * Register an FMN message handler for a range of source station ids
+ * @start_stnid: first source station id in the range
+ * @end_stnid: last source station id in the range
+ * @action: handler function pointer
+ * @arg: argument passed back to the handler
+ */
+int nlm_register_fmn_handler(int start_stnid, int end_stnid,
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg)
+{
+ int sstnid;
+
+ for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
+ msg_handlers[sstnid].arg = arg;
+ smp_wmb();
+ msg_handlers[sstnid].action = action;
+ }
+ pr_debug("Registered FMN msg handler for stnid %d-%d\n",
+ start_stnid, end_stnid);
+ return 0;
+}
+
+void nlm_setup_fmn_irq(void)
+{
+ uint32_t flags;
+
+ /* setup irq only once */
+ setup_irq(IRQ_FMN, &fmn_irqaction);
+
+ flags = nlm_cop2_enable();
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(flags);
+}
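
A hypothetical consumer of nlm_register_fmn_handler() added above could register a receive callback as sketched below; the station-id range and the callback body are illustrative only, but the callback signature matches the call made from fmn_message_handler():

/* Hypothetical example: handle messages from an assumed range of stations */
static void demo_fmn_rx(int bucket, int src_stnid, int size, int code,
			struct nlm_fmn_msg *msg, void *arg)
{
	pr_debug("FMN: stn %d -> bucket %d, size %d, code %d\n",
		 src_stnid, bucket, size, code);
}

static int __init demo_fmn_init(void)
{
	/* FMN_STNID_DMA_0..FMN_STNID_DMA_3 used only as an example range */
	return nlm_register_fmn_handler(FMN_STNID_DMA_0, FMN_STNID_DMA_3,
					demo_fmn_rx, NULL);
}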
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 81b1d311834f..4e7f49d3d5a8 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -49,16 +49,15 @@
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/fmn.h>
uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
-uint64_t nlm_pic_base;
struct psb_info nlm_prom_info;
-unsigned long nlm_common_ebase = 0x0;
-
/* default to uniprocessor */
-uint32_t nlm_coremask = 1, nlm_cpumask = 1;
-int nlm_threads_per_core = 1;
+unsigned int nlm_threads_per_core = 1;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
static void __init nlm_early_serial_setup(void)
{
@@ -113,6 +112,12 @@ void __init prom_free_prom_memory(void)
/* Nothing yet */
}
+void nlm_percpu_init(int hwcpuid)
+{
+ if (hwcpuid % 4 == 0)
+ xlr_percpu_fmn_init();
+}
+
static void __init build_arcs_cmdline(int *argv)
{
int i, remain, len;
@@ -176,9 +181,19 @@ static void prom_add_memory(void)
}
}
+static void nlm_init_node(void)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_current_node();
+ nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ spin_lock_init(&nodep->piclock);
+}
+
void __init prom_init(void)
{
- int *argv, *envp; /* passed as 32 bit ptrs */
+ int i, *argv, *envp; /* passed as 32 bit ptrs */
struct psb_info *prom_infop;
/* truncate to 32 bit and sign extend all args */
@@ -187,15 +202,19 @@ void __init prom_init(void)
prom_infop = (struct psb_info *)(long)(int)fw_arg3;
nlm_prom_info = *prom_infop;
- nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nlm_init_node();
nlm_early_serial_setup();
build_arcs_cmdline(argv);
- nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
prom_add_memory();
#ifdef CONFIG_SMP
- nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map);
+ for (i = 0; i < 32; i++)
+ if (nlm_prom_info.online_cpu_map & (1 << i))
+ cpumask_set_cpu(i, &nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
register_smp_ops(&nlm_smp_ops);
#endif
+ xlr_board_info_setup();
+ xlr_percpu_fmn_init();
}
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index db5d987d4881..3ebf7411d67b 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -33,6 +33,7 @@
*/
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/threads.h>
#include <asm/asm.h>
@@ -50,18 +51,34 @@
int __cpuinit xlr_wakeup_secondary_cpus(void)
{
- unsigned int i, boot_cpu;
+ struct nlm_soc_info *nodep;
+ unsigned int i, j, boot_cpu;
/*
* In case of RMI boot, hit with NMI to get the cores
* from bootloader to linux code.
*/
+ nodep = nlm_get_node(0);
boot_cpu = hard_smp_processor_id();
nlm_set_nmi_handler(nlm_rmiboot_preboot);
for (i = 0; i < NR_CPUS; i++) {
- if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0)
+ if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
continue;
- nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */
+ nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
+ }
+
+ /* Fill up the coremask early */
+ nodep->coremask = 1;
+ for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+ for (j = 1000000; j > 0; j--) {
+ if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE])
+ break;
+ udelay(10);
+ }
+ if (j != 0)
+ nodep->coremask |= (1u << i);
+ else
+ pr_err("Failed to wakeup core %d\n", i);
}
return 0;