Diffstat (limited to 'arch/mips/netlogic/common')
-rw-r--r--  arch/mips/netlogic/common/earlycons.c |   2
-rw-r--r--  arch/mips/netlogic/common/smpboot.S   | 157
2 files changed, 103 insertions, 56 deletions
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index f193f7b3bd81..1902fa22d277 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -54,7 +54,7 @@ void prom_putchar(char c)
#elif defined(CONFIG_CPU_XLR)
uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
#endif
- while (nlm_read_reg(uartbase, UART_LSR) == 0)
+ while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
;
nlm_write_reg(uartbase, UART_TX, c);
}
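
The fix above tightens the busy-wait: instead of spinning while the whole line status register reads as zero, it now waits specifically for the transmitter-holding-register-empty bit before pushing the next byte. A minimal sketch of the same polling pattern in plain C, assuming a memory-mapped 8250-style UART with 32-bit-wide registers; uart_read()/uart_write() are hypothetical stand-ins for nlm_read_reg()/nlm_write_reg(), and the register numbers/bit are the standard UART_TX = 0, UART_LSR = 5, UART_LSR_THRE = 0x20.

#include <stdint.h>

#define UART_TX        0        /* transmit holding register        */
#define UART_LSR       5        /* line status register             */
#define UART_LSR_THRE  0x20     /* transmitter holding reg. empty   */

/* Hypothetical 32-bit register accessors for a memory-mapped UART. */
static inline uint32_t uart_read(volatile uint32_t *base, int reg)
{
        return base[reg];
}

static inline void uart_write(volatile uint32_t *base, int reg, uint32_t val)
{
        base[reg] = val;
}

void putchar_polled(volatile uint32_t *uartbase, char c)
{
        /* Wait until the holding register can take another byte. */
        while ((uart_read(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
                ;
        uart_write(uartbase, UART_TX, (uint32_t)c);
}
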
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index c138b1a6dec3..a13355cc97eb 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -54,28 +54,68 @@
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
-.macro __config_lsu
- li t0, LSU_DEFEATURE
- mfcr t1, t0
+#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
- lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
- or t1, t1, t2
- li t2, ~0xe /* S1RCM */
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+#ifdef XLP_AX_WORKAROUND
+ li t2, ~0xe /* S1RCM */
and t1, t1, t2
- mtcr t1, t0
+#endif
+ mtcr t1, t0
- li t0, SCHED_DEFEATURE
- lui t1, 0x0100 /* Experimental: Disable BRU accepting ALU ops */
- mtcr t1, t0
+#ifdef XLP_AX_WORKAROUND
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+ mtcr t1, t0
+#endif
+.endm
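
xlp_config_lsu is a read-modify-write of the LSU_DEFEATURE control register: the lui 0x4080 constant always sets the unaligned-access and L2HPE feature bits (0x40800000), and when XLP_AX_WORKAROUND is compiled in it also clears the S1RCM bits with the ~0xe mask and writes 0x01000000 to SCHED_DEFEATURE to stop the BRU from accepting ALU ops. A compact C sketch of that bit arithmetic; the accessor functions are hypothetical stand-ins for the mfcr/mtcr pairs, and the compile-time workaround is rendered as a runtime flag for readability.

#include <stdint.h>

#define LSU_ENABLE_BITS   0x40800000u   /* Unaligned Access | L2HPE (lui t2, 0x4080) */
#define LSU_S1RCM_BITS    0x0000000eu   /* cleared on Ax silicon (li t2, ~0xe)       */
#define SCHED_BRU_DISABLE 0x01000000u   /* disable BRU accepting ALU ops             */

/* Hypothetical accessors standing in for the mfcr/mtcr pairs. */
uint32_t read_lsu_defeature(void);
void write_lsu_defeature(uint32_t val);
void write_sched_defeature(uint32_t val);

void xlp_config_lsu_sketch(int ax_workaround)
{
        uint32_t val = read_lsu_defeature();

        val |= LSU_ENABLE_BITS;                 /* always enable the features    */
        if (ax_workaround)
                val &= ~LSU_S1RCM_BITS;         /* S1RCM workaround for Ax parts */
        write_lsu_defeature(val);

        if (ax_workaround)
                write_sched_defeature(SCHED_BRU_DISABLE);
}
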
+
+/*
+ * This is the code that will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ */
+.macro xlp_flush_l1_dcache
+ li t0, LSU_DEBUG_DATA0
+ li t1, LSU_DEBUG_ADDR
+ li t2, 0 /* index */
+ li t3, 0x1000 /* loop count */
+1:
+ sll v0, t2, 5
+ mtcr zero, t0
+ ori v1, v0, 0x3 /* way0 | write_enable | write_active */
+ mtcr v1, t1
+2:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 2b
+ nop
+ mtcr zero, t0
+ ori v1, v0, 0x7 /* way1 | write_enable | write_active */
+ mtcr v1, t1
+3:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 3b
+ nop
+ addi t2, 1
+ bne t3, t2, 1b
+ nop
.endm
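
xlp_flush_l1_dcache walks 0x1000 cache indexes and, for each index, pushes a zeroed line out through both ways of the L1 D-cache using the LSU debug registers: clear the data register, write (index << 5) together with the way/write bits to the address register, and poll until the write_active bit (bit 0) drops. The same loop in C, with hypothetical helpers standing in for the mtcr/mfcr on LSU_DEBUG_DATA0 and LSU_DEBUG_ADDR.

#include <stdint.h>

/* Hypothetical stand-ins for mtcr/mfcr on the LSU debug registers. */
void lsu_debug_write_data0(uint32_t val);
void lsu_debug_write_addr(uint32_t val);
uint32_t lsu_debug_read_addr(void);

#define L1D_INDEXES     0x1000  /* loop count in the macro                */
#define WAY0_WRITE      0x3     /* way0 | write_enable | write_active     */
#define WAY1_WRITE      0x7     /* way1 | write_enable | write_active     */
#define WRITE_ACTIVE    0x1     /* poll until this bit clears             */

static void flush_one_way(uint32_t index_bits, uint32_t way_cmd)
{
        lsu_debug_write_data0(0);                       /* data to write: zero */
        lsu_debug_write_addr(index_bits | way_cmd);     /* start the write     */
        while (lsu_debug_read_addr() & WRITE_ACTIVE)
                ;                                       /* wait for completion */
}

void xlp_flush_l1_dcache_sketch(void)
{
        for (uint32_t idx = 0; idx < L1D_INDEXES; idx++) {
                uint32_t index_bits = idx << 5;         /* sll v0, t2, 5 */
                flush_one_way(index_bits, WAY0_WRITE);
                flush_one_way(index_bits, WAY1_WRITE);
        }
}
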
/*
 * The cores can start here when they are woken up. This is also the NMI
* entry, so check that first.
*
- * The data corresponding to reset is stored at RESET_DATA_PHYS location,
- * this will have the thread mask (used when core is woken up) and the
- * current NMI handler in case we reached here for an NMI.
+ * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
+ * location, this will have the thread mask (used when core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
*
* When a core or thread is newly woken up, it loops in a 'wait'. When
* the CPU really needs waking up, we send an NMI to it, with the NMI
@@ -89,12 +129,12 @@
FEXPORT(nlm_reset_entry)
dmtc0 k0, $22, 6
dmtc0 k1, $22, 7
- mfc0 k0, CP0_STATUS
- li k1, 0x80000
- and k1, k0, k1
- beqz k1, 1f /* go to real reset entry */
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
nop
- li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
ld k0, BOOT_NMI_HANDLER(k1)
jr k0
nop
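
nlm_reset_entry parks k0/k1 in CP0 $22 scratch selects and then decides whether this is an NMI wake-up: the 0x80000 mask tests the NMI flag in CP0 Status, and if it is set the handler pointer is fetched from the BOOT_NMI_HANDLER slot of the reset data block at RESET_DATA_PHYS (via CKSEG1, i.e. uncached) and jumped to; otherwise execution falls through to the cold reset path. A hedged C rendering of that dispatch; the struct layout and field names below are illustrative only, the real code works from the BOOT_* byte offsets.

#include <stdint.h>

#define ST0_NMI         0x80000         /* CP0 Status bit checked by the asm */

/* Illustrative layout only; the real code uses the BOOT_THREAD_MODE and
 * BOOT_NMI_HANDLER offsets into the block at RESET_DATA_PHYS. */
struct reset_data {
        uint32_t thread_mode;           /* written by the waking CPU     */
        uint64_t nmi_handler;           /* where to go on an NMI wake-up */
};

void reset_or_nmi_entry(uint32_t cp0_status, struct reset_data *rdata)
{
        if (cp0_status & ST0_NMI) {
                /* NMI wake-up: jump to the handler the boot CPU installed. */
                void (*handler)(void) =
                        (void (*)(void))(uintptr_t)rdata->nmi_handler;
                handler();
                return;
        }
        /* Otherwise fall through to the cold reset path. */
}
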
@@ -114,21 +154,25 @@ FEXPORT(nlm_reset_entry)
li t2, SYS_CPU_COHERENT_BASE(0)
add t2, t2, t3 /* t2 <- SYS offset for node */
lw t1, 0(t2)
- and t1, t1, t0
- sw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
/* read back to ensure complete */
- lw t1, 0(t2)
+ lw t1, 0(t2)
sync
/* Configure LSU on Non-0 Cores. */
- __config_lsu
+ xlp_config_lsu
+ /* FALL THROUGH */
/*
* Wake up sibling threads from the initial thread in
* a core.
*/
EXPORT(nlm_boot_siblings)
+ /* core L1D flush before enable threads */
+ xlp_flush_l1_dcache
+ /* Enable hw threads by writing to MAP_THREADMODE of the core */
li t0, CKSEG1ADDR(RESET_DATA_PHYS)
lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
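
Before waking its siblings, the core clears bits in the per-node SYS coherence control word and immediately reads the register back so the store is known to have reached the device before the following sync, the L1D flush and the thread-mode write (the mtcr that consumes the MAP_THREADMODE value falls just outside the quoted context). A small C sketch of that write-then-read-back MMIO pattern; the helper names and the keep_mask parameter are illustrative, not the kernel's API.

#include <stdint.h>

/* Hypothetical MMIO helpers; the real code uses lw/sw on a CKSEG1 address. */
static inline uint32_t mmio_read32(volatile uint32_t *reg)
{
        return *reg;
}

static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

/*
 * Clear bits in the per-node coherence control word, then read the
 * register back so the write is known to have completed before the
 * core goes on (the asm follows this with "sync").
 */
void clear_coherence_bits(volatile uint32_t *sys_reg, uint32_t keep_mask)
{
        uint32_t val = mmio_read32(sys_reg);

        val &= keep_mask;               /* and t1, t1, t0 */
        mmio_write32(sys_reg, val);     /* sw  t1, 0(t2)  */
        (void)mmio_read32(sys_reg);     /* read back to ensure complete */
}
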
@@ -139,31 +183,28 @@ EXPORT(nlm_boot_siblings)
/*
* The new hardware thread starts at the next instruction
* For all the cases other than core 0 thread 0, we will
- * jump to the secondary wait function.
- */
+ * jump to the secondary wait function.
+ */
mfc0 v0, CP0_EBASE, 1
andi v0, 0x7f /* v0 <- node/core */
-#if 1
- /* A0 errata - Write MMU_SETUP after changing thread mode register. */
+ /* Init MMU in the first thread after changing THREAD_MODE
+ * register (Ax Errata?)
+ */
andi v1, v0, 0x3 /* v1 <- thread id */
bnez v1, 2f
nop
- li t0, MMU_SETUP
- li t1, 0
- mtcr t1, t0
- ehb
-#endif
+ li t0, MMU_SETUP
+ li t1, 0
+ mtcr t1, t0
+ _ehb
-2: beqz v0, 4f
+2: beqz v0, 4f /* boot cpu (cpuid == 0)? */
nop
/* setup status reg */
- mfc0 t1, CP0_STATUS
- li t0, ST0_BEV
- or t1, t0
- xor t1, t0
+ move t1, zero
#ifdef CONFIG_64BIT
ori t1, ST0_KX
#endif
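
Each newly started hardware thread reads its identity out of CP0 EBASE: the low 7 bits are the node/core/thread number, the low two bits of that are the thread within the core, and a value of zero overall marks the boot CPU. Only the first thread of a core clears the MMU_SETUP control register (followed by an _ehb execution-hazard barrier, noted in the code as a possible Ax erratum), and non-boot CPUs then build a fresh Status value before heading to the secondary wait function. A small C sketch of the EBASE decode; the struct and helper names are illustrative.

#include <stdint.h>
#include <stdbool.h>

struct hw_id {
        unsigned int cpu;       /* EBASE & 0x7f: node/core/thread number */
        unsigned int thread;    /* cpu & 0x3:    thread within the core  */
};

struct hw_id decode_ebase_cpunum(uint32_t ebase)
{
        struct hw_id id;

        id.cpu = ebase & 0x7f;          /* andi v0, 0x7f    */
        id.thread = id.cpu & 0x3;       /* andi v1, v0, 0x3 */
        return id;
}

bool is_boot_cpu(const struct hw_id *id)
{
        return id->cpu == 0;            /* 2: beqz v0, 4f   */
}

bool is_first_thread_in_core(const struct hw_id *id)
{
        return id->thread == 0;         /* does the MMU_SETUP write */
}
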
@@ -183,9 +224,9 @@ EXPORT(nlm_boot_siblings)
* For the boot CPU, we have to restore registers and
* return
*/
-4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
+4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
li t1, 0xfadebeef
- dmtc0 t1, $4, 2 /* restore SP from UserLocal */
+ dmtc0 t1, $4, 2 /* restore SP from UserLocal */
PTR_SUBU sp, t0, PT_SIZE
RESTORE_ALL
jr ra
@@ -193,7 +234,7 @@ EXPORT(nlm_boot_siblings)
EXPORT(nlm_reset_entry_end)
FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
- __config_lsu
+ xlp_config_lsu
dmtc0 sp, $4, 2 /* SP saved in UserLocal */
SAVE_ALL
sync
@@ -210,6 +251,12 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
__CPUINIT
NESTED(nlm_boot_secondary_cpus, 16, sp)
+ /* Initialize CP0 Status */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
PTR_LA t1, nlm_next_sp
PTR_L sp, 0(t1)
PTR_LA t1, nlm_next_gp
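
nlm_boot_secondary_cpus now starts by giving the CPU a clean CP0 Status (zero, plus ST0_KX on 64-bit kernels so the 64-bit kernel segments stay accessible) before picking up the stack and global pointers the waking CPU published in nlm_next_sp/nlm_next_gp. A rough C rendering under those assumptions; write_cp0_status() is a hypothetical stand-in for the mtc0, and the two pointers are treated as ordinary globals.

#include <stdint.h>

#define ST0_KX  0x00000080u     /* keep 64-bit kernel segments usable */

/* Hypothetical stand-in for the mtc0 to CP0 Status. */
void write_cp0_status(uint32_t val);

/* Published by the waking CPU before the secondary is released. */
extern unsigned long nlm_next_sp;
extern unsigned long nlm_next_gp;

struct cpu_entry_regs {
        unsigned long sp;
        unsigned long gp;
};

struct cpu_entry_regs secondary_cpu_entry(int is_64bit)
{
        struct cpu_entry_regs regs;
        uint32_t status = 0;            /* start from a clean Status    */

        if (is_64bit)
                status |= ST0_KX;       /* ori t1, ST0_KX (CONFIG_64BIT) */
        write_cp0_status(status);

        regs.sp = nlm_next_sp;          /* PTR_L sp, 0(t1) */
        regs.gp = nlm_next_gp;
        return regs;
}
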
@@ -234,36 +281,36 @@ END(nlm_boot_secondary_cpus)
*/
__CPUINIT
NESTED(nlm_rmiboot_preboot, 16, sp)
- mfc0 t0, $15, 1 # read ebase
- andi t0, 0x1f # t0 has the processor_id()
- andi t2, t0, 0x3 # thread no
- sll t0, 2 # offset in cpu array
+ mfc0 t0, $15, 1 /* read ebase */
+ andi t0, 0x1f /* t0 has the processor_id() */
+ andi t2, t0, 0x3 /* thread num */
+ sll t0, 2 /* offset in cpu array */
- PTR_LA t1, nlm_cpu_ready # mark CPU ready
+ PTR_LA t1, nlm_cpu_ready /* mark CPU ready */
PTR_ADDU t1, t0
li t3, 1
sw t3, 0(t1)
- bnez t2, 1f # skip thread programming
- nop # for non zero hw threads
+ bnez t2, 1f /* skip thread programming */
+ nop /* for thread id != 0 */
/*
- * MMU setup only for first thread in core
+ * XLR MMU setup only for first thread in core
*/
li t0, 0x400
mfcr t1, t0
- li t2, 6 # XLR thread mode mask
+ li t2, 6 /* XLR thread mode mask */
nor t3, t2, zero
- and t2, t1, t2 # t2 - current thread mode
+ and t2, t1, t2 /* t2 - current thread mode */
li v0, CKSEG1ADDR(RESET_DATA_PHYS)
- lw v1, BOOT_THREAD_MODE(v0) # v1 - new thread mode
+ lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
sll v1, 1
- beq v1, t2, 1f # same as request value
- nop # nothing to do */
+ beq v1, t2, 1f /* same as request value */
+ nop /* nothing to do */
- and t2, t1, t3 # mask out old thread mode
- or t1, t2, v1 # put in new value
- mtcr t1, t0 # update core control
+ and t2, t1, t3 /* mask out old thread mode */
+ or t1, t2, v1 /* put in new value */
+ mtcr t1, t0 /* update core control */
1: wait
j 1b
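
On XLR, nlm_rmiboot_preboot derives the CPU number from EBASE, marks the CPU in nlm_cpu_ready[], and lets only thread 0 of each core reprogram the thread mode: the current mode sits in the 0x6 field of core-control register 0x400, the requested mode comes from BOOT_THREAD_MODE (shifted left by one to line up with that field), and the register is rewritten only if the two differ. A C sketch of that decision, with hypothetical accessors in place of the mfcr/mtcr on register 0x400.

#include <stdint.h>

#define XLR_THR_MODE_MASK       0x6     /* thread-mode field in core control 0x400 */

/* Hypothetical accessors standing in for mfcr/mtcr on register 0x400. */
uint32_t xlr_read_core_control(void);
void xlr_write_core_control(uint32_t val);

void xlr_set_thread_mode(unsigned int thread_id, uint32_t boot_thread_mode)
{
        uint32_t ctrl, cur_mode, new_mode;

        if (thread_id != 0)
                return;                         /* bnez t2, 1f          */

        ctrl = xlr_read_core_control();
        cur_mode = ctrl & XLR_THR_MODE_MASK;    /* current thread mode  */
        new_mode = boot_thread_mode << 1;       /* sll v1, 1            */

        if (new_mode == cur_mode)
                return;                         /* nothing to do        */

        ctrl &= ~XLR_THR_MODE_MASK;             /* mask out old mode    */
        ctrl |= new_mode;                       /* put in new value     */
        xlr_write_core_control(ctrl);           /* update core control  */
}
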