Diffstat (limited to 'arch/powerpc/kernel/head_fsl_booke.S')
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 253
1 file changed, 103 insertions, 150 deletions
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index e581524d85bc..3cb52fa0eda3 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -39,6 +39,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
@@ -150,16 +151,11 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB0 */
li r6,0x04
tlbivax 0,r6
-#ifdef CONFIG_SMP
- tlbsync
-#endif
+ TLBSYNC
/* Invalidate TLB1 */
li r6,0x0c
tlbivax 0,r6
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* 3. Setup a temp mapping and jump to it */
andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
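
Note: the open-coded #ifdef CONFIG_SMP / tlbsync / #endif / msync sequences
above are folded into a single TLBSYNC macro supplied by a shared header
(the header itself is not shown in this diff). A plausible definition,
assuming the macro simply preserves the removed behaviour:

	#ifdef CONFIG_SMP
	#define TLBSYNC		tlbsync; msync	/* broadcast, then wait */
	#else
	#define TLBSYNC		msync		/* no other cores to sync */
	#endif

On SMP, tlbsync makes the tlbivax invalidation visible to the other cores;
msync then orders its completion before execution continues.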
@@ -237,10 +233,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
@@ -282,10 +275,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
@@ -304,7 +294,7 @@ skpinv: addi r6,r6,1 /* Increment */
SET_IVOR(13, DataTLBError);
SET_IVOR(14, InstructionTLBError);
SET_IVOR(15, DebugDebug);
-#if defined(CONFIG_E500)
+#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
SET_IVOR(15, DebugCrit);
#endif
SET_IVOR(32, SPEUnavailable);
@@ -313,6 +303,9 @@ skpinv: addi r6,r6,1 /* Increment */
#ifndef CONFIG_E200
SET_IVOR(35, PerformanceMonitor);
#endif
+#ifdef CONFIG_PPC_E500MC
+ SET_IVOR(36, Doorbell);
+#endif
/* Establish the interrupt vector base */
lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
@@ -479,90 +472,16 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- mtspr SPRN_SPRG0, r10 /* Save some working registers */
- mtspr SPRN_SPRG1, r11
- mtspr SPRN_SPRG4W, r12
- mtspr SPRN_SPRG5W, r13
- mfcr r11
- mtspr SPRN_SPRG7W, r11
-
- /*
- * Check if it was a store fault, if not then bail
- * because a user tried to access a kernel or
- * read-protected page. Otherwise, get the
- * offending address and handle it.
- */
- mfspr r10, SPRN_ESR
- andis. r10, r10, ESR_ST@h
- beq 2f
-
- mfspr r10, SPRN_DEAR /* Get faulting address */
-
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- lis r11, PAGE_OFFSET@h
- cmplw 0, r10, r11
- bge 2f
-
- /* Get the PGD for the current thread */
-3:
- mfspr r11,SPRN_SPRG3
- lwz r11,PGDIR(r11)
-4:
- FIND_PTE
-
- /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
- andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
- cmpwi 0, r13, _PAGE_RW|_PAGE_USER
- bne 2f /* Bail if not */
-
- /* Update 'changed'. */
- ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
-
- /* MAS2 not updated as the entry does exist in the tlb, this
- fault taken to detect state transition (eg: COW -> DIRTY)
- */
- andi. r11, r11, _PAGE_HWEXEC
- rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
- ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
-
- /* update search PID in MAS6, AS = 0 */
- mfspr r12, SPRN_PID0
- slwi r12, r12, 16
- mtspr SPRN_MAS6, r12
-
- /* find the TLB index that caused the fault. It has to be here. */
- tlbsx 0, r10
-
- /* only update the perm bits, assume the RPN is fine */
- mfspr r12, SPRN_MAS3
- rlwimi r12, r11, 0, 20, 31
- mtspr SPRN_MAS3,r12
- tlbwe
-
- /* Done...restore registers and get out of here. */
- mfspr r11, SPRN_SPRG7R
- mtcr r11
- mfspr r13, SPRN_SPRG5R
- mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRN_SPRG1
- mfspr r10, SPRN_SPRG0
- rfi /* Force context change */
-
-2:
- /*
- * The bailout. Restore registers to pre-exception conditions
- * and call the heavyweights to help us out.
- */
- mfspr r11, SPRN_SPRG7R
- mtcr r11
- mfspr r13, SPRN_SPRG5R
- mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRN_SPRG1
- mfspr r10, SPRN_SPRG0
- b data_access
+ NORMAL_EXCEPTION_PROLOG
+ mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ stw r5,_ESR(r11)
+ mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ andis. r10,r5,(ESR_ILK|ESR_DLK)@h
+ bne 1f
+ EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ EXC_XFER_EE_LITE(0x0300, CacheLockingException)
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
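
DataStorage now reuses the code that previously lived at the data_access
label (deleted further down): rather than attempting an inline TLB fixup,
it saves state with NORMAL_EXCEPTION_PROLOG and dispatches on ESR. A
C-like sketch of the dispatch; the names come from the patch, and the
actual argument passing happens via EXC_XFER_EE_LITE rather than a direct
call:

	if (esr & (ESR_ILK | ESR_DLK))	/* cache-locking fault */
		CacheLockingException(regs, dear, esr);
	else				/* ordinary page fault path */
		handle_page_fault(regs, dear, esr);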
@@ -641,15 +560,30 @@ interrupt_base:
lwz r11,PGDIR(r11)
4:
+ /* Mask of required permission bits. Note that while we
+ * do copy ESR:ST to the _PAGE_RW position, since write
+ * faults on RO pages are pretty common, we don't do the
+ * same with _PAGE_DIRTY. We could, but it's a fairly rare
+ * event, so I'd rather take the overhead when it happens
+ * than add an instruction here. We should measure whether
+ * this is worth doing at all, since we could otherwise
+ * avoid loading SPRN_ESR entirely...
+ *
+ * TODO: Is that mfspr & rlwimi worth it, or can we save
+ * a couple of instructions here?
+ */
+ mfspr r12,SPRN_ESR
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ rlwimi r13,r12,11,29,29
+
FIND_PTE
- andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
- beq 2f /* Bail if not present */
+ andc. r13,r13,r11 /* Check permission */
+ bne 2f /* Bail if permission mismatch */
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
- ori r11, r11, _PAGE_ACCESSED
- stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common tlb load */
b finish_tlb_load
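
The rewritten data TLB miss fast path builds the mask of required
permission bits up front and rejects the access with a single andc.,
instead of testing _PAGE_PRESENT and writing _PAGE_ACCESSED back to the
PTE. (If _PAGE_ACCESSED is clear, the andc. check now fails and the slow
path sets it instead.) The rlwimi rotates ESR left by 11, which lands
ESR[ST] exactly on the _PAGE_RW bit. A C sketch of the same check, not
the kernel's actual C, just the asm logic transcribed:

	static int tlb_miss_perm_ok(unsigned long pte, unsigned long esr)
	{
		unsigned long required = _PAGE_PRESENT | _PAGE_ACCESSED;

		if (esr & ESR_ST)		/* store faults also need write */
			required |= _PAGE_RW;	/* what the rlwimi accomplishes */
		return !(required & ~pte);	/* andc.: all required bits set? */
	}

The instruction TLB miss handler below applies the same technique with
_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC as the required mask.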
@@ -663,7 +597,7 @@ interrupt_base:
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
- b data_access
+ b DataStorage
/* Instruction TLB Error Interrupt */
/*
@@ -701,15 +635,16 @@ interrupt_base:
lwz r11,PGDIR(r11)
4:
+ /* Make up the required permissions */
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
+
FIND_PTE
- andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
- beq 2f /* Bail if not present */
+ andc. r13,r13,r11 /* Check permission */
+ bne 2f /* Bail if permission mismatch */
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
- ori r11, r11, _PAGE_ACCESSED
- stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common TLB load point */
b finish_tlb_load
@@ -750,10 +685,13 @@ interrupt_base:
/* Performance Monitor */
EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+#ifdef CONFIG_PPC_E500MC
+ EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
+#endif
/* Debug Interrupt */
DEBUG_DEBUG_EXCEPTION
-#if defined(CONFIG_E500)
+#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
DEBUG_CRIT_EXCEPTION
#endif
@@ -761,29 +699,13 @@ interrupt_base:
* Local functions
*/
- /*
- * Data TLB exceptions will bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-data_access:
- NORMAL_EXCEPTION_PROLOG
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
- stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
- andis. r10,r5,(ESR_ILK|ESR_DLK)@h
- bne 1f
- EXC_XFER_EE_LITE(0x0300, handle_page_fault)
-1:
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x0300, CacheLockingException)
-
/*
-
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
* r11 - TLB (info from Linux PTE)
- * r12, r13 - available to use
+ * r12 - available to use
+ * r13 - upper bits of PTE (if PTE_64BIT) or available to use
* CR5 - results of addr >= PAGE_OFFSET
* MAS0, MAS1 - loaded with proper value when we get here
* MAS2, MAS3 - will need additional info from Linux PTE
@@ -805,20 +727,14 @@ finish_tlb_load:
#endif
mtspr SPRN_MAS2, r12
- bge 5, 1f
-
- /* is user addr */
- andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+ li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
+ rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
+ and r12, r11, r10
andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
- srwi r10, r12, 1
- or r12, r12, r10 /* Copy user perms into supervisor */
- iseleq r12, 0, r12
- b 2f
-
- /* is kernel addr */
-1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
- ori r12, r12, (MAS3_SX | MAS3_SR)
-
+ slwi r10, r12, 1
+ or r10, r10, r12
+ iseleq r12, r12, r10
+
#ifdef CONFIG_PTE_64BIT
2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
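
The permission propagation in finish_tlb_load loses its user/kernel
branch. The FSL BookE PTE bits are laid out so that pte & mask directly
yields MAS3 supervisor bits (_PAGE_PRESENT over SR, _PAGE_HWEXEC over SX,
with _PAGE_DIRTY rotated into the SW position), and each MAS3 user bit
sits one position above its supervisor counterpart, so the user bits come
from a single shift selected by iseleq. A C sketch under those
assumptions:

	static unsigned long mas3_perms(unsigned long pte)
	{
		unsigned long mask = _PAGE_HWEXEC | _PAGE_PRESENT;
		unsigned long sperm, uperm;

		if (pte & _PAGE_DIRTY)	/* the rlwimi: DIRTY -> SW position */
			mask |= MAS3_SW;
		sperm = pte & mask;	/* supervisor perms fall out directly */
		uperm = sperm << 1;	/* each U* bit is one above its S* bit */
		return (pte & _PAGE_USER) ? (uperm | sperm) : sperm; /* iseleq */
	}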
@@ -1065,6 +981,52 @@ _GLOBAL(set_context)
isync /* Force context change */
blr
+_GLOBAL(flush_dcache_L1)
+ mfspr r3,SPRN_L1CFG0
+
+ rlwinm r5,r3,9,3 /* Extract cache block size */
+ twlgti r5,1 /* Only 32 and 64 byte cache blocks
+ * are currently defined.
+ */
+ li r4,32
+ subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
+ * log2(number of ways)
+ */
+ slw r5,r4,r5 /* r5 = cache block size */
+
+ rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
+ mulli r7,r7,13 /* An 8-way cache will require 13
+ * loads per set.
+ */
+ slw r7,r7,r6
+
+ /* save off HID0 and set DCFA */
+ mfspr r8,SPRN_HID0
+ ori r9,r8,HID0_DCFA@l
+ mtspr SPRN_HID0,r9
+ isync
+
+ lis r4,KERNELBASE@h
+ mtctr r7
+
+1: lwz r3,0(r4) /* Load... */
+ add r4,r4,r5
+ bdnz 1b
+
+ msync
+ lis r4,KERNELBASE@h
+ mtctr r7
+
+1: dcbf 0,r4 /* ...and flush. */
+ add r4,r4,r5
+ bdnz 1b
+
+ /* restore HID0 */
+ mtspr SPRN_HID0,r8
+ isync
+
+ blr
+
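+
The new flush_dcache_L1 derives its loop bounds from L1CFG0 instead of
hard-coding the cache geometry, then walks KERNELBASE twice: a load pass
run with HID0[DCFA] (flush assist) set so that allocation displaces every
way, followed by a dcbf pass to push the dirty lines out. The sizing
arithmetic transcribed to C; the field positions mirror the rlwinm
instructions above, and the 13-loads-per-set figure comes from the
patch's own 8-way comment:

	static unsigned int l1_flush_loads(unsigned int l1cfg0)
	{
		unsigned int enc = (l1cfg0 >> 23) & 0x3; /* 0: 32B, 1: 64B blocks */
		unsigned int kib = l1cfg0 & 0xff;	 /* cache size in KiB */

		return (kib * 13) << (2 - enc);		 /* 13 loads per 8 blocks */
	}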
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
@@ -1080,15 +1042,6 @@ empty_zero_page:
swapper_pg_dir:
.space PGD_TABLE_SIZE
-/* Reserved 4k for the critical exception stack & 4k for the machine
- * check stack per CPU for kernel mode exceptions */
- .section .bss
- .align 12
-exception_stack_bottom:
- .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
- .globl exception_stack_top
-exception_stack_top:
-
/*
* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.