Diffstat (limited to 'pk/ppe42/ppe42_exceptions.S')
-rw-r--r--  pk/ppe42/ppe42_exceptions.S | 543
1 file changed, 226 insertions(+), 317 deletions(-)
diff --git a/pk/ppe42/ppe42_exceptions.S b/pk/ppe42/ppe42_exceptions.S
index 19100e1e..b3ce3551 100644
--- a/pk/ppe42/ppe42_exceptions.S
+++ b/pk/ppe42/ppe42_exceptions.S
@@ -74,9 +74,7 @@ __instruction_storage:
.org __vectors + 0x00A0
__external_interrupt_vector:
- _pk_fast_ctx_push
-
- b __external_interrupt
+ _pk_ctx_push_as_needed __get_ext_irq
############################################################
# 0x00C0 : Alignment Exception
@@ -99,9 +97,7 @@ __alignment_exception:
### switches between threads. They can also be used by the code
### to signal an exception in an error scenario.
__program_exception:
- _pk_fast_ctx_push
-
- b program_exception_handler
+ _pk_ctx_push_as_needed program_exception_handler
############################################################
@@ -110,9 +106,7 @@ __program_exception:
.org __vectors + 0x0100
__dec_interrupt:
- _pk_fast_ctx_push
- li %r3, PPE42_IRQ_DEC
- b dec_handler
+ _pk_ctx_push_as_needed dec_handler
############################################################
# 0x0120 : FIT Interrupts
@@ -120,8 +114,7 @@ __dec_interrupt:
.org __vectors + 0x0120
__fit_interrupt:
- #b fit_handler
- b .
+ _pk_ctx_push_as_needed fit_handler
############################################################
# 0x0140 : Watchdog Interrupts
@@ -129,183 +122,7 @@ __fit_interrupt:
.org __vectors + 0x0140
__watchdog_interrupt:
- #b watchdog_handler
- b .
-
-
-
-
-
- .global __pk_irq_fast2full
-__pk_irq_fast2full:
-
- ## Convert a fast-mode to a full-mode interrupt by saving the
- ## (volatile - fast) context, and switching to the appropriate system
- ## stack.
-
- ## Entry invariants:
- ## 1. The SP/stack must be exactly as it was when the fast-mode
- ## handler was entered.
- ## 2. No changes have been made to the MSR - the interrupt level must
- ## remain disabled.
- ## 3. The handler owns the fast context and has not modified the other
- ## register context. This routine can only use the (volatile -
- ## fast) register context.
-
- ## 41 (linear) instructions plus alignment
-
- ## Start by pushing the (volatile - fast) context. Technically we also
- ## need to save the CR as our contract with the handler is not to
- ## disturb any of its register state.
-
- _pk_vol_fast_ctx_push
- mfcr %r10
- mfsprg0 %r8
-
- ## At entry here the (volatile - fast) context has been pushed,
- ## R8 has SPRG0 and R10 contains the saved CR.
-
- ## Note that it would violate a kernel/API invariant if this routine
- ## were entered from outside an interrupt context.
-
-fast2full_noncritical:
-
- ## switch to the kernel stack if we haven't already done so. (SPRG0[RI] = 0)
- #bb1wi %r8, RI_BIT, 1f //branches if the RI_BIT is '1'
-
- extrwi %r9, %r8, 8, 8
- cmpwi %r9, 1
- bne 1f
-
- _stwsd %r1, __pk_saved_sp_noncritical
- _lwzsd %r1, __pk_noncritical_stack
-
-1:
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r8, PI_BIT, 2f //branches if PI_BIT is '1'
- cmpwi %r9, 0
- bne 2f
- _pk_panic PPE42_IRQ_FAST2FULL_INVARIANT
-2:
- .endif
-
- mtcr0 %r10
- blr
-
- .global __pk_irq_full_mode_exit
-__pk_irq_full_mode_exit:
-
- ## Exit a full-mode handler.
-
- ## Entry invariants:
- ## 1. The SP/stack must be in exactly the same state it was left in at
- ## the exit of __pk_irq_fast2full.
- ## 2. It is assumed that the preemption rules of PK have been followed
- ## - in particular that critical handlers have not enabled
- ## non-critical interrupts.
-
- ## We can freely modify the volatile context here - the handler is done
- ## and we will restore the interrupted volatile context.
-
- ## 22 linear instructions
-
- ## If the critical count is non-zero, then the PK preemption rules
- ## guarantee that we are exiting from a critical interrupt
- ## handler. This test is safe to make even if critical interrupts are
- ## enabled, because the variable is set exactly once in a critical
- ## section.
-
- mfsprg0 %r3
-
- ## Exiting a full-mode non-critical handler is more complex than the
- ## critical case, because the handler may have made a new
- ## highest-priority thread runnable and we may need to go through a
- ## delayed scheduling step.
-
- ## Note that the idle thread is treated as a special case. The idle
- ## thread has no permanent register context. To avoid having to
- ## allocate a stack area for the idle thread, the idle thread
- ## 'uses' the non-critical stack. When the idle thread is interrupted
- ## the (redundant) context is pushed, but is then effectively lost.
- ## Whenever we restore the idle thread we simply reenter the idle
- ## thread entry point.
-
- ## At entry:
- ## 1. R3 holds the value of SPRG0 (__PkKernelContext)
-
- ## 33 linear instructions.
-
-full_exit_noncritical:
-
- ## Enter a critical section for the return from interrupt, in the event
- ## that the handler enabled preemption.
-
- _pk_critical_section_enter %r4, %r5
-
- ## If the non-critical count is > 1 then this is a nested interrupt
- ## and we can simply pop the context and RFI.
-
- extrwi. %r4, %r3, 8, 8
-
- ## If SPRG0[RI] = 1 then this is a recursive interrupt
- ## and we can simply pop the context and RFI. Note that it would
- ## violate a kernel/API invariant if this routine were entered from
- ## outside an interrupt context (interrupt level == 0).
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r3, PI_BIT, 1f //branch if the PI bit is set
- bne 1f
- _pk_panic PPE42_IRQ_FULL_EXIT_INVARIANT
-1:
- .endif
-
- cmpwi %r4, 1
- bne exit_noncritical_without_switch
-
- ## Otherwise, restore the saved stack pointer and continue.
-
- _lwzsd %r1, __pk_saved_sp_noncritical
-
- ## If we are not in thread mode (i.e., we took an interrupt in an
- ## interrupt-only configuration of PK or after pk_initialize() but
- ## before pk_start_threads) simply pop the context and RFI - in this
- ## case we'll most likely be returning to main() or the non-thread-mode
- ## idle thread.
-
- andi. %r4, %r3, PPE42_THREAD_MODE
- beq exit_noncritical_without_switch
-
- ## Now, check for a delayed context switch. If none is pending, we can
- ## exit (after a check for the idle thread special case).
-
- _lwzsd %r3, __pk_delayed_switch
- cmpwi %r3, 0
- bne noncritical_switch
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
-
-exit_noncritical_without_switch:
- _pk_vol_fast_ctx_pop
- b fast_exit_noncritical
-
- ## The non-critical interrupt activated a delayed context switch. The
- ## C-level code has taken care of the scheduling decisions - we simply
- ## need to implement them here.
-
-noncritical_switch:
-
- ## Clear the delayed switch flag and go to the context switch code to
- ## finish the switch.
-
- li %r3, 0
- _stwsd %r3, __pk_delayed_switch
-
- b thread_save_non_volatile_and_switch
-
-
+ _pk_ctx_push_as_needed watchdog_handler
@@ -317,30 +134,33 @@ noncritical_switch:
__pk_idle_thread:
- ## The idle thread 'uses' the non-critical stack. Any register context
+ ## The idle thread 'uses' the kernel stack. Any register context
## pushed here is redundant and is wiped out/ignored every time the
## idle thread is re-scheduled.
## The idle thread simply establishes a default machine context and
## enters the wait-enable state. The idle thread is always entered
- ## with non-critical interrupts disabled.
+ ## with interrupts disabled.
##
## The kernel context is initialized to indicate that the idle thread
- ## is running - the idle thread priority is PK_THREADS, and the
- ## 'thread-mode' bit is asserted as well.
+ ## is running - the idle thread priority is PK_THREADS, the
+ ## 'thread-mode' bit is asserted and so is the 'discard-ctx' bit.
+ ## In addition, the previous kernel context is stored in the lower
+ ## 16 bits.
##
## This loop can also be called from the PK bootloader if main()
## returns - in which case we don't muck with the SPRG0 or the stack
## pointer.
-
- li %r3, (PK_THREADS | PPE42_THREAD_MODE)
+ mfsprg0 %r3
+ srwi %r3, %r3, 16
+ oris %r3, %r3, (PK_THREADS << 8) | PPE42_THREAD_MODE | PPE42_DISCARD_CTX
mtsprg0 %r3
- _lwzsd %r1, __pk_noncritical_stack
+ _lwzsd %r1, __pk_kernel_stack
-__pk_idle_thread_from_bootloader:
+__pk_idle_thread_from_bootloader:
+
+ PK_KERN_TRACE_ASM16("ENTER_IDLE_STATE")
- #li %r3, PK_THREADS
- #PK_TRACE_THREAD_SWITCH %r3, %r4
_lwzsd %r3, __pk_thread_machine_context_default
_oriwa %r3, %r3, MSR_WE
mtmsr %r3
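
[Editor's note] The idle-thread entry above packs all of the scheduling state into SPRG0. A rough C model of that word, inferred from the shift/mask sequences in this diff (srwi by 16, oris with (PK_THREADS << 8), rlwimi ...,24,2,7, extrwi ...,6,2) is shown below; all names and exact bit positions are assumptions, not taken from the real PK headers:

    #include <stdint.h>

    /* Hedged C model of the __PkKernelContext word kept in SPRG0
     * (PowerPC big-endian bit numbering: bit 0 is the MSB). */
    #define PK_THREADS        32           /* assumed thread count        */
    #define CTX_THREAD_MODE   0x40000000u  /* assumed: PPE42_THREAD_MODE  */
    #define CTX_DISCARD_CTX   0x00800000u  /* assumed: PPE42_DISCARD_CTX  */
    #define CTX_ID_SHIFT      24           /* thread id/priority, bits 2-7 */

    static inline uint32_t idle_thread_context(uint32_t previous)
    {
        /* Mirrors the srwi/oris/mtsprg0 sequence at __pk_idle_thread:
         * the previous context is demoted into the low 16 bits, and the
         * idle id (PK_THREADS), thread-mode and discard-ctx flags are
         * asserted in the high half. */
        return (previous >> 16)
             | ((uint32_t)PK_THREADS << CTX_ID_SHIFT)
             | CTX_THREAD_MODE | CTX_DISCARD_CTX;
    }
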
@@ -364,27 +184,14 @@ dec_handler:
## interrupt by writing the DIS back into the TSR before calling the
## handler. The timer handler does not take any arguments.
- _save_update_kernel_context %r3, %r4
+ li %r4, PPE42_IRQ_DEC
+ _update_kernel_context %r4, %r3
_liwa %r3, TSR_DIS
mttsr %r3
- _pk_irq_fast2full __pk_timer_handler
-
-
-
-
-
- ## Exit traces are moved here because the code area (0x100 bytes)
- ## reserved for individual interrupts is overflowing when tracing is
- ## enabled. This is kind of a hack: We know that this trace only
- ## occurs when we're about to exit the fast context, at a place
- ## where we can use any of the fast registers.
-#if 0
-__pk_trace_noncritical_irq_exit:
- #PK_TRACE_NONCRITICAL_IRQ_EXIT %r3, %r4
- blr
-#endif
+ bl __pk_timer_handler
+ b check_for_ext_interrupt
program_exception_handler:
## first check if exception was caused by an illegal 'sc' instruction
@@ -393,46 +200,14 @@ program_exception_handler:
cmpwbeq %r3, %r4, __sc_helper
_pk_panic PPE42_ILLEGAL_INSTRUCTION
- ## SRR0 is currently pointing to the 'sc' instruction. We need to advance it
+ ## Saved SRR0 is currently pointing to the 'sc' instruction. We need to advance it
## to the next instruction so that we don't end up in an endless loop (something
## that the ppc sc instruction does automatically).
__sc_helper:
- mfsrr0 %r3
- addi %r3, %r3, 4
- mtsrr0 %r3
-
-__system_call:
-
- ## The program exception is used by PK as a handy way to start a
- ## context switch, as the continuation address and MSR of the thread to
- ## be swapped out are saved in SRR0 and SRR1.
-
- ## Non-critical interrupts are disabled at entry.
-
- ## Begin by saving the volatile context of the current thread.
- ## NOTE: fast context has already been saved prior to branching here.
-
- _pk_vol_fast_ctx_push
-
-thread_save_non_volatile_and_switch:
-
- ## Finish the thread context save by pushing the non-volatile context
- ## and saving the resulting stack pointer in the thread structure. If
- ## the current thread is the idle thread this step is bypassed.
-
- ## This symbol is also used as an entry point by the non-critical
- ## interrupt handler - non-critical interrupts are disabled here.
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_next_thread_resume
-
- _pk_non_vol_ctx_push
- stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
-
- ## The next thread becomes the current thread, and we switch to its
- ## stack - unless the new thread is the idle thread, in which case it
- ## (the idle thread) is simply resumed.
+ mfsrr0 %r4
+ _lwzsd %r3, __pk_saved_sp
+ addi %r4, %r4, 4
+ stw %r4, PK_CTX_SRR0(%r3)
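
[Editor's note] The reworked __sc_helper no longer rewrites SRR0 directly; it patches the copy already saved in the context frame. A small illustrative C view (pk_ctx_t and its member are hypothetical names for the PK_CTX_* frame):

    #include <stdint.h>

    typedef struct {
        uint32_t srr0;                 /* at offset PK_CTX_SRR0 */
        /* ... rest of the saved context ... */
    } pk_ctx_t;

    static void sc_advance(pk_ctx_t *saved)
    {
        /* A PowerPC 'sc' advances the restart address itself; the PPE42
         * program exception leaves SRR0 at the 'sc', so step over it to
         * avoid retaking the exception on return. */
        saved->srr0 += 4;
    }
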
.global __pk_next_thread_resume
__pk_next_thread_resume:
@@ -440,38 +215,36 @@ __pk_next_thread_resume:
_lwzsd %r3, __pk_next_thread
_stwsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
+ ## Enter the wait enabled state if the thread pointer is null
+ bwz %r3, __pk_idle_thread
+ ## switch to the new thread stack
lwz %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
- ## Restore the thread context and resume the new thread. The kernel
- ## context in thread mode is simply the thread priority OR'ed with the
- ## thread-mode flag. All other fields are cleared.
+ ## load sprg0 from the stack and update the thread priority
+ ## in case it changed.
+restore_and_update_sprg0:
+ _lbzsd %r31, __pk_next_priority
- _pk_non_vol_ctx_pop
- _pk_vol_fast_ctx_pop
-
- _lbzsd %r3, __pk_next_priority
- #PK_TRACE_THREAD_SWITCH %r3, %r4
- ori %r3, %r3, PPE42_THREAD_MODE
- mtsprg0 %r3
+ PK_KERN_TRACE_ASM16("RESUME_THREAD(%d)", %r31)
- _pk_fast_ctx_pop
- rfi
+ lwz %r3, PK_CTX_KERNEL_CTX(%r1)
+ rlwimi %r3, %r31, 24, 2, 7
+ mtsprg0 %r3
+ b ctx_pop
+
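[Editor's note] The rlwimi above folds the (possibly changed) priority of the next thread into the kernel-context word saved on its stack before it is moved back into SPRG0. A C restatement, assuming the bits 2-7 id/priority field implied by the rlwimi %r3,%r31,24,2,7 mask:

    #include <stdint.h>

    static inline uint32_t merge_priority(uint32_t saved_ctx, uint8_t prio)
    {
        return (saved_ctx & ~0x3F000000u)          /* clear bits 2-7  */
             | ((uint32_t)(prio & 0x3Fu) << 24);   /* insert priority */
    }
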
fit_handler:
- ## The FIT handler is user defined, and is a fast-mode handler. By
+ ## The FIT handler is user defined. By
## convention the kernel clears the interrupt by writing the FIS back
## into the TSR.
- _pk_fast_ctx_push
-
- _lwzsd %r3, __ppe42_fit_arg
li %r4, PPE42_IRQ_FIT
- _save_update_kernel_context %r4, %r6
+ _update_kernel_context %r4, %r3
+
+ _lwzsd %r3, __ppe42_fit_arg
_liwa %r6, TSR_FIS
mttsr %r6
@@ -480,18 +253,15 @@ fit_handler:
mtlr %r6
blrl
- b fast_exit_noncritical
+ b check_for_ext_interrupt
watchdog_handler:
## Watchdog setup is described in the PK Specification.
## The kernel clears TSR[WIS] prior to calling the handler.
- ## The watchdog handler is a critical, fast-mode handler.
- _pk_fast_ctx_push
-
- li %r3, PPE42_IRQ_WATCHDOG
+ li %r4, PPE42_IRQ_WATCHDOG
- _save_update_kernel_context %r3, %r6
+ _update_kernel_context %r4, %r3
_liwa %r6, TSR_WIS
mttsr %r6
@@ -500,68 +270,99 @@ watchdog_handler:
mtlr %r6
blrl
- b .
+ b check_for_ext_interrupt
-#if 0
-debug_handler:
+ ## Check if we can discard the interrupted context.
+ ## This routine expects r3, r4, lr, and cr to already be pushed.
+ ## It also expects r3 to hold the address of the function to jump
+ ## to after the interrupted context has been pushed (if necessary).
- ## PK does nothing upon reception of the debug interrupt other
- ## than calling the handler (if non-0). The debug handler is a
- ## fast-mode handler.
+ .align 5
+ctx_check_discard:
- _pk_fast_ctx_push
+ ## Prepare to jump to the branch function that was passed in
+ mtlr %r3
- li %r3, PPE42_IRQ_DEBUG
-
- _save_update_kernel_context %r3, %r6
+ ## Check if the DISCARD_CTX bit is set in the kernel context
+ mfsprg0 %r3
+ bb0wi %r3, PPE42_DISCARD_CTX_BIT, ctx_continue_push
- _lwzsd %r6, __ppe42_debug_routine
- cmpwi %r6, 0
- mtlr %r6
- beq debug_exit
- blrl
-
-debug_exit:
- b fast_exit_critical
-#endif
-
- .align 5
-__external_interrupt:
+ctx_discard:
+ ## DISCARD_CTX bit was set. Discard stack and branch to interrupt
+ ## handler code
+ addi %r1, %r1, PK_CTX_SIZE
+ blr
- ## The non-critical interrupt handler entry point is re-entrant - A
- ## handler may allow preemption, which could cause another entry here.
+ ## DISCARD_CTX bit was not set. Continue saving full context.
+ ## (r3, r4, lr, and cr have already been saved for us) and
+ ## r3 contains the interrupted kernel context
+
+ctx_continue_push:
+
+ stvd %d5, PK_CTX_GPR5(%r1)
+ stvd %d7, PK_CTX_GPR7(%r1)
+ stvd %d9, PK_CTX_GPR9(%r1)
+ stvd %d28, PK_CTX_GPR28(%r1)
+ stvd %d30, PK_CTX_GPR30(%r1)
+ mfxer %r5
+ mfctr %r6
+ stvd %d5, PK_CTX_XER(%r1)
+ mfsrr0 %r7
+ mfsrr1 %r8
+ stvd %d7, PK_CTX_SRR0(%r1)
+ stw %r0, PK_CTX_GPR0(%r1)
+ stw %r3, PK_CTX_KERNEL_CTX(%r1)
- ## Entry invariants:
- ## 1. Non-critical interrupts are disabled;
- ## 2. The SP points to a thread stack or the non-critical stack.
+ ## If the 'processing interrupt' bit is set then we were already
+ ## using the kernel stack and don't need to modify or save the current
+ ## stack pointer.
+ bb1wi %r3, PPE42_PROC_IRQ_BIT, ctx_push_completed
- ## Since fast-mode handlers can not use PK services or alter the
- ## machine context, the exit of a fast mode handler is a simple RF(C)I.
+ ## load the pointer to the current thread control block
+ _lwzsd %r4, __pk_current_thread
- ## Begin by pushing the fast context on the current stack.
-
- ## _pk_fast_ctx_push was called prior to branching here. No need to call it here.
+ ## don't save the stack pointer in the thread control block
+ ## if the current thread was the idle thread (null pointer)
+ bwz %r4, switch_to_kernel_stack
+
+ ## we interrupted a bona fide thread, so save off the stack
+ ## pointer
+ stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r4)
+
+switch_to_kernel_stack:
+ _stwsd %r1, __pk_saved_sp
+ _lwzsd %r1, __pk_kernel_stack
+
+ctx_push_completed:
+ blr
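
[Editor's note] The new push-as-needed logic above is the core of this commit. A C model of ctx_check_discard/ctx_continue_push follows; all names are illustrative (the bit positions and PK_CTX_SIZE are assumptions), and the stack pointer is modeled as a plain variable:

    #include <stdint.h>
    #include <stddef.h>

    #define PK_CTX_SIZE      0x58u        /* assumed frame size          */
    #define CTX_DISCARD_CTX  0x00800000u  /* assumed bit position        */
    #define CTX_PROC_IRQ     0x00400000u  /* assumed PPE42_PROC_IRQ bit  */

    typedef struct pk_thread { uintptr_t saved_stack_pointer; } pk_thread_t;

    extern pk_thread_t *current_thread;   /* __pk_current_thread         */
    extern uintptr_t sp, saved_sp, kernel_stack;
    extern void push_remaining_context(uintptr_t sp, uint32_t ctx);

    void ctx_check_discard(uint32_t kernel_ctx, void (*handler)(void))
    {
        if (kernel_ctx & CTX_DISCARD_CTX) {
            sp += PK_CTX_SIZE;            /* ctx_discard: drop the frame */
            handler();
            return;
        }

        /* ctx_continue_push: finish saving GPRs, XER/CTR, SRR0/SRR1
         * and the kernel context word into the partially-built frame. */
        push_remaining_context(sp, kernel_ctx);

        if (!(kernel_ctx & CTX_PROC_IRQ)) {
            if (current_thread != NULL)   /* idle thread has no TCB      */
                current_thread->saved_stack_pointer = sp;
            saved_sp = sp;                /* __pk_saved_sp               */
            sp = kernel_stack;            /* __pk_kernel_stack           */
        }
        handler();
    }
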
- ## Load the base address for the external interrupt table
+__get_ext_irq:
+
+ ## Entry invariants:
+ ## 1. external interrupts are disabled;
+ ## 2. previous context has been saved off
+ ## 3. r3 contains the kernel context
+ ## 4. r1 points to the kernel stack
- ## TODO: This is HW Macro specific code that is responsible for finding the
+ ## This is HW Macro specific code that is responsible for finding the
## IRQ # and storing it in r4 (phantom IRQs are assigned a value of EXTERNAL_IRQS).
hwmacro_get_ext_irq
## An active or phantom IRQ was found.
+ ## R3 has the context of the interrupted thread or bottom half
## R4 has the IRQ number.
## The IRQ is converted into a pointer to an 8-byte handler
## structure, and the handler is dispatched. The call is made with the
## parameters:
- ## R3 = private
+ ## R3 = private data ptr
## R4 = irq
-external_irq_found:
+call_external_irq_handler:
- _save_update_kernel_context %r4, %r5
+ _update_kernel_context %r4, %r3
slwi %r3, %r4, 3 //multiply the irq# by 8
_liw %r6, __ppe42_irq_handlers
lwzx %r5, %r6, %r3
@@ -570,12 +371,120 @@ external_irq_found:
mtlr %r5
blrl
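
[Editor's note] The irq# * 8 scaling above implies an 8-byte table entry per IRQ. A C view of the dispatch, with the member order assumed (the diff only shows that R3 receives the private pointer and R4 the IRQ number):

    #include <stdint.h>

    typedef struct {
        void (*handler)(void *priv, unsigned irq);
        void *priv;
    } ppe42_irq_handler_t;

    extern ppe42_irq_handler_t __ppe42_irq_handlers[];

    static inline void dispatch_external_irq(unsigned irq)
    {
        ppe42_irq_handler_t *h = &__ppe42_irq_handlers[irq];
        h->handler(h->priv, irq);   /* R3 = private data ptr, R4 = irq */
    }
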
- ## Pop the stack/RFI when (if) it returns here.
+ ## Once the interrupt handler returns, check if any interrupts are
+ ## waiting and handle them now.
+
+check_for_ext_interrupt:
+
+ ## Set the CTX_DISCARD bit in the kernel context so that if there is
+ ## an interrupt it will not bother saving the full context.
+ mfsprg0 %r31
+ oris %r31, %r31, PPE42_DISCARD_CTX
+ mtsprg0 %r31
+
+ ###### Enable/Disable External Interrupts #####
+ wrteei 1
+ wrteei 0
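
[Editor's note] The wrteei pair opens a one-instruction window in which MSR[EE] is raised, so any pending external interrupt vectors immediately (and, because the discard bit was just set, without a full context save); if execution reaches the second wrteei, nothing was pending. A minimal sketch, assuming GCC inline assembly for PPE42:

    static inline void poll_pending_irqs(void)
    {
        __asm__ volatile ("wrteei 1\n\t"   /* open the window  */
                          "wrteei 0"       /* close the window */
                          ::: "memory");
    }
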
+
+ ## If we made it this far, there must not be any interrupts pending.
+ ## If bottom half processing was interrupted, we need to restore it.
+check_interrupted_bh:
+
+ ## If the thread ID is 33 then the bottom half handler was interrupted
+ ## and needs to be restored.
+ extrwi %r4, %r31, 6, 2
+ cmpwi %r4, 33
+ beq ctx_pop_with_sprg0
+
+check_for_bh:
+ ## if the bottom half queue is pointing to itself then the queue is
+ ## empty and there are no bottom halves that need processing.
+ _lwzsd %r4, _pk_bh_queue
+ lwz %r5, 0(%r4)
+ cmplwbeq %r4, %r5, restore_interrupted_sp
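
[Editor's note] The emptiness test above relies on a circular list whose head points to itself when nothing is queued. A C equivalent (pk_bh_t is an illustrative name; only the first-word 'next' link is taken from the assembly):

    typedef struct pk_bh { struct pk_bh *next; } pk_bh_t;

    extern pk_bh_t *_pk_bh_queue;   /* pointer to the list head */

    static inline int bh_queue_empty(void)
    {
        return _pk_bh_queue->next == _pk_bh_queue;
    }
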
+
+process_bottom_halves:
+ ## Clear the CTX_DISCARD bit so that interrupted bottom half context
+ ## will be saved in case an interrupt occurs after this point. Also
+ ## set the thread ID to 33 so that we know to restore the bottom half
+ ## context that was interrupted.
+ rlwinm %r3, %r31, 0, 9, 1 //clear thread id + discard bit
+ oris %r3, %r3, 0x2100 //set thread id to 33
+ mtsprg0 %r3 //set bottom half context
+
+ ## branch to a C function that processes bottom halves
+ wrteei 1
+ bl _pk_process_bh
+ wrteei 0
+
+ ## restore the previous kernel context (with discard bit set)
+ mtsprg0 %r31
+
+restore_interrupted_sp:
+ ## restore the interrupted thread stack pointer
+ _lwzsd %r1, __pk_saved_sp
+
+ ## If we are not in thread mode (i.e., we took an interrupt in an
+ ## interrupt-only configuration of PK or after pk_initialize() but
+ ## before pk_start_threads) simply pop the context and RFI - in this
+ ## case we'll most likely be returning to main() or the non-thread-mode
+ ## idle thread.
+
+check_thread_mode:
+ bb0wi %r31, PPE42_THREAD_MODE_BIT, ctx_pop_with_sprg0
-fast_exit_noncritical:
+ ## Check if external interrupt activated a delayed context switch. The
+ ## C-level code has taken care of the scheduling decisions - we simply
+ ## need to implement them here.
+check_for_ctx_switch:
+
+ _lwzsd %r3, __pk_delayed_switch
+ bwz %r3, check_for_idle_thread
+
+ ## Clear the delayed switch flag and go to the context switch code to
+ ## finish the switch.
+
+ li %r3, 0
+ _stwsd %r3, __pk_delayed_switch
- _pk_fast_ctx_pop_exit
+ b __pk_next_thread_resume
+
+ ## check if we should switch to the wait enabled state (idle)
+check_for_idle_thread:
+ _lwzsd %r3, __pk_current_thread
+ bwz %r3, __pk_idle_thread
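
[Editor's note] The interrupt-exit decision chain above, restated in C. Helper and flag names are illustrative; the corresponding assembly labels are noted in the comments:

    #include <stdint.h>
    #include <stddef.h>

    #define CTX_THREAD_MODE 0x40000000u    /* assumed bit position */

    extern uint32_t __pk_delayed_switch;
    extern void    *__pk_current_thread;
    extern void ctx_pop_with_sprg0(void), __pk_next_thread_resume(void),
                __pk_idle_thread(void);

    static void irq_exit(uint32_t kernel_ctx)
    {
        if (!(kernel_ctx & CTX_THREAD_MODE))   /* check_thread_mode     */
            ctx_pop_with_sprg0();
        else if (__pk_delayed_switch) {        /* check_for_ctx_switch  */
            __pk_delayed_switch = 0;
            __pk_next_thread_resume();
        } else if (__pk_current_thread == NULL)
            __pk_idle_thread();                /* check_for_idle_thread */
        else
            ctx_pop_with_sprg0();
    }
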
+
+ctx_pop_with_sprg0:
+ ## we must ensure that interrupts are disabled while restoring context
+ ##
+ ## restore sprg0 from the saved context
+ lwz %r0, PK_CTX_KERNEL_CTX(%r1)
+ mtsprg0 %r0
+#if PK_KERNEL_TRACE_ENABLE
+ srwi %r0, %r0, 16
+ PK_KERN_TRACE_ASM16("RESUME_CONTEXT(0x%04x)", %r0)
+#endif
+ctx_pop:
+ lwz %r0, PK_CTX_GPR0(%r1)
+ lvd %d7, PK_CTX_SRR0(%r1)
+ mtsrr1 %r8
+ mtsrr0 %r7
+ lvd %d5, PK_CTX_XER(%r1)
+ mtctr %r6
+ mtxer %r5
+ lvd %d30, PK_CTX_GPR30(%r1)
+ lvd %d28, PK_CTX_GPR28(%r1)
+ lvd %d9, PK_CTX_GPR9(%r1)
+ lvd %d7, PK_CTX_GPR7(%r1)
+ lvd %d5, PK_CTX_GPR5(%r1)
+ lvd %d3, PK_CTX_CR(%r1)
+ mtlr %r4
+ mtcr %r3
+ lvd %d3, PK_CTX_GPR3(%r1)
+ addi %r1, %r1, PK_CTX_SIZE
+ rfi
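
[Editor's note] For orientation, a C picture of the frame that ctx_pop unwinds. The PK_CTX_* offsets are symbolic in the assembly; the ordering below merely follows the doubleword (stvd/lvd register-pair) accesses and is otherwise an assumption:

    #include <stdint.h>

    typedef struct {
        uint32_t cr,    lr;      /* PK_CTX_CR: CR/LR pair (lvd %d3)      */
        uint32_t gpr3,  gpr4;    /* PK_CTX_GPR3                          */
        uint32_t gpr5,  gpr6;    /* PK_CTX_GPR5                          */
        uint32_t gpr7,  gpr8;    /* PK_CTX_GPR7                          */
        uint32_t gpr9,  gpr10;   /* PK_CTX_GPR9                          */
        uint32_t gpr28, gpr29;   /* PK_CTX_GPR28                         */
        uint32_t gpr30, gpr31;   /* PK_CTX_GPR30                         */
        uint32_t xer,   ctr;     /* PK_CTX_XER: XER/CTR pair             */
        uint32_t srr0,  srr1;    /* PK_CTX_SRR0: SRR0/SRR1 pair          */
        uint32_t gpr0;           /* PK_CTX_GPR0                          */
        uint32_t kernel_ctx;     /* PK_CTX_KERNEL_CTX, restored to SPRG0 */
    } pk_ctx_frame_t;            /* sizeof == PK_CTX_SIZE is assumed     */
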
+
/// \endcond