Diffstat (limited to 'pk/ppe42')
-rw-r--r--   pk/ppe42/ppe42.h               117
-rw-r--r--   pk/ppe42/ppe42_boot.S            2
-rw-r--r--   pk/ppe42/ppe42_context.h       382
-rw-r--r--   pk/ppe42/ppe42_core.c           63
-rw-r--r--   pk/ppe42/ppe42_exceptions.S    543
-rw-r--r--   pk/ppe42/ppe42_init.c            5
-rw-r--r--   pk/ppe42/ppe42_irq.h           112
-rw-r--r--   pk/ppe42/ppe42_irq_core.c        4
-rw-r--r--   pk/ppe42/ppe42_thread_init.S    54
9 files changed, 383 insertions, 899 deletions
diff --git a/pk/ppe42/ppe42.h b/pk/ppe42/ppe42.h
index 91ef5bf4..ecc990e3 100644
--- a/pk/ppe42/ppe42.h
+++ b/pk/ppe42/ppe42.h
@@ -278,10 +278,8 @@ do {*(volatile uint32_t *)(addr) = (data);} while(0)
/// The default thread machine context has MSR[CE], MSR[EE] and MSR[ME] set,
/// and all other MSR bits cleared.
///
-/// The default definition allows critical, non-critical and machine check
-/// exceptions. Debug interrupts are not enabled by default. This definition
-/// can be overriden by the application. If MMU protection is enabled then
-/// the IR/DR bits are also modeably set.
+/// The default definition allows external and machine check exceptions. This
+/// definition can be overridden by the application.
#ifndef PK_THREAD_MACHINE_CONTEXT_DEFAULT
#define PK_THREAD_MACHINE_CONTEXT_DEFAULT \
@@ -573,57 +571,72 @@ __pk_stack_create_initial_frame(PkAddress *stack, size_t *size) \
/// information. Instead it defines an API that the port must provide to the
/// portable kernel.
///
-/// In the PPE42 port, the kernel context is maintained in USPRG0. This
-/// 32-bit value is treated as 5 distinct fields as indicated in the structure
-/// definition. For certain tests it's also helpful to look at the two
-/// interrupt counters as a single 0/non-0 field.
+/// In the PPE42 port, the kernel context is maintained in SPRG0. This
+/// 32-bit value is treated as 6 distinct fields as indicated in the structure
+/// definition.
typedef union {
uint32_t value;
struct {
- /// The critical interrupt nesting level. If this field is non-zero,
- /// then interrupt priority and preemption rules guarantee that a
- /// critical interrupt handler is running, and the \c irq field will
- /// contain the PkIrqId of the currently active critical interrupt.
- unsigned reserved : 8;
-
- /// The non-critical interrupt nesting level. If this field is
- /// non-zero and the \c critical_interrupts field is 0, then interrupt
- /// priority and preemption rules guarantee that a noncritical
- /// interrupt handler is running, and the \c irq field will contain
- /// the PkIrqId of the currently active noncritical interrupt.
- unsigned noncritical_interrupts : 8;
-
- /// The PkIrqId of the currently running (or last run) handler. If
- /// either of the interrupt nesting levels are non-0, then this is the
- /// PkIrqId of the IRQ that is currently executing.
- unsigned irq : 8;
-
/// A flag indicating that PK is in thread mode after a call of
/// pk_start_threads().
unsigned thread_mode : 1;
+ /// If this field is non-zero then PK is processing an interrupt
+ /// and the \c irq field will contain the PkIrqId of the interrupt
+ /// that kicked off interrupt processing.
+ unsigned processing_interrupt : 1;
+
/// The priority of the currently running thread. In an interrupt
/// context, this is the priority of the thread that was interrupted.
- unsigned thread_priority : 7;
+ unsigned thread_priority : 6;
- } fields;
+ /// This bit tracks whether the current context can be discarded or
+ /// if the context must be saved. If the processor takes an interrupt
+ /// and this bit is set, then the current context will be discarded.
+ /// This bit is set at the end of handling an interrupt and prior
+ /// to entering the wait enabled state.
+ unsigned discard_ctx : 1;
- struct {
+ /// The PkIrqId of the currently running (or last run) handler. If
+ /// \c processing_interrupt is set, then this is the
+ /// PkIrqId of the IRQ that is currently executing.
+ unsigned irq : 7;
- unsigned also_ignore : 8;
+ /// Each PPE application will define (or not) the interpretation of
+ /// this field. Since SPRG0 is saved and restored during thread
+ /// context switches, this field can be used to record the progress of
+ /// individual threads. The kernel and/or application will provide
+ /// APIs or macros to read and write this field.
+ unsigned app_specific : 16;
- /// Used as a 0/non-0 flag for interrupt context.
- unsigned interrupt_context : 8;
+ } fields;
- /// Ignore
- unsigned ignore : 16;
+} __PkKernelContext;
- } merged_fields;
+// These APIs are provided for applications to get and set the app_specific
+// field of the kernel context which is held in sprg0.
-} __PkKernelContext;
+static inline uint16_t ppe42_app_ctx_get(void)
+{
+ __PkKernelContext __ctx;
+ __ctx.value = mfspr(SPRN_SPRG0);
+ return __ctx.fields.app_specific;
+}
+
+static inline void ppe42_app_ctx_set(uint16_t app_ctx)
+{
+ PkMachineContext mctx;
+ __PkKernelContext __ctx;
+ mctx = mfmsr();
+ wrteei(0);
+ __ctx.value = mfspr(SPRN_SPRG0);
+ __ctx.fields.app_specific = app_ctx;
+ mtspr(SPRN_SPRG0, __ctx.value);
+ mtmsr(mctx);
+}
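
As a usage illustration of the accessors added above -- a minimal sketch in which the phase codes and the thread routine are hypothetical; only ppe42_app_ctx_get()/ppe42_app_ctx_set() come from this header. Because each thread's SPRG0 image is saved and restored on context switches, the recorded value follows the thread.

    /* Hypothetical progress codes; not defined by PK. */
    #define MY_PHASE_INIT  0x0001
    #define MY_PHASE_MAIN  0x0002

    void my_thread_routine(void* arg)
    {
        ppe42_app_ctx_set(MY_PHASE_INIT);    /* recorded in SPRG0 bits 16:31 */
        /* ... one-time setup ... */
        ppe42_app_ctx_set(MY_PHASE_MAIN);
        /* e.g. an error path can later report where the thread was: */
        uint16_t phase = ppe42_app_ctx_get();
        (void)phase;
    }
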
// These APIs are provided to the PK portable kernel by the port.
@@ -642,7 +655,7 @@ typedef union {
({ \
__PkKernelContext __ctx; \
__ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.thread_mode && !__ctx.merged_fields.interrupt_context;})
+ __ctx.fields.thread_mode && !__ctx.fields.processing_interrupt;})
/// PK is executing an interrupt handler of any priority.
@@ -651,28 +664,9 @@ typedef union {
({ \
__PkKernelContext __ctx; \
__ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.merged_fields.interrupt_context;})
-
+ __ctx.fields.processing_interrupt;})
-/// PK is executing a non-critical interrupt handler.
-
-#define __pk_kernel_context_noncritical_interrupt() \
- ({ \
- __PkKernelContext __ctx; \
- __ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.noncritical_interrupts && \
- !__ctx.fields.critical_interrupts;})
-
-/// Return the noncritical interrupt nesting level
-
-#define __pk_noncritical_level() \
- ({ \
- __PkKernelContext __ctx; \
- __ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.noncritical_interrupts; })
-
-
// PK requires the port to define the type PkThreadQueue, which is a
// priority queue (where 0 is the highest priority). This queue must be able
// to handle PK_THREADS + 1 priorities (the last for the idle thread) The
@@ -740,7 +734,7 @@ __pk_thread_queue_count(volatile PkThreadQueue* queue)
PkMachineContext ctx; \
pk_critical_section_enter(&ctx); \
asm volatile ("mr 1, %0; mtlr %1; blrl" : : \
- "r" (__pk_noncritical_stack), \
+ "r" (__pk_kernel_stack), \
"r" (__pk_start_threads)); \
PK_PANIC(PK_START_THREADS_RETURNED); \
} while (0)
@@ -750,8 +744,13 @@ __pk_thread_queue_count(volatile PkThreadQueue* queue)
/// The __PkKernelContext 'thread_mode' bit as a flag
-#define PPE42_THREAD_MODE 0x80
+#define PPE42_THREAD_MODE 0x8000
+#define PPE42_PROC_IRQ 0x4000
+#define PPE42_DISCARD_CTX 0x0080
+#define PPE42_THREAD_MODE_BIT 0
+#define PPE42_PROC_IRQ_BIT 1
+#define PPE42_DISCARD_CTX_BIT 8
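
The *_BIT values appear to be big-endian bit numbers (bit 0 = MSB), as tested by bb0wi/bb1wi in the exception code, while the masks are the matching upper-halfword values used with oris. A small sketch of that relationship; the PPE42_CTX_MASK helper is illustrative and not part of the header:

    /* Illustrative only: each mask is the bit number shifted into the
     * upper halfword of SPRG0 (bit 0 = MSB of the halfword).            */
    #define PPE42_CTX_MASK(bit)  (0x8000u >> (bit))
    /* PPE42_CTX_MASK(PPE42_THREAD_MODE_BIT) == PPE42_THREAD_MODE (0x8000) */
    /* PPE42_CTX_MASK(PPE42_PROC_IRQ_BIT)    == PPE42_PROC_IRQ    (0x4000) */
    /* PPE42_CTX_MASK(PPE42_DISCARD_CTX_BIT) == PPE42_DISCARD_CTX (0x0080) */
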
#ifndef __ASSEMBLER__
diff --git a/pk/ppe42/ppe42_boot.S b/pk/ppe42/ppe42_boot.S
index 7e8ecee6..32fdfd3a 100644
--- a/pk/ppe42/ppe42_boot.S
+++ b/pk/ppe42/ppe42_boot.S
@@ -115,7 +115,7 @@ __reset_trap:
#sync
## The MSR to be used during the rest of initialization is
- ## established. This MSR should NOT enable critical or non-critical
+ ## established. This MSR should NOT enable
## interrupts, but could enable machine check exceptions.
_liwa %r3, PPE42_MSR_INITIAL
diff --git a/pk/ppe42/ppe42_context.h b/pk/ppe42/ppe42_context.h
index f13ec8a7..2412565b 100644
--- a/pk/ppe42/ppe42_context.h
+++ b/pk/ppe42/ppe42_context.h
@@ -54,59 +54,24 @@
// system-reserved in ABI applications, and is the base for read-only small data
// in EABI applications.
-// A fair amount of complexity is involved in handling the non-critical and
-// critical interrupt levels, and the emphasis on performance of fast-mode
-// interrupt handlers. Several different approaches and philosophies could
-// have been implemented - this is only one. In this implementation
-// critical/non-critical interrupt levels are treated more or less the same,
-// and the interrupt priority is just that - a kind of preemption priority.
-// Critical interrupt handling does have a little less overhead because it
-// does not have a thread scheduling step at the end.
-
-// A full context save takes place in 3 or 4 steps. Thread switches always do
-// steps 1, 2 and 3.
-// 1. The fast context that is always saved in response to every interrupt;
-// 1a. The optional save/update of the kernel context for interrupts.
-// 2. The (volatile - fast) context that is saved if an interrupt handler
-// switches from fast-mode to full-mode.
-// 3. The non-volatile context that is saved when a thread is switched out.
-
// USPRG0 holds the __PkKernelContext structure (defined in ppe42.h) that
// represents the current kernel context. The layout is as follows:
//
// Bits Meaning
// ==============
-// 0:7 The critical interrupt count
-// 8:15 The non-critical interrupt count
-// 16:23 The IRQ currently being processed
-// 24 The 'thread_mode' flag
-// 25:31 The thread priority of the running thread
+// 0 The 'thread_mode' flag
+// 1 The 'processing_interrupt' flag
+// 2:7 The thread priority of the running thread
+// 8 The 'discard_ctx' flag
+// 9:15 The IRQ currently being processed
+// 16:31 The application specific data
//
// When PK is initialized USPRG0 is initialized to 0. When thread-mode is
-// entered (by pk_start_threads()) bit 24 is set to 1. In order to support
-// PgP/OCC firmware, once initialized (with pk_initialize()) PK can simply
+// entered (by pk_start_threads()) bit 0 is set to 1. If desired,
+// once initialized (with pk_initialize()) PK can simply
// handle interrupts, reverting back to the non-thread-mode idle loop when
// there's nothing to do.
//
-// Note that it would require a serious error for the interrupt counts to ever
-// equal or exceed 2**8 as this would imply runaway reentrancy and stack
-// overflow. In fact it is most likely an error if an interrupt handler is
-// ever re-entered while active.
-
-// Registers SRR2 and SRR3 are always saved in IRQ context because
-// __pk_irq_fast2full must save the (volatile - fast) context to provide
-// working registers before it can look at USPRG0 to determine critical
-// vs. non-critical context. However, when restoring a non-critical interrupt
-// or thread these registers need not be restored. SRR2 and SRR3 are never
-// saved or restored for thread context switches, because threads always
-// operate at noncritical level.
-
-// When MMU protection is enabled, relocation/protection is re-established
-// immediately upon entry to the interrupt handler, before any memory
-// operations (load/store) take place. This requires using SPRG0 and SPGR4
-// for temporary storage for noncritical/critical handlers respectively in
-// accordance with the PK conventions for SPRGn usage by fast-mode
-// interrupts.
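
For clarity, a minimal C sketch (assumed, not part of this file) that decodes the SPRG0 layout described above with shifts and masks; it is equivalent to reading the __PkKernelContext bitfields defined in ppe42.h, with bit 0 being the most significant bit:

    uint32_t v = mfspr(SPRN_SPRG0);
    unsigned thread_mode          = (v >> 31) & 0x1;     /* bit  0     */
    unsigned processing_interrupt = (v >> 30) & 0x1;     /* bit  1     */
    unsigned thread_priority      = (v >> 24) & 0x3f;    /* bits 2:7   */
    unsigned discard_ctx          = (v >> 23) & 0x1;     /* bit  8     */
    unsigned irq                  = (v >> 16) & 0x7f;    /* bits 9:15  */
    unsigned app_specific         =  v        & 0xffff;  /* bits 16:31 */
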
## ------------------------------------------------------------
## Unused registers for embedded PPE42`
@@ -119,240 +84,77 @@
.set UNUSED_GPR13, 0xd # Dedicated; (E)ABI read-write small data area
## ------------------------------------------------------------
- ## Flags for context push/pop
- ## ------------------------------------------------------------
-
- .set PK_THREAD_CONTEXT, 0
- .set PK_IRQ_CONTEXT, 1
-
- ## ------------------------------------------------------------
- ## The PK fast context layout for Embedded PPE42
+ ## The PK context layout for Embedded PPE42
## ------------------------------------------------------------
- .set PK_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
- .set PK_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_FAST_CTX_GPR3, 0x08 # Volatile; Parameter; Return Value
- .set PK_FAST_CTX_GPR4, 0x0c # Volatile; Parameter
- .set PK_FAST_CTX_GPR5, 0x10 # Volatile; Parameter
- .set PK_FAST_CTX_GPR6, 0x14 # Volatile; Parameter
- .set PK_FAST_CTX_CR, 0x18 # Condition register
- .set PK_FAST_CTX_LR, 0x1c # Link register SPRN 0x008
- .set PK_FAST_CTX_KERNEL_CTX, 0x20 # Saved __PkKernelContext for IRQ
-
- .set PK_FAST_CTX_SIZE, 0x28 # Must be 8-byte aligned
+ .set PK_CTX_GPR1, 0x00 # Dedicated; Stack pointer
+ .set PK_CTX_LINKAGE, 0x04 # Slot for handler to store LR
+ .set PK_CTX_GPR3, 0x08 # Volatile; Parameter; Return Value
+ .set PK_CTX_GPR4, 0x0c # Volatile; Parameter
+ .set PK_CTX_GPR5, 0x10 # Volatile; Parameter
+ .set PK_CTX_GPR6, 0x14 # Volatile; Parameter
+ .set PK_CTX_CR, 0x18 # Condition register
+ .set PK_CTX_LR, 0x1c # Link register
- ## ------------------------------------------------------------
- ## The PK (volatile - fast) context layout for Embedded PPE42
- ## ------------------------------------------------------------
+ .set PK_CTX_GPR7, 0x20 # Volatile; Parameter
+ .set PK_CTX_GPR8, 0x24 # Volatile; Parameter
+ .set PK_CTX_GPR9, 0x28 # Volatile; Parameter
+ .set PK_CTX_GPR10, 0x2c # Volatile; Parameter
+ .set PK_CTX_GPR28, 0x30 # Non-volatile
+ .set PK_CTX_GPR29, 0x34 # Non-volatile
+ .set PK_CTX_GPR30, 0x38 # Non-volatile
+ .set PK_CTX_GPR31, 0x3c # Non-volatile
- .set PK_VOL_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
- .set PK_VOL_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_VOL_FAST_CTX_GPR7, 0x08 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR8, 0x0c # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR9, 0x10 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR10, 0x14 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_XER, 0x18 # Fixed-point exception register SPRN 0x001
- .set PK_VOL_FAST_CTX_CTR, 0x1c # Count register SPRN 0x009
- .set PK_VOL_FAST_CTX_SRR0, 0x20 # Save/restore register 0 SPRN 0x01a
- .set PK_VOL_FAST_CTX_SRR1, 0x24 # Save/restore register 1 SPRN 0x01b
- .set PK_VOL_FAST_CTX_GPR0, 0x28 # Volatile; Language specific
+ .set PK_CTX_XER, 0x40 # Fixed-point exception register
+ .set PK_CTX_CTR, 0x44 # Count register
+ .set PK_CTX_SRR0, 0x48 # Save/restore register 0
+ .set PK_CTX_SRR1, 0x4c # Save/restore register 1
+ .set PK_CTX_GPR0, 0x50 # Volatile; Language specific
+ .set PK_CTX_KERNEL_CTX, 0x54 # Saved __PkKernelContext for IRQ
- .set PK_VOL_FAST_CTX_SIZE, 0x30 # Must be 8-byte aligned
+ .set PK_CTX_SIZE, 0x58 # Must be 8-byte aligned
## ------------------------------------------------------------
- ## The PK non-volatile context layout for Embedded PowerPC
- ## ------------------------------------------------------------
-
- ## The 'preferred form' for stmw is for the LSB of R31 to fall into the
- ## end of a 16-byte aligned block.
-
- .set PK_NON_VOL_CTX_GPR1, 0x00 # Dedicated; Stack Pointer
- .set PK_NON_VOL_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_NON_VOL_CTX_GPR28, 0x08 # Non-volatile
- .set PK_NON_VOL_CTX_GPR29, 0x0c # Non-volatile
- .set PK_NON_VOL_CTX_GPR30, 0x10 # Non-volatile
- .set PK_NON_VOL_CTX_GPR31, 0x14 # Non-volatile
-
- .set PK_NON_VOL_CTX_SIZE, 0x18 # Must be 8-byte aligned
-
- ## ------------------------------------------------------------
- ## Save/restore the fast context
+ ## Push the interrupted context if necessary
+ ##
+ ## This macro saves off some context in preparation for calling
+ ## the pk_ctx_check_discard routine. This is an attempt to use
+ ## the 32 byte cache more efficiently.
##
- ## 11 Instructions, 8 Loads/Stores : If MMU is disabled
- ## 17 Instructions, 8 Loads/Stores : If MMU is enabled
+ ## 8 Instructions
## ------------------------------------------------------------
##
- ## Without MMU support, an EIEIO is always executed at the entry point
- ## to gauarantee that all memory operations (especially MMIO
- ## operations) have completed prior to execution of the interrupt
- ## handler.
- ##
- ## If MMU support is enabled, address translation is re-established
- ## immediately at the entry of each interrupt, prior to performing any
- ## loads or stores. PK currently only supports using the MMU for
- ## protection, not for address translation. Therfore it is 'legal'
- ## to change translation modes a with an MTMSR followed by an
- ## ISYNC. This is much simpler then the complex instruction sequence
- ## that would be required if we had to set up RFI/RFCI sequences to
- ## change the execution context at this point.
- ##
- ## Note that since we are not really doing address translation, it
- ## would also be in keeping with the 'fast interrupt' idea to defer
- ## reenabling translation (protection) until the fast-to-full sequence
- ## was executed for full-mode interrupts, and run fast-mode interrupts
- ## unprotected. However here we chose to run all interrupts with MMU
- ## protection.
- ##
- ## Unfortunately the simple MTMSR;ISYNC sequence exposes a serious bug
- ## in the PPE42 core that causes the stack-pointer store instruction
- ## to generate a seemingly random, *real-mode* address in certain cases
- ## when this instruction in a noncritical interrupt prologue is
- ## interrupted by a critical interrupt. This bug is described in
- ## HW239446. The workaround is to follow the ISYNC sith a SYNC - which
- ## eliminates the problem for reasons still unknown. On the bright side
- ## this SYNC might also serve the same purpose as the EIEIO in the
- ## non-MMU case, guaranteeing that all MMIO has completed prior to the
- ## interrupt handler. However without the initial EIEIO we still
- ## experience failures, so this seemingly redundant instruction also
- ## remains in place. This requirement is assumed to be related to the
- ## HW239446 issue.
-
- .macro _pk_fast_ctx_push
-
- stwu %r1, -PK_FAST_CTX_SIZE(%r1)
-
- stvd %d3, PK_FAST_CTX_GPR3(%r1)
- stvd %d5, PK_FAST_CTX_GPR5(%r1)
+
+ .macro _pk_ctx_push_as_needed branch_addr:req
+ stwu %r1, -PK_CTX_SIZE(%r1)
+ stvd %d3, PK_CTX_GPR3(%r1)
mfcr %r3
mflr %r4
-
- stvd %d3, PK_FAST_CTX_CR(%r1)
-
+ stvd %d3, PK_CTX_CR(%r1)
+ _liw %r3, \branch_addr
+ b ctx_check_discard
.endm
- .macro _pk_fast_ctx_pop
-
- lvd %d3, PK_FAST_CTX_CR(%r1)
-
- mtcr0 %r3
- mtlr %r4
-
- lvd %d3, PK_FAST_CTX_GPR3(%r1)
- lvd %d5, PK_FAST_CTX_GPR5(%r1)
-
- lwz %r1, 0(%r1)
-
- .endm
-
## ------------------------------------------------------------
- ## Save/update the kernel context in response to an interrupt. This is
- ## not part of the fast context save because for external interupts the
- ## IRQ is not determined until later.
+ ## update the kernel context in response to an interrupt.
## ------------------------------------------------------------
- ## The kernel context is saved, then updated with the currently active
- ## IRQ in bits 16:23. The correct interrupt count is incremented and
- ## the context is returned to SPRG0.
-
- .macro _save_update_kernel_context irqreg, ctxreg
-
- #PK_TRACE_NONCRITICAL_IRQ_ENTRY \irqreg, \ctxreg
+ ## The kernel context is updated with the currently active
+ ## IRQ in bits 9:15.
- mfsprg0 \ctxreg
- stw \ctxreg, PK_FAST_CTX_KERNEL_CTX(%r1)
- #rlwimi \ctxreg, \irqreg, 24, 9, 15 //set the irq #
- rlwimi \ctxreg, \irqreg, 8, 16, 23 //set the irq #
- #oris \ctxreg, \ctxreg, 0x4000 //set the 'processing interrupt' PI bit
- addis \ctxreg, \ctxreg, 0x0001 //increment the irq count
+ .macro _update_kernel_context irqreg, ctxreg
+ rlwimi \ctxreg, \irqreg, 16, 9, 15 //set the irq #
+ oris \ctxreg, \ctxreg, 0x4000 //set the 'processing_interrupt' flag
mtsprg0 \ctxreg
- .endm
-
- ## ------------------------------------------------------------
- ## Fast-mode context pop and RF(C)I. This is only used by
- ## interrupt handlers - the thread context switch has its own
- ## code to handle updating USPRG0 for thread mode.
- ## ------------------------------------------------------------
-
- .macro _pk_fast_ctx_pop_exit
-
- .if PK_KERNEL_TRACE_ENABLE
- bl __pk_trace_noncritical_irq_exit
- .endif
-
- lwz %r3, PK_FAST_CTX_KERNEL_CTX(%r1)
- mtsprg0 %r3
- _pk_fast_ctx_pop
- rfi
-
- .endm
-
- ## ------------------------------------------------------------
- ## Save/restore the (volatile - fast) context
- ##
- ## Thread - 15 Instructions, 11 Loads/Stores
- ## IRQ - 19(15) Instructions, 13(11) Loads/Stores
- ## ------------------------------------------------------------
-
- .macro _pk_vol_fast_ctx_push
-
- stwu %r1, -PK_VOL_FAST_CTX_SIZE(%r1)
-
- stw %r0, PK_VOL_FAST_CTX_GPR0(%r1)
- stvd %d7, PK_VOL_FAST_CTX_GPR7(%r1)
- stvd %d9, PK_VOL_FAST_CTX_GPR9(%r1)
-
- mfxer %r7
- mfctr %r8
- mfsrr0 %r9
- mfsrr1 %r10
-
- stvd %d7, PK_VOL_FAST_CTX_XER(%r1)
- stvd %d9, PK_VOL_FAST_CTX_SRR0(%r1)
-
- .endm
-
-
- .macro _pk_vol_fast_ctx_pop
-
- lvd %d7, PK_VOL_FAST_CTX_XER(%r1)
- lvd %d9, PK_VOL_FAST_CTX_SRR0(%r1)
-
- mtxer %r7
- mtctr %r8
- mtsrr0 %r9
- mtsrr1 %r10
-
- lwz %r0, PK_VOL_FAST_CTX_GPR0(%r1)
- lvd %d7, PK_VOL_FAST_CTX_GPR7(%r1)
- lvd %d9, PK_VOL_FAST_CTX_GPR9(%r1)
-
- lwz %r1, 0(%r1)
-
- .endm
-
- ## ------------------------------------------------------------
- ## Save/restore the non-volatile context on the stack
- ##
- ## 2 Instructions, 19 Loads/Stores
- ## ------------------------------------------------------------
-
- .macro _pk_non_vol_ctx_push
-
- stwu %r1, -PK_NON_VOL_CTX_SIZE(%r1)
- stvd %d28, PK_NON_VOL_CTX_GPR28(%r1)
- stvd %d30, PK_NON_VOL_CTX_GPR30(%r1)
-
- .endm
-
-
- .macro _pk_non_vol_ctx_pop
-
- lvd %d28, PK_NON_VOL_CTX_GPR28(%r1)
- lvd %d30, PK_NON_VOL_CTX_GPR30(%r1)
- lwz %r1, 0(%r1)
+#if PK_KERNEL_TRACE_ENABLE
+ mr %r31, \irqreg
+ srwi \ctxreg, \ctxreg, 16
+ PK_KERN_TRACE_ASM16("INTERRUPT_CONTEXT(0x%04x)", \ctxreg)
+ mr \irqreg, %r31
+#endif
.endm
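
The rlwimi/oris pair in _update_kernel_context is equivalent to the following C fragment -- a sketch under the bit layout described earlier; the variable names are illustrative:

    /* rlwimi ctxreg, irqreg, 16, 9, 15 : place the IRQ number in bits 9:15 */
    ctx = (ctx & ~0x007f0000u) | ((uint32_t)(irq & 0x7f) << 16);
    /* oris ctxreg, ctxreg, 0x4000      : set the processing_interrupt flag (bit 1) */
    ctx |= 0x40000000u;
    mtspr(SPRN_SPRG0, ctx);             /* mtsprg0 ctxreg */
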
@@ -364,89 +166,33 @@
/// thread->saved_stack_pointer when a thread is fully context-switched out.
typedef struct {
-
- uint32_t r1_nv;
- uint32_t link_nv;
- uint32_t r28;
- uint32_t r29;
- uint32_t r30;
- uint32_t r31;
- uint32_t r1_vf;
- uint32_t link_vf;
- uint32_t r7;
- uint32_t r8;
- uint32_t r9;
- uint32_t r10;
- uint32_t xer;
- uint32_t ctr;
- uint32_t srr0;
- uint32_t srr1;
- uint32_t r0;
- uint32_t pad;
uint32_t r1;
- uint32_t link_fast;
+ uint32_t linkage;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t cr;
uint32_t lr;
- uint32_t sprg0;
-
-} PkThreadContext;
-
-/// PK thread context of an interrupted thread (full-mode handler)
-///
-/// When a thread is interrupted by a full-mode interrupt handler, this is the
-/// layout of the stack area pointed to by either __pk_saved_sp_noncritical
-/// or __pk_saved_sp_critical.
-typedef struct {
-
- uint32_t r1_vf;
- uint32_t link_vf;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
+ uint32_t r28;
+ uint32_t r29;
+ uint32_t r30;
+ uint32_t r31;
+
uint32_t xer;
uint32_t ctr;
uint32_t srr0;
uint32_t srr1;
uint32_t r0;
- uint32_t pad;
- uint32_t r1;
- uint32_t link_fast;
- uint32_t r3;
- uint32_t r4;
- uint32_t r5;
- uint32_t r6;
- uint32_t cr;
- uint32_t lr;
uint32_t sprg0;
-} PkThreadContextFullIrq;
-
-
-/// PK thread context of an interrupted thread (fast-mode handler)
-///
-/// When a thread is interrupted by a fast-mode interrupt handler, this is the
-/// layout of the stack area pointed to by R1 - unless the fast-mode interrupt
-/// handler extends the stack.
-
-typedef struct {
-
- uint32_t r1;
- uint32_t link_fast;
- uint32_t r3;
- uint32_t r4;
- uint32_t r5;
- uint32_t r6;
- uint32_t cr;
- uint32_t lr;
- uint32_t sprg0;
+} PkThreadContext;
-} PkThreadContextFastIrq;
#endif /* __ASSEMBLER__ */
diff --git a/pk/ppe42/ppe42_core.c b/pk/ppe42/ppe42_core.c
index b49b8855..2fa86e68 100644
--- a/pk/ppe42/ppe42_core.c
+++ b/pk/ppe42/ppe42_core.c
@@ -29,64 +29,6 @@ ppe42_timebase_data_t ppe42_tb_data = {0};
PkTimebase ppe42_64bit_timebase = 0;
-/// Enable interrupt preemption
-///
-/// This API can only be called from an interrupt context. Threads will
-/// always be preempted by interrupts unless they explicitly disable
-/// interrupts with the \c pk_interrupt_disable() API. It is legal to call
-/// this API redundantly.
-///
-/// Be careful when enabling interrupt handler preemption that the interrupt
-/// being handled does not/can not trigger again, as this could rapidly lead
-/// to stack overflows.
-///
-/// Return values other then PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_ILLEGAL_CONTEXT The API call was not made from an interrupt
-/// context.
-
-int
-pk_interrupt_preemption_enable()
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
- }
-
- wrteei(1);
-
- return PK_OK;
-}
-
-
-/// Disable interrupt preemption
-///
-/// This API can only be called from an interrupt context. Threads will
-/// always be preempted by interrupts unless they explicitly disable
-/// interrupts with the \c pk_interrupt_disable() API. It is legal to call
-/// this API redundantly.
-///
-/// Return values other then PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_ILLEGAL_CONTEXT The API call was not made from an interrupt
-/// context.
-
-int
-pk_interrupt_preemption_disable()
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
- }
-
- wrteei(0);
-
- return PK_OK;
-}
-
-
#if PK_TIMER_SUPPORT
// The tickless kernel timer mechanism for PPE42
@@ -160,10 +102,7 @@ __pk_schedule_hardware_timeout(PkTimebase timeout)
}
else
{
- //FIXME: We have to multiply the difference by 16
- //to workaround missing support for selecting the
- //external dec_timer clock source for the decrementer.
- diff = (timeout - now) << 4;
+ diff = (timeout - now);
if (diff > 0xfffffffful)
{
diff --git a/pk/ppe42/ppe42_exceptions.S b/pk/ppe42/ppe42_exceptions.S
index 19100e1e..b3ce3551 100644
--- a/pk/ppe42/ppe42_exceptions.S
+++ b/pk/ppe42/ppe42_exceptions.S
@@ -74,9 +74,7 @@ __instruction_storage:
.org __vectors + 0x00A0
__external_interrupt_vector:
- _pk_fast_ctx_push
-
- b __external_interrupt
+ _pk_ctx_push_as_needed __get_ext_irq
############################################################
# 0x00C0 : Alignment Exception
@@ -99,9 +97,7 @@ __alignment_exception:
### switches between threads. They can also be used by the code
### to signal an exception in an error scenario.
__program_exception:
- _pk_fast_ctx_push
-
- b program_exception_handler
+ _pk_ctx_push_as_needed program_exception_handler
############################################################
@@ -110,9 +106,7 @@ __program_exception:
.org __vectors + 0x0100
__dec_interrupt:
- _pk_fast_ctx_push
- li %r3, PPE42_IRQ_DEC
- b dec_handler
+ _pk_ctx_push_as_needed dec_handler
############################################################
# 0x0120 : FIT Interrupts
@@ -120,8 +114,7 @@ __dec_interrupt:
.org __vectors + 0x0120
__fit_interrupt:
- #b fit_handler
- b .
+ _pk_ctx_push_as_needed fit_handler
############################################################
# 0x0140 : Watchdog Interrupts
@@ -129,183 +122,7 @@ __fit_interrupt:
.org __vectors + 0x0140
__watchdog_interrupt:
- #b watchdog_handler
- b .
-
-
-
-
-
- .global __pk_irq_fast2full
-__pk_irq_fast2full:
-
- ## Convert a fast-mode to a full-mode interrupt by saving the
- ## (volatile - fast) context, and switching to the appropriate system
- ## stack.
-
- ## Entry invariants:
- ## 1. The SP/stack must be exactly as it was when the fast-mode
- ## handler was entered.
- ## 2. No changes have been made to the MSR - the interrupt level must
- ## remain disabled.
- ## 3. The handler owns the fast context and has not modified the other
- ## register context. This routine can only use the (volatile -
- ## fast) register context.
-
- ## 41 (linear) instructions plus alignmenmt
-
- ## Start by pushing the (volatile - fast) context. Technically we also
- ## need to save the CR as our contract with the handler is not to
- ## disturb any of its register state.
-
- _pk_vol_fast_ctx_push
- mfcr %r10
- mfsprg0 %r8
-
- ## At entry here the (volatile - fast) context has been pushed,
- ## R8 has SPRG0 and R10 contains the saved CR.
-
- ## Note that it would violate a kernel/API invariant if this routine
- ## were entered from outside an interrupt context.
-
-fast2full_noncritical:
-
- ## switch to the kernel stack if we haven't already done so. (SPRG0[RI] = 0)
- #bb1wi %r8, RI_BIT, 1f //branches if the RI_BIT is '1'
-
- extrwi %r9, %r8, 8, 8
- cmpwi %r9, 1
- bne 1f
-
- _stwsd %r1, __pk_saved_sp_noncritical
- _lwzsd %r1, __pk_noncritical_stack
-
-1:
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r8, PI_BIT, 2f //branches if PI_BIT is '1'
- cmpwi %r9, 0
- bne 2f
- _pk_panic PPE42_IRQ_FAST2FULL_INVARIANT
-2:
- .endif
-
- mtcr0 %r10
- blr
-
- .global __pk_irq_full_mode_exit
-__pk_irq_full_mode_exit:
-
- ## Exit a full-mode handler.
-
- ## Entry invariants:
- ## 1. The SP/stack must be in exactly the same state it was left in at
- ## the exit of __pk_irq_fast2full.
- ## 2. It is assumed the the preemption rules of PK have been followed
- ## - in particular that critical handlers have not enabled
- ## non-critical interrupts.
-
- ## We can freely modify the volatile context here - the handler is done
- ## and we will restore the interrupted volatile context.
-
- ## 22 linear instructions
-
- ## If the critical count is non-zero, then the PK preemption rules
- ## guarantee that we are exiting from a critical interrupt
- ## handler. This test is safe to make even if critical interrupts are
- ## enabled, because the variable is set exactly once in a critical
- ## section.
-
- mfsprg0 %r3
-
- ## Exiting a full-mode non-critical handler is more complex than the
- ## critical case, because the handler may have made a new
- ## highest-priority thread runnable and we may need to go through a
- ## delayed scheduling step.
-
- ## Note that the idle thread is treated as a special case. The idle
- ## thread has no permanent register context. To avoid having to
- ## allocate a stack area for the idle thread, the idle thread
- ## 'uses' the non-critical stack. When the idle thread is interrupted
- ## the (redundant) context is pushed, but is then effectively lost.
- ## Whenever we restore the idle thread we simply reenter the idle
- ## thread entry point.
-
- ## At entry:
- ## 1. R3 holds the value of SPRG0 (__PkKernelContext)
-
- ## 33 linear instructions.
-
-full_exit_noncritical:
-
- ## Enter a critical section for the return from interrupt, in the event
- ## that the handler enabled preemption.
-
- _pk_critical_section_enter %r4, %r5
-
- ## If the non-critical count is > 1 then this is a nested interrupt
- ## and we can simply pop the context and RFI.
-
- extrwi. %r4, %r3, 8, 8
-
- ## If SPRG0[RI] = 1 then this is a recursive interrupt
- ## and we can simply pop the context and RFI. Note that it would
- ## violate a kernel/API invariant if this routine were entered from
- ## outside an interrupt context (interrupt level == 0).
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r3, PI_BIT, 1f //branch if the PI bit is set
- bne 1f
- _pk_panic PPE42_IRQ_FULL_EXIT_INVARIANT
-1:
- .endif
-
- cmpwi %r4, 1
- bne exit_noncritical_without_switch
-
- ## Otherwise, restore the saved stack pointer and continue.
-
- _lwzsd %r1, __pk_saved_sp_noncritical
-
- ## If we are not in thread mode (i.e., we took an interrupt in an
- ## interupt-only configuration of PK or after pk_initialize() but
- ## before pk_start_threads) simply pop the context and RFI - in this
- ## case we'll most likely be returning to main() or the non-thread-mode
- ## idle thread.
-
- andi. %r4, %r3, PPE42_THREAD_MODE
- beq exit_noncritical_without_switch
-
- ## Now, check for a delayed context switch. If none is pending, we can
- ## exit (after a check for the idle thread special case).
-
- _lwzsd %r3, __pk_delayed_switch
- cmpwi %r3, 0
- bne noncritical_switch
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
-
-exit_noncritical_without_switch:
- _pk_vol_fast_ctx_pop
- b fast_exit_noncritical
-
- ## The non-critical interrupt activated a delayed context switch. The
- ## C-level code has taken care of the scheduling decisions - we simply
- ## need to implement them here.
-
-noncritical_switch:
-
- ## Clear the delayed switch flag and go to the context switch code to
- ## finish the switch.
-
- li %r3, 0
- _stwsd %r3, __pk_delayed_switch
-
- b thread_save_non_volatile_and_switch
-
-
+ _pk_ctx_push_as_needed watchdog_handler
@@ -317,30 +134,33 @@ noncritical_switch:
__pk_idle_thread:
- ## The idle thread 'uses' the non-critical stack. Any register context
+ ## The idle thread 'uses' the kernel stack. Any register context
## pushed here is redundant and is wiped out/ignored every time the
## idle thread is re-scheduled.
## The idle thread simply establishes a default machine context and
## enters the wait-enable state. The idle thread is always entered
- ## with non-critical interrupts disabled.
+ ## with interrupts disabled.
##
## The kernel context is initialized to indicate that the idle thread
- ## is running - the idle thread priority is PK_THREADS, and the
- ## 'thread-mode' bit is asserted as well.
+ ## is running - the idle thread priority is PK_THREADS, the
+ ## 'thread-mode' bit is asserted and so is the 'discard-ctx' bit.
+ ## In addition, the previous kernel context is stored in the lower
+ ## 16 bits.
##
## This loop can also be called from the PK bootloader if main()
## returns - in which case we don't muck with the SPRG0 or the stack
## pointer.
-
- li %r3, (PK_THREADS | PPE42_THREAD_MODE)
+ mfsprg0 %r3
+ srwi %r3, %r3, 16
+ oris %r3, %r3, (PK_THREADS << 8) | PPE42_THREAD_MODE | PPE42_DISCARD_CTX
mtsprg0 %r3
- _lwzsd %r1, __pk_noncritical_stack
+ _lwzsd %r1, __pk_kernel_stack
-__pk_idle_thread_from_bootloader:
+__pk_idle_thread_from_bootloader:
+
+ PK_KERN_TRACE_ASM16("ENTER_IDLE_STATE")
- #li %r3, PK_THREADS
- #PK_TRACE_THREAD_SWITCH %r3, %r4
_lwzsd %r3, __pk_thread_machine_context_default
_oriwa %r3, %r3, MSR_WE
mtmsr %r3
@@ -364,27 +184,14 @@ dec_handler:
## interrupt by writing the DIS back into the TSR before calling the
## handler. The timer handler does not take any arguments.
- _save_update_kernel_context %r3, %r4
+ li %r4, PPE42_IRQ_DEC
+ _update_kernel_context %r4, %r3
_liwa %r3, TSR_DIS
mttsr %r3
- _pk_irq_fast2full __pk_timer_handler
-
-
-
-
-
- ## Exit traces are moved here because the code area (0x100 bytes)
- ## reserved for individual interrupts is overflowing when tracing is
- ## enabled. This is kind of a hack: We know that this trace only
- ## occurs when we're about to exit the fast context, at a place
- ## where we can use any of the fast registers.
-#if 0
-__pk_trace_noncritical_irq_exit:
- #PK_TRACE_NONCRITICAL_IRQ_EXIT %r3, %r4
- blr
-#endif
+ bl __pk_timer_handler
+ b check_for_ext_interrupt
program_exception_handler:
## first check if exception was caused by an illegal 'sc' instruction
@@ -393,46 +200,14 @@ program_exception_handler:
cmpwbeq %r3, %r4, __sc_helper
_pk_panic PPE42_ILLEGAL_INSTRUCTION
- ## SRR0 is currently pointing to the 'sc' instruction. We need to advance it
+ ## Saved SRR0 is currently pointing to the 'sc' instruction. We need to advance it
## to the next instruction so that we don't end up in an endless loop (something
## that the ppc sc instruction does automatically).
__sc_helper:
- mfsrr0 %r3
- addi %r3, %r3, 4
- mtsrr0 %r3
-
-__system_call:
-
- ## The program exception is used by PK as a handy way to start a
- ## context switch, as the continuation address and MSR of the thread to
- ## be swapped out are saved in SRR0 and SRR1.
-
- ## Non-critical interrupts are disabled at entry.
-
- ## Begin by saving the volatile context of the current thread.
- ## NOTE: fast context has already been saved prior to branching here.
-
- _pk_vol_fast_ctx_push
-
-thread_save_non_volatile_and_switch:
-
- ## Finish the thread context save by pushing the non-volatile context
- ## and saving the resulting stack pointer in the thread structure. If
- ## the current thread is the idle thread this step is bypassed.
-
- ## This symbol is also used as an entry point by the non-critical
- ## interrupt handler - non-critical interrupts are disabled here.
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_next_thread_resume
-
- _pk_non_vol_ctx_push
- stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
-
- ## The next thread becomes the current thread, and we switch to its
- ## stack - unless the new thread is the idle thread, in which case it
- ## (the idle thread) is simply resumed.
+ mfsrr0 %r4
+ _lwzsd %r3, __pk_saved_sp
+ addi %r4, %r4, 4
+ stw %r4, PK_CTX_SRR0(%r3)
.global __pk_next_thread_resume
__pk_next_thread_resume:
@@ -440,38 +215,36 @@ __pk_next_thread_resume:
_lwzsd %r3, __pk_next_thread
_stwsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
+ ## Enter the wait enabled state if the thread pointer is null
+ bwz %r3, __pk_idle_thread
+ ## switch to the new thread stack
lwz %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
- ## Restore the thread context and resume the new thread. The kernel
- ## context in thread mode is simply the thread priority OR'ed with the
- ## thread-mode flag. All other fields are cleared.
+ ## load sprg0 from the stack and update the thread priority
+ ## in case it changed.
+restore_and_update_sprg0:
+ _lbzsd %r31, __pk_next_priority
- _pk_non_vol_ctx_pop
- _pk_vol_fast_ctx_pop
-
- _lbzsd %r3, __pk_next_priority
- #PK_TRACE_THREAD_SWITCH %r3, %r4
- ori %r3, %r3, PPE42_THREAD_MODE
- mtsprg0 %r3
+ PK_KERN_TRACE_ASM16("RESUME_THREAD(%d)", %r31)
- _pk_fast_ctx_pop
- rfi
+ lwz %r3, PK_CTX_KERNEL_CTX(%r1)
+ rlwimi %r3, %r31, 24, 2, 7
+ mtsprg0 %r3
+ b ctx_pop
+
fit_handler:
- ## The FIT handler is user defined, and is a fast-mode handler. By
+ ## The FIT handler is user defined. By
## convention the kernel clears the interrupt by writing the FIS back
## into the TSR.
- _pk_fast_ctx_push
-
- _lwzsd %r3, __ppe42_fit_arg
li %r4, PPE42_IRQ_FIT
- _save_update_kernel_context %r4, %r6
+ _update_kernel_context %r4, %r3
+
+ _lwzsd %r3, __ppe42_fit_arg
_liwa %r6, TSR_FIS
mttsr %r6
@@ -480,18 +253,15 @@ fit_handler:
mtlr %r6
blrl
- b fast_exit_noncritical
+ b check_for_ext_interrupt
watchdog_handler:
## Watchdog setup is described in the PK Specification.
## The kernel clears TSR[WIS] prior to calling the handler.
- ## The watchdog handler is a critical, fast-mode handler.
- _pk_fast_ctx_push
-
- li %r3, PPE42_IRQ_WATCHDOG
+ li %r4, PPE42_IRQ_WATCHDOG
- _save_update_kernel_context %r3, %r6
+ _update_kernel_context %r4, %r3
_liwa %r6, TSR_WIS
mttsr %r6
@@ -500,68 +270,99 @@ watchdog_handler:
mtlr %r6
blrl
- b .
+ b check_for_ext_interrupt
-#if 0
-debug_handler:
+ ## Check if we can discard the interrupted context.
+ ## This routine expects r3, r4, lr, and cr to already be pushed.
+ ## It also expects r3 to hold the address of the function to jump
+ ## to after the interrupted context has been pushed (if necessary).
- ## PK does nothing upon reception of the debug interrupt other
- ## than calling the handler (if non-0). The debug handler is a
- ## fast-mode handler.
+ .align 5
+ctx_check_discard:
- _pk_fast_ctx_push
+ ## Prepare to jump to the branch function that was passed in
+ mtlr %r3
- li %r3, PPE42_IRQ_DEBUG
-
- _save_update_kernel_context %r3, %r6
+ ## Check if the DISCARD_CTX bit is set in the kernel context
+ mfsprg0 %r3
+ bb0wi %r3, PPE42_DISCARD_CTX_BIT, ctx_continue_push
- _lwzsd %r6, __ppe42_debug_routine
- cmpwi %r6, 0
- mtlr %r6
- beq debug_exit
- blrl
-
-debug_exit:
- b fast_exit_critical
-#endif
-
- .align 5
-__external_interrupt:
+ctx_discard:
+ ## DISCARD_CTX bit was set. Discard stack and branch to interrupt
+ ## handler code
+ addi %r1, %r1, PK_CTX_SIZE
+ blr
- ## The non-critical interrupt handler entry point is re-entrant - A
- ## handler may allow preemption, which could cause another entry here.
+ ## DISCARD_CTX bit was not set. Continue saving full context.
+ ## (r3, r4, lr, and cr have already been saved for us) and
+ ## r3 contains the interrupted kernel context
+
+ctx_continue_push:
+
+ stvd %d5, PK_CTX_GPR5(%r1)
+ stvd %d7, PK_CTX_GPR7(%r1)
+ stvd %d9, PK_CTX_GPR9(%r1)
+ stvd %d28, PK_CTX_GPR28(%r1)
+ stvd %d30, PK_CTX_GPR30(%r1)
+ mfxer %r5
+ mfctr %r6
+ stvd %d5, PK_CTX_XER(%r1)
+ mfsrr0 %r7
+ mfsrr1 %r8
+ stvd %d7, PK_CTX_SRR0(%r1)
+ stw %r0, PK_CTX_GPR0(%r1)
+ stw %r3, PK_CTX_KERNEL_CTX(%r1)
- ## Entry invariants:
- ## 1. Non-critical interupts are disabled;
- ## 2. The SP points to a thread stack or the non-critical stack.
+ ## If the 'processing interrupt' bit is set then we were already
+ ## using the kernel stack and don't need to modify or save the current
+ ## stack pointer.
+ bb1wi %r3, PPE42_PROC_IRQ_BIT, ctx_push_completed
- ## Since fast-mode handlers can not use PK services or alter the
- ## machine context, the exit of a fast mode handler is a simple RF(C)I.
+ ## load the pointer to the current thread control block
+ _lwzsd %r4, __pk_current_thread
- ## Begin by pushing the fast context on the current stack.
-
- ## _pk_fast_ctx_push was called prior to branching here. No need to call it here.
+ ## don't save the stack pointer in the thread control block
+ ## if the current thread was the idle thread (null pointer)
+ bwz %r4, switch_to_kernel_stack
+
+ ## we interrupted a bonafide thread, so save off the stack
+ ## pointer
+ stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r4)
+
+switch_to_kernel_stack:
+ _stwsd %r1, __pk_saved_sp
+ _lwzsd %r1, __pk_kernel_stack
+
+ctx_push_completed:
+ blr
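
In outline, the push/discard path above behaves like this sketch (C-flavored pseudocode summarizing the assembly; field names follow the __PkKernelContext layout):

    // ctx_check_discard(branch_addr):        r3/r4, CR and LR already pushed
    if (sprg0.discard_ctx) {
        r1 += PK_CTX_SIZE;                    // throw the partial frame away
        goto branch_addr;
    }
    // push r5-r10, r28-r31, XER, CTR, SRR0/SRR1, r0 and the interrupted SPRG0
    if (!sprg0.processing_interrupt) {        // first interrupt from thread level
        if (__pk_current_thread != NULL)
            __pk_current_thread->saved_stack_pointer = r1;
        __pk_saved_sp = r1;
        r1 = __pk_kernel_stack;
    }
    goto branch_addr;
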
- ## Load the base address for the external interrupt table
+__get_ext_irq:
+
+ ## Entry invariants:
+ ## 1. external interrupts are disabled;
+ ## 2. previous context has been saved off
+ ## 3. r3 contains the kernel context
+ ## 4. r1 points to the kernel stack
- ## TODO: This is HW Macro specific code that is responsible for finding the
+ ## This is HW Macro specific code that is responsible for finding the
## IRQ # and storing it in r4 (phantom IRQ's are assigned a value of EXTERNAL_IRQS).
hwmacro_get_ext_irq
## An active or phantom IRQ was found.
+ ## R3 has the context of the interrupted thread or bottom half
## R4 has the IRQ number.
## The IRQ is converted into a pointer to an 8-byte handler
## structure, and the handler is dispatched. The call is made with the
## parameters:
- ## R3 = private
+ ## R3 = private data ptr
## R4 = irq
-external_irq_found:
+call_external_irq_handler:
- _save_update_kernel_context %r4, %r5
+ _update_kernel_context %r4, %r3
slwi %r3, %r4, 3 //multiply the irq# by 8
_liw %r6, __ppe42_irq_handlers
lwzx %r5, %r6, %r3
@@ -570,12 +371,120 @@ external_irq_found:
mtlr %r5
blrl
- ## Pop the stack/RFI when (if) it returns here.
+ ## Once the interrupt handler returns, check if any interrupts are
+ ## waiting and handle them now.
+
+check_for_ext_interrupt:
+
+ ## Set the CTX_DISCARD bit in the kernel context so that if there is
+ ## an interrupt it will not bother saving the full context.
+ mfsprg0 %r31
+ oris %r31, %r31, PPE42_DISCARD_CTX
+ mtsprg0 %r31
+
+ ###### Enable/Disable External Interrupts #####
+ wrteei 1
+ wrteei 0
+
+ ## If we made it this far, there must not be any interrupts pending.
+ ## If bottom half processing was interrupted we need to restore it
+check_interrupted_bh:
+
+ ## If the thread ID is 33 then the bottom half handler was interrupted
+ ## and needs to be restored.
+ extrwi %r4, %r31, 6, 2
+ cmpwi %r4, 33
+ beq ctx_pop_with_sprg0
+
+check_for_bh:
+ ## if the bottom half queue is pointing to itself then the queue is
+ ## empty and there are no bottom halves that need processing.
+ _lwzsd %r4, _pk_bh_queue
+ lwz %r5, 0(%r4)
+ cmplwbeq %r4, %r5, restore_interrupted_sp
+
+process_bottom_halves:
+ ## Clear the CTX_DISCARD bit so that interrupted bottom half context
+ ## will be saved in case an interrupt occurs after this point. Also
+ ## set the thread ID to 33 so that we know to restore the bottom half
+ ## context that was interrupted.
+ rlwinm %r3, %r31, 0, 9, 1 //clear thread id + discard bit
+ oris %r3, %r3, 0x2100 //set thread id to 33
+ mtsprg0 %r3 //set bottom half context
+
+ ## branch to a C function that processes bottom halves
+ wrteei 1
+ bl _pk_process_bh
+ wrteei 0
+
+ ## restore the previous kernel context (with discard bit set)
+ mtsprg0 %r31
+
+restore_interrupted_sp:
+ ## restore the interrupted thread stack pointer
+ _lwzsd %r1, __pk_saved_sp
+
+ ## If we are not in thread mode (i.e., we took an interrupt in an
+ ## interrupt-only configuration of PK or after pk_initialize() but
+ ## before pk_start_threads) simply pop the context and RFI - in this
+ ## case we'll most likely be returning to main() or the non-thread-mode
+ ## idle thread.
+
+check_thread_mode:
+ bb0wi %r31, PPE42_THREAD_MODE_BIT, ctx_pop_with_sprg0
-fast_exit_noncritical:
+ ## Check if external interrupt activated a delayed context switch. The
+ ## C-level code has taken care of the scheduling decisions - we simply
+ ## need to implement them here.
+check_for_ctx_switch:
+
+ _lwzsd %r3, __pk_delayed_switch
+ bwz %r3, check_for_idle_thread
+
+ ## Clear the delayed switch flag and go to the context switch code to
+ ## finish the switch.
+
+ li %r3, 0
+ _stwsd %r3, __pk_delayed_switch
- _pk_fast_ctx_pop_exit
+ b __pk_next_thread_resume
+
+ ## check if we should switch to the wait enabled state (idle)
+check_for_idle_thread:
+ _lwzsd %r3, __pk_current_thread
+ bwz %r3, __pk_idle_thread
+
+ctx_pop_with_sprg0:
+ ## we must ensure that interrupts are disabled while restoring context
+ ##
+ ## restore sprg0 from the saved context
+ lwz %r0, PK_CTX_KERNEL_CTX(%r1)
+ mtsprg0 %r0
+#if PK_KERNEL_TRACE_ENABLE
+ srwi %r0, %r0, 16
+ PK_KERN_TRACE_ASM16("RESUME_CONTEXT(0x%04x)", %r0)
+#endif
+ctx_pop:
+ lwz %r0, PK_CTX_GPR0(%r1)
+ lvd %d7, PK_CTX_SRR0(%r1)
+ mtsrr1 %r8
+ mtsrr0 %r7
+ lvd %d5, PK_CTX_XER(%r1)
+ mtctr %r6
+ mtxer %r5
+ lvd %d30, PK_CTX_GPR30(%r1)
+ lvd %d28, PK_CTX_GPR28(%r1)
+ lvd %d9, PK_CTX_GPR9(%r1)
+ lvd %d7, PK_CTX_GPR7(%r1)
+ lvd %d5, PK_CTX_GPR5(%r1)
+ lvd %d3, PK_CTX_CR(%r1)
+ mtlr %r4
+ mtcr %r3
+ lvd %d3, PK_CTX_GPR3(%r1)
+ addi %r1, %r1, PK_CTX_SIZE
+ rfi
+
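
The interrupt exit sequence above can be summarized by this control-flow sketch (C-flavored pseudocode; the labels match those in the assembly):

    // check_for_ext_interrupt:
    sprg0.discard_ctx = 1;                    // a new IRQ may discard this path
    wrteei(1); wrteei(0);                     // window in which pending IRQs are taken
    if (sprg0.thread_priority == 33)          // a bottom half was interrupted
        goto ctx_pop_with_sprg0;
    if (bottom_half_queue_not_empty) {
        // mark the bottom-half context: priority 33, discard_ctx clear
        wrteei(1); _pk_process_bh(); wrteei(0);
        // restore the previous kernel context (discard_ctx still set)
    }
    r1 = __pk_saved_sp;
    if (!sprg0.thread_mode)        goto ctx_pop_with_sprg0;
    if (__pk_delayed_switch)       { __pk_delayed_switch = 0; goto __pk_next_thread_resume; }
    if (__pk_current_thread == NULL) goto __pk_idle_thread;
    // ctx_pop_with_sprg0: reload SPRG0 from the frame, restore GPRs, XER,
    // CTR, SRR0/SRR1, pop the frame and rfi
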
/// \endcond
diff --git a/pk/ppe42/ppe42_init.c b/pk/ppe42/ppe42_init.c
index 52659aab..a832e620 100644
--- a/pk/ppe42/ppe42_init.c
+++ b/pk/ppe42/ppe42_init.c
@@ -16,7 +16,7 @@
// Note that __ppe42_system_setup() is called from the PK bootloader early
// in the initialization, at a point before the application has enabled
-// critical or external interruts.
+// interrupts.
// This function is expected to be defined by the macro specific code (GPE, CME, SBE)
void __hwmacro_setup(void);
@@ -38,13 +38,14 @@ __ppe42_system_setup()
__ppe42_irq_handlers[irq].handler = __ppe42_phantom_irq_handler;
// Initialize special interrupt handlers
-/*
+
__ppe42_fit_routine = __ppe42_default_irq_handler;
__ppe42_fit_arg = 0;
__ppe42_watchdog_routine = __ppe42_default_irq_handler;
__ppe42_watchdog_arg = 0;
+/*
__ppe42_debug_routine = __ppe42_default_irq_handler;
__ppe42_debug_arg = 0;
*/
diff --git a/pk/ppe42/ppe42_irq.h b/pk/ppe42/ppe42_irq.h
index 6567af64..89948d60 100644
--- a/pk/ppe42/ppe42_irq.h
+++ b/pk/ppe42/ppe42_irq.h
@@ -139,79 +139,6 @@ UNLESS__PPE42_IRQ_CORE_C__(extern)
volatile
void* __ppe42_debug_arg;
-
-// Note: Why PK_IRQ_FAST2FULL (below) is implemented so strangely.
-//
-// I am adamant that I want to have a a macro in the 'C' environment to create
-// these bridge functions. However the limitations of the C preprocessor and
-// the intelligence of the GCC 'asm' facility consipre against a
-// straightforward solution. The only way that I was able to find to get
-// naked assembly code into the output stream is to use 'asm' with simple
-// strings - I couldn't make it work with any kind of argument, as 'asm' would
-// reinterpret the arguments and resulting assembler code in various ways.
-//
-// There is another alternative that I tried wherby I created a subroutine
-// call and then filled in the subroutine body with 'asm' code. However, the
-// subroutine wrapper that GCC creates only works for PowerPC fast-mode
-// handlers if GCC is invoked with optimization, which ensures that the
-// wrapper doesn't touch the stack pointer or other registers. True, we'll
-// always use optimization, but I did not want to have to make this
-// requirement for using this macro.
-
-/// This macro creates a 'bridge' handler that converts the initial fast-mode
-/// IRQ dispatch into a call of a full-mode IRQ handler. The full-mode
-/// handler is defined by the user (presumably as a \c C subroutine) and has
-/// the same prototype (type PkIrqHandler) as the fast handler.
-///
-/// \param fast_handler This will be the global function name of the fast
-/// IRQ handler created by this macro. This is the symbol
-/// that should be passed in as the \a handler argument
-/// of \c pk_irq_setup() and \c pk_irq_handler_set().
-///
-/// \param full_handler This is the name of the user-defined full-mode
-/// handler which is invoked through this bridge.
-///
-/// \e BUG \e ALERT : Beware of passing the \c full_handler to IRQ setup
-/// APIs. This won't be caught by the compiler (because the \c full_handler
-/// has the correct prototype) and will lead to nasty bugs. Always pass in
-/// the \c fast_handler symbol to IRQ setup APIS.
-///
-/// The code stream injected into the GCC assembler output in response to
-///
-/// PK_IRQ_FAST2FULL(fast_handler, full_handler)
-///
-/// is (comments added for clarification) :
-///
-/// \code
-/// .text
-/// .global fast_handler
-/// .align 5 # Hard-coded PPE42 cache-line alignment
-/// fast_handler = . # Can't macro expand LABEL: - this is equivalent
-/// bl __pk_irq_fast2full # The fast-mode to full-mode conversion sequence
-/// bl full_handler
-/// b __pk_irq_full_mode_exit
-/// \endcode
-///
-/// The macro also declares the prototype of the fast handler:
-///
-/// \code
-/// PK_IRQ_HANDLER(fast_handler);
-/// \endcode
-///
-
-#define PK_IRQ_FAST2FULL(fast_handler, full_handler) \
- PK_IRQ_HANDLER(fast_handler); \
- __PK_IRQ_FAST2FULL(.global fast_handler, fast_handler = ., bl full_handler)
-
-#define __PK_IRQ_FAST2FULL(global, label, call) \
-asm(".text"); \
-asm(#global); \
-asm(".align 5"); \
-asm(#label); \
-asm("bl __pk_irq_fast2full"); \
-asm(#call); \
-asm("b __pk_irq_full_mode_exit");
-
#endif /* __ASSEMBLER__ */
// It's hard to be portable and get all of the definitions and headers in the
@@ -228,45 +155,6 @@ asm("b __pk_irq_full_mode_exit");
/// \page ppe42_irq_macros_page PPE42 PK IRQ Assembler Macros
///
///
-/// \section fast2full_asm Fast-Mode to Full-Mode Handler Conversion
-///
-/// This macro produces the calling sequence required to convert a
-/// fast-mode interrupt handler to a full-mode interrupt handler. The
-/// full-mode handler is implemented by another subroutine. The
-/// requirements for invoking this macro are:
-///
-/// \li The stack pointer and stack must be exactly as they were when the
-/// fast-mode handler was entered.
-///
-/// \li No changes have been made to the MSR - the interrupt level must
-/// remain disabled.
-///
-/// \li The handler owns the fast context and has not modified the other
-/// register context. The conversion process will not modify any
-/// register in the fast context (other than the LR used for
-/// subroutine linkage).
-///
-/// The final condition above means that the \a full_handler will
-/// begin with the fast-mode context exactly as it was (save for LR)
-/// at conversion, including the contents of GPR3-7 (the first 5
-/// PowerPC ABI paramater passing registers) and the entire CR.
-///
-/// Forms:
-///
-/// \c _pk_irq_fast2full \a full_handler
-/// \cond
-
-#ifdef __ASSEMBLER__
-
- .macro _pk_irq_fast2full full_handler
- bl __pk_irq_fast2full
- bl \full_handler
- b __pk_irq_full_mode_exit
- .endm
-
-#endif /* __ASSEMBLER__ */
-
-/// \endcond
#ifndef __ASSEMBLER__
diff --git a/pk/ppe42/ppe42_irq_core.c b/pk/ppe42/ppe42_irq_core.c
index 8e8f29f5..791f935d 100644
--- a/pk/ppe42/ppe42_irq_core.c
+++ b/pk/ppe42/ppe42_irq_core.c
@@ -32,8 +32,8 @@ __ppe42_default_irq_handler(void* arg, PkIrqId irq)
/// This function is installed by default to handle the case that the
-/// interrupt dispatch code is entered in response to an external critical or
-/// non-critical interrupt, but no interrupt is found pending in the interrupt
+/// interrupt dispatch code is entered in response to an external
+/// interrupt, but no interrupt is found pending in the interrupt
/// controller. This should never happen, as it would indicate that a
/// 'glitch' occurred on the external interrupt input
/// to the PPE42 core.
diff --git a/pk/ppe42/ppe42_thread_init.S b/pk/ppe42/ppe42_thread_init.S
index 6e6c34fe..7185f7c7 100644
--- a/pk/ppe42/ppe42_thread_init.S
+++ b/pk/ppe42/ppe42_thread_init.S
@@ -48,54 +48,56 @@ __pk_thread_context_initialize:
stw %r7, \prefix\reg(%r6)
.endm
- ## Initialize a fast context on the thread stack. The CR is cleared,
+ ## Initialize volatile context on the thread stack. The CR is cleared,
## the LR = pk_complete(), R3 has the private parameter.
lwz %r6, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
- stwu %r6, -PK_FAST_CTX_SIZE(%r6)
+ stwu %r6, -PK_CTX_SIZE(%r6)
li %r7, 0
- stw %r7, PK_FAST_CTX_CR(%r6)
+ stw %r7, PK_CTX_CR(%r6)
_liw %r7, pk_complete
- stw %r7, PK_FAST_CTX_LR(%r6)
+ stw %r7, PK_CTX_LR(%r6)
- stw %r5, PK_FAST_CTX_GPR3(%r6)
+ stw %r5, PK_CTX_GPR3(%r6)
- _gpr_init PK_FAST_CTX_GPR, 4, 0x0404
- _gpr_init PK_FAST_CTX_GPR, 5, 0x0505
- _gpr_init PK_FAST_CTX_GPR, 6, 0x0606
+ _gpr_init PK_CTX_GPR, 4, 0x0404
+ _gpr_init PK_CTX_GPR, 5, 0x0505
+ _gpr_init PK_CTX_GPR, 6, 0x0606
- ## Initialize the (volatile - fast) context on the thread stack. XER
- ## and CTR are clear, SRR0 = thread_routine, SRR1 = default machine
+ ## XER and CTR are clear, SRR0 = thread_routine, SRR1 = default machine
## context.
- stwu %r6, -PK_VOL_FAST_CTX_SIZE(%r6)
-
li %r7, 0
- stw %r7, PK_VOL_FAST_CTX_XER(%r6)
- stw %r7, PK_VOL_FAST_CTX_CTR(%r6)
+ stw %r7, PK_CTX_XER(%r6)
+ stw %r7, PK_CTX_CTR(%r6)
- stw %r4, PK_VOL_FAST_CTX_SRR0(%r6)
+ stw %r4, PK_CTX_SRR0(%r6)
_lwzsd %r7, __pk_thread_machine_context_default
- stw %r7, PK_VOL_FAST_CTX_SRR1(%r6)
+ stw %r7, PK_CTX_SRR1(%r6)
- _gpr_init PK_VOL_FAST_CTX_GPR, 0, 0x0000
- _gpr_init PK_VOL_FAST_CTX_GPR, 7, 0x0707
- _gpr_init PK_VOL_FAST_CTX_GPR, 8, 0x0808
- _gpr_init PK_VOL_FAST_CTX_GPR, 9, 0x0909
- _gpr_init PK_VOL_FAST_CTX_GPR, 10, 0x1010
+ _gpr_init PK_CTX_GPR, 0, 0x0000
+ _gpr_init PK_CTX_GPR, 7, 0x0707
+ _gpr_init PK_CTX_GPR, 8, 0x0808
+ _gpr_init PK_CTX_GPR, 9, 0x0909
+ _gpr_init PK_CTX_GPR, 10, 0x1010
## Initialize the non-volatile context on the thread stack.
- stwu %r6, -PK_NON_VOL_CTX_SIZE(%r6)
+ _gpr_init PK_CTX_GPR, 28, 0x2828
+ _gpr_init PK_CTX_GPR, 29, 0x2929
+ _gpr_init PK_CTX_GPR, 30, 0x3030
+ _gpr_init PK_CTX_GPR, 31, 0x3131
+
+ ## Initialize the kernel context on the thread stack.
+ ## Note: Thread priority is set later each time the thread is
+ ## resumed.
- _gpr_init PK_NON_VOL_CTX_GPR, 28, 0x2828
- _gpr_init PK_NON_VOL_CTX_GPR, 29, 0x2929
- _gpr_init PK_NON_VOL_CTX_GPR, 30, 0x3030
- _gpr_init PK_NON_VOL_CTX_GPR, 31, 0x3131
+ lis %r7, PPE42_THREAD_MODE
+ stw %r7, PK_CTX_KERNEL_CTX(%r6)
## Initialization is done - the stack pointer is stored back in the
## thread.