summaryrefslogtreecommitdiffstats
path: root/pk
diff options
context:
space:
mode:
authorGlenn Miles <milesg@us.ibm.com>2015-04-06 16:47:35 -0500
committerDerk Rembold <rembold@de.ibm.com>2015-04-21 09:37:29 -0500
commit43733c105f4cf5866e04502fe9506f375a5e6edb (patch)
tree8e66fd3fd1e4baf5be70f00fab4cc1b32011d40e /pk
parent7f2c101774507059eeedbe7744da4ad923c53405 (diff)
downloadtalos-sbe-43733c105f4cf5866e04502fe9506f375a5e6edb.tar.gz
talos-sbe-43733c105f4cf5866e04502fe9506f375a5e6edb.zip
Minor changes to kernel API
-Removed support for fast irq handlers -Removed sleep_absolute interface -Removed pk_timer_create_nonpreemptible interface -Removed pk_interrupt_preemption_enable/disable interfaces -Removed references to critical/noncritical irqs -Added application context interfaces -Added bottom-half support -Moved timer handler from interrupt to bottom-half -Moved thread utility functions into a separate file Change-Id: Ie39fe0363ef52b195a808a8390cc12c2c7478674 Reviewed-on: http://gfw160.aus.stglabs.ibm.com:8080/gerrit/16898 Reviewed-by: Glenn R. Miles <milesg@us.ibm.com> Reviewed-by: Thi N. Tran <thi@us.ibm.com> Reviewed-by: Richard J. Knight <rjknight@us.ibm.com> Reviewed-by: Gregory S. Still <stillgs@us.ibm.com> Reviewed-by: Derk Rembold <rembold@de.ibm.com> Tested-by: Derk Rembold <rembold@de.ibm.com>
Diffstat (limited to 'pk')
-rw-r--r--pk/gpe/gpe_common.h13
-rw-r--r--pk/gpe/gpe_irq_init.c6
-rw-r--r--pk/gpe/gpe_scom_handler.S13
-rw-r--r--pk/kernel/pk_api.h111
-rw-r--r--pk/kernel/pk_bh_core.c29
-rw-r--r--pk/kernel/pk_init.c39
-rw-r--r--pk/kernel/pk_kernel.h36
-rw-r--r--pk/kernel/pk_semaphore_core.c22
-rw-r--r--pk/kernel/pk_thread.h56
-rw-r--r--pk/kernel/pk_thread_core.c424
-rw-r--r--pk/kernel/pk_thread_init.c9
-rw-r--r--pk/kernel/pk_thread_util.c291
-rw-r--r--pk/kernel/pk_timer_core.c85
-rw-r--r--pk/kernel/pk_timer_init.c91
-rw-r--r--pk/kernel/pkkernelfiles.mk4
-rw-r--r--pk/ppe/ppe_common.h19
-rw-r--r--pk/ppe42/ppe42.h117
-rw-r--r--pk/ppe42/ppe42_boot.S2
-rw-r--r--pk/ppe42/ppe42_context.h382
-rw-r--r--pk/ppe42/ppe42_core.c63
-rw-r--r--pk/ppe42/ppe42_exceptions.S543
-rw-r--r--pk/ppe42/ppe42_init.c5
-rw-r--r--pk/ppe42/ppe42_irq.h112
-rw-r--r--pk/ppe42/ppe42_irq_core.c4
-rw-r--r--pk/ppe42/ppe42_thread_init.S54
-rw-r--r--pk/std/std_common.h6
-rw-r--r--pk/std/std_irq_init.c6
-rw-r--r--pk/trace/pk_trace.h2
-rw-r--r--pk/trace/pk_trace_core.c4
29 files changed, 971 insertions, 1577 deletions
diff --git a/pk/gpe/gpe_common.h b/pk/gpe/gpe_common.h
index 05ec4464..043a4802 100644
--- a/pk/gpe/gpe_common.h
+++ b/pk/gpe/gpe_common.h
@@ -35,17 +35,20 @@
/// Check for interrupts pending in status register 0 while the IRQ is
/// computed. The IRQ is expected to be stored in r4. If no IRQ is
/// pending then load the phantom irq # (EXTERNAL_IRQS).
+///
+/// r1, r2, r3, and r13 must not be modified. All other registers may be used.
+///
.macro hwmacro_get_ext_irq
- _lwzi %r3, %r3, GPE_GISR0(APPCFG_OCC_INSTANCE_ID)
- cntlzw %r4, %r3
- cmpwible %r4, 31, external_irq_found #branch if irq is lt or eq to 31
+ _lwzi %r5, %r5, GPE_GISR0(APPCFG_OCC_INSTANCE_ID)
+ cntlzw %r4, %r5
+ cmpwible %r4, 31, call_external_irq_handler #branch if irq is lt or eq to 31
## No IRQ pending in interrupt set 0. Try set 1.
## Note: irq # will be 64 (EXTERNAL_IRQS) if no bits were set in either register
- _lwzi %r3, %r3, GPE_GISR1(APPCFG_OCC_INSTANCE_ID)
- cntlzw %r4, %r3
+ _lwzi %r6, %r6, GPE_GISR1(APPCFG_OCC_INSTANCE_ID)
+ cntlzw %r4, %r6
addi %r4, %r4, 32
.endm
diff --git a/pk/gpe/gpe_irq_init.c b/pk/gpe/gpe_irq_init.c
index 078d2fa7..061d5d10 100644
--- a/pk/gpe/gpe_irq_init.c
+++ b/pk/gpe/gpe_irq_init.c
@@ -21,8 +21,7 @@
/// interrupt status in the controller.
///
/// Note that PK allows this API to be called from any context, and changes
-/// to the interrupt controller are made from a critical
-/// section.
+/// to the interrupt controller are made from a critical section.
///
/// Return values other than PK_OK (0) are errors; see \ref pk_errors
///
@@ -72,8 +71,7 @@ pk_irq_setup(PkIrqId irq,
/// Return values other than PK_OK (0) are errors; see \ref pk_errors
///
/// Note that PK allows this API to be called from any context, and changes
-/// to the interrupt controller are made from a critical
-/// section.
+/// to the interrupt controller are made from a critical section.
///
/// \retval 0 Successful completion
///
diff --git a/pk/gpe/gpe_scom_handler.S b/pk/gpe/gpe_scom_handler.S
index 4806390a..44daa52b 100644
--- a/pk/gpe/gpe_scom_handler.S
+++ b/pk/gpe/gpe_scom_handler.S
@@ -7,9 +7,6 @@
/// \file gpe_scom_handler.S
/// \brief Interrupt handler code for SCOM requests from the ppc405
///
-/// This handler is implemented as a fast-mode handler, which means
-/// that only r3, r4, r5, r6, cr0, and lr have been saved off and
-/// are available to be used.
.nolist
#include "pk.h"
@@ -17,7 +14,7 @@
.list
## This function handles requests from the ppc405 to perform a getscom
- ## or putsom operation.
+ ## or putscom operation.
##
## The ppc405 must supply a request in the following format:
##
@@ -48,8 +45,8 @@ gpe_scom_handler:
lwz %r4, OCCHW_SCOM_ADDR_OFFSET(%r3)
## Mask all SIB errors
- mfmsr %r5
- _oriwa %r5, %r5, MSR_SEM
+ mfmsr %r7
+ _oriwa %r5, %r7, MSR_SEM
mtmsr %r5
## Check bit 0 of the scom address to determine if this
@@ -80,6 +77,10 @@ _get_scom_status:
## then, store it to the OISR0_CLR address
_stwi %r3, %r4, OCB_OISR0_CLR
+ ## restore the MSR as it was before we changed it
+ mtmsr %r7
+
+ ## return
blr
_do_putscom:
diff --git a/pk/kernel/pk_api.h b/pk/kernel/pk_api.h
index 3d0c4a3f..7e1c9702 100644
--- a/pk/kernel/pk_api.h
+++ b/pk/kernel/pk_api.h
@@ -236,7 +236,7 @@
///
/// 2 : (\b Default - Currently Unimplemented) In addition to prepatterning,
/// stack utilization is computed at the exit of context switches and
-/// noncritical interrupt processing. The maximum utilization is stored in
+/// interrupt processing. The maximum utilization is stored in
/// the thread data structure. The kernel will panic if stack overflow is
/// detected. Stack utilization is not computed for the idle thread.
@@ -263,7 +263,7 @@
/// pk_app_cfg.h.
///
/// The PK_START_THREADS_HOOK runs as a pseudo-interrupt handler on the
-/// noncritical interrupt stack, with noncritical interrupts disabled.
+/// kernel stack, with external interrupts disabled.
#ifndef PK_START_THREADS_HOOK
#define PK_START_THREADS_HOOK do {} while (0)
@@ -363,32 +363,11 @@
//Kernel trace macros
#if !PK_KERNEL_TRACE_ENABLE
-
-#define PK_TRACE_THREAD_SLEEP(priority)
-#define PK_TRACE_THREAD_WAKEUP(priority)
-#define PK_TRACE_THREAD_SEMAPHORE_PEND(priority)
-#define PK_TRACE_THREAD_SEMAPHORE_POST(priority)
-#define PK_TRACE_THREAD_SEMAPHORE_TIMEOUT(priority)
-#define PK_TRACE_THREAD_SUSPENDED(priority)
-#define PK_TRACE_THREAD_DELETED(priority)
-#define PK_TRACE_THREAD_COMPLETED(priority)
-#define PK_TRACE_THREAD_MAPPED_RUNNABLE(priority)
-#define PK_TRACE_THREAD_MAPPED_SEMAPHORE_PEND(priority)
-#define PK_TRACE_THREAD_MAPPED_SLEEPING(priority)
-
+#define PK_KERN_TRACE(...)
+#define PK_KERN_TRACE_ASM16(...)
#else
-
-#define PK_TRACE_THREAD_SLEEP(priority) PKTRACE("THREAD_SLEEP(%d)", priority)
-#define PK_TRACE_THREAD_WAKEUP(priority) PKTRACE("THREAD_WAKEUP(%d)", priority)
-#define PK_TRACE_THREAD_SEMAPHORE_PEND(priority) PKTRACE("SEMAPHORE_PEND(%d)", priority)
-#define PK_TRACE_THREAD_SEMAPHORE_POST(priority) PKTRACE("SEMAPHORE_POST(%d)", priority)
-#define PK_TRACE_THREAD_SEMAPHORE_TIMEOUT(priority) PKTRACE("SEMAPHORE_TIMEOUT(%d)", priority)
-#define PK_TRACE_THREAD_SUSPENDED(priority) PKTRACE("THREAD_SUSPENDED(%d)", priority)
-#define PK_TRACE_THREAD_DELETED(priority) PKTRACE("THREAD_DELETED(%d)", priority)
-#define PK_TRACE_THREAD_COMPLETED(priority) PKTRACE("THREAD_COMPLETED(%d)", priority)
-#define PK_TRACE_THREAD_MAPPED_RUNNABLE(priority) PKTRACE("THREAD_MAPPED_RUNNABLE(%d)", priority)
-#define PK_TRACE_THREAD_MAPPED_SEMAPHORE_PEND(priority) PKTRACE("THREAD_MAPPED_SEMAPHORE_PEND(%d)", priority)
-#define PK_TRACE_THREAD_MAPPED_SLEEPING(priority) PKTRACE("THREAD_MAPPED_SLEEPING(%d)", priority)
+#define PK_KERN_TRACE(...) PK_TRACE(__VA_ARGS__)
+#define PK_KERN_TRACE_ASM16(...) PK_TRACE_ASM16(__VA_ARGS__)
#endif /* PK_KERNEL_TRACE_ENABLE */
@@ -486,7 +465,7 @@ typedef struct {
} PkSemaphore;
-/// Compile-time initialize an PkSemaphore structure
+/// Compile-time initialize a PkSemaphore structure
///
/// This low-level macro creates a structure initialization of a PkSemaphore
/// structure. This can be used for example to create compile-time initialized
@@ -603,23 +582,8 @@ typedef struct PkTimer {
/// field is initialized to a pointer to the thread.
void *arg;
- /// Options for timer processing; See \ref pk_timer_options
- uint8_t options;
-
} PkTimer;
-/// \defgroup pk_timer_options PK Timer Options
-/// @{
-
-/// Allow interrupt preemption during the callback
-///
-/// This is the normal mode for PkTimer objects scheduled by PK kernal
-/// mechanisms. The timer callbacks effectively run as if inside a
-/// highest-priority thread, allowing other interrupts to preempt them.
-#define PK_TIMER_CALLBACK_PREEMPTIBLE 0x1
-
-/// @}
-
// Threads
@@ -663,11 +627,33 @@ typedef struct {
} PkThread;
+typedef void (*PkBhHandler)(void *);
+
+#define PK_BH_HANDLER(handler) void handler(void *)
+
+typedef struct {
+
+ /// The bottom half queue management pointers
+ ///
+ /// This pointer container is defined as the first element of the
+ /// structure to allow the PkBottomHalf to be cast to a PkDeque and
+ /// vice-versa.
+ PkDeque deque;
+
+ /// The bottom half handler
+ PkBhHandler bh_handler;
+
+ /// Private data passed to the handler.
+ void *arg;
+
+} PkBottomHalf;
+
+
// Initialization APIs
int
-pk_initialize(PkAddress noncritical_stack,
- size_t noncritical_stack_size,
+pk_initialize(PkAddress kernel_stack,
+ size_t kernel_stack_size,
PkTimebase initial_timebase,
uint32_t timebase_frequency_hz);
@@ -677,13 +663,6 @@ pk_initialize(PkAddress noncritical_stack,
PkTimebase
pk_timebase_get(void);
-// Interrupt preemption APIs
-
-int
-pk_interrupt_preemption_enable(void);
-
-int
-pk_interrupt_preemption_disable(void);
// Timer APIs
@@ -692,10 +671,6 @@ pk_timer_create(PkTimer *timer,
PkTimerCallback callback,
void *arg);
-int
-pk_timer_create_nonpreemptible(PkTimer *timer,
- PkTimerCallback callback,
- void *arg);
int
pk_timer_schedule(PkTimer *timer,
@@ -735,9 +710,6 @@ int
pk_complete(void);
int
-pk_sleep_absolute(PkTimebase time);
-
-int
pk_sleep(PkInterval interval);
int
@@ -927,6 +899,27 @@ pk_deque_delete(PkDeque *element)
element->next = 0;
}
+// Bottom Half APIs
+
+extern PkDeque _pk_bh_queue;
+
+static inline void
+pk_bh_schedule(PkBottomHalf *bottom_half)
+{
+ pk_deque_push_back(&_pk_bh_queue, (PkDeque *)bottom_half);
+}
+
+#define PK_BH_INIT(_handler, _arg) \
+{\
+ .deque = PK_DEQUE_ELEMENT_INIT(), \
+ .bh_handler = _handler, \
+ .arg = _arg \
+}
+
+#define PK_BH_STATIC_CREATE(bh_name, handler, arg) \
+PkBottomHalf bh_name = PK_BH_INIT(handler, arg)
+
+
//Trace function prototypes
void pk_trace_tiny(uint32_t i_parm);
void pk_trace_big(uint32_t i_hash_and_count,
diff --git a/pk/kernel/pk_bh_core.c b/pk/kernel/pk_bh_core.c
new file mode 100644
index 00000000..8a6181cb
--- /dev/null
+++ b/pk/kernel/pk_bh_core.c
@@ -0,0 +1,29 @@
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2015
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file pk_bh_core.c
+/// \brief PK bottom half APIs
+///
+/// The entry points in this file are considered 'core' routines that will
+/// always be present at runtime in any PK application.
+
+#include "pk.h"
+
+/// Statically initialize the bottom half queue
+PK_DEQUE_SENTINEL_STATIC_CREATE(_pk_bh_queue);
+
+void _pk_process_bh(void)
+{
+ PkBottomHalf *bh;
+ while((bh = (PkBottomHalf*)pk_deque_pop_front(&_pk_bh_queue)) != 0)
+ {
+ bh->bh_handler(bh->arg);
+ }
+ return;
+}
+
+
+#undef __PK_THREAD_CORE_C__
diff --git a/pk/kernel/pk_init.c b/pk/kernel/pk_init.c
index 0add4214..4c7cd138 100644
--- a/pk/kernel/pk_init.c
+++ b/pk/kernel/pk_init.c
@@ -58,17 +58,12 @@ void pk_set_timebase_rshift(uint32_t timebase_freq_hz)
/// Initialize PK.
///
-/// \param noncritical_stack A stack area for noncritical interrupt handlers.
+/// \param kernel_stack A stack area for interrupt and bottom-half handlers.
///
-/// \param noncritical_stack_size The size (in bytes) of the stack area for
-/// noncritical interrupt handlers.
+/// \param kernel_stack_size The size (in bytes) of the stack area for
+/// interrupt and bottom-half handlers.
///
-/// \param critical_stack A stack area for critical interrupt handlers.
-///
-/// \param critical_stack_size The size (in bytes) of the stack area for
-/// critical interrupt handlers.
-///
-/// \param initial_timebase The initial value of the PK timebase. If this
+/// \param initial_timebase The initial value of the PK timebase.
/// argument is given as the special value \c PK_TIMEBASE_CONTINUE, then the
/// timebase is not reset.
///
@@ -93,16 +88,16 @@ void pk_set_timebase_rshift(uint32_t timebase_freq_hz)
// reset everything at initialization.
int
-pk_initialize(PkAddress noncritical_stack,
- size_t noncritical_stack_size,
+pk_initialize(PkAddress kernel_stack,
+ size_t kernel_stack_size,
PkTimebase initial_timebase,
uint32_t timebase_frequency_hz)
{
int rc;
if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF((noncritical_stack == 0) ||
- (noncritical_stack_size == 0),
+ PK_ERROR_IF((kernel_stack == 0) ||
+ (kernel_stack_size == 0),
PK_INVALID_ARGUMENT_INIT);
}
@@ -110,13 +105,13 @@ pk_initialize(PkAddress noncritical_stack,
__pk_thread_machine_context_default = PK_THREAD_MACHINE_CONTEXT_DEFAULT;
- rc = __pk_stack_init(&noncritical_stack, &noncritical_stack_size);
+ rc = __pk_stack_init(&kernel_stack, &kernel_stack_size);
if (rc) {
return rc;
}
- __pk_noncritical_stack = noncritical_stack;
- __pk_noncritical_stack_size = noncritical_stack_size;
+ __pk_kernel_stack = kernel_stack;
+ __pk_kernel_stack_size = kernel_stack_size;
#if PK_TIMER_SUPPORT
@@ -131,19 +126,21 @@ pk_initialize(PkAddress noncritical_stack,
extern PkTimer g_pk_trace_timer;
extern PkTraceBuffer g_pk_trace_buf;
- // Schedule the timer that puts a 64bit timestamp in the trace buffer
- // periodically. This allows us to use 32bit timestamps.
- pk_timer_schedule(&g_pk_trace_timer,
- PK_TRACE_TIMER_PERIOD);
-
//set the trace timebase HZ
g_pk_trace_buf.hz = timebase_frequency_hz;
+ //set the shift adjustment to get us closer to the true
+ //timebase frequency (versus what was hardcoded)
pk_set_timebase_rshift(timebase_frequency_hz);
//set the timebase adjustment for trace synchronization
pk_trace_set_timebase(initial_timebase);
+ // Schedule the timer that puts a 64bit timestamp in the trace buffer
+ // periodically. This allows us to use 32bit timestamps.
+ pk_timer_schedule(&g_pk_trace_timer,
+ PK_TRACE_TIMER_PERIOD);
+
#endif /* PK_TRACE_SUPPORT */
#endif /* PK_TIMER_SUPPORT */
diff --git a/pk/kernel/pk_kernel.h b/pk/kernel/pk_kernel.h
index 7e88b828..85b028a2 100644
--- a/pk/kernel/pk_kernel.h
+++ b/pk/kernel/pk_kernel.h
@@ -33,33 +33,19 @@
#ifndef __ASSEMBLER__
-/// This is the stack pointer saved when switching from a thread or
-/// non-critical interrupt context to a full-mode critical interrupt context.
-
-UNLESS__PK_CORE_C__(extern)
-volatile
-PkAddress __pk_saved_sp_critical;
-
-/// The critical interrupt stack; constant once defined by the call of
-/// pk_initialize().
-
-UNLESS__PK_CORE_C__(extern)
-volatile
-PkAddress __pk_critical_stack;
-
-/// This is the stack pointer saved when switching from a thread context to a
-/// full-mode non-critical interrupt context.
+/// This is the stack pointer saved when switching from a thread context to an
+/// interrupt context.
UNLESS__PK_CORE_C__(extern)
volatile
-PkAddress __pk_saved_sp_noncritical;
+PkAddress __pk_saved_sp;
-/// The non-critical interrupt stack; constant once defined by the call of
+/// The kernel stack; constant once defined by the call of
/// pk_initialize().
UNLESS__PK_CORE_C__(extern)
volatile
-PkAddress __pk_noncritical_stack;
+PkAddress __pk_kernel_stack;
/// This is the run queue - the queue of mapped runnable tasks.
UNLESS__PK_CORE_C__(extern)
@@ -68,7 +54,7 @@ PkThreadQueue __pk_run_queue;
/// This flag is set by \c __pk_schedule() if a new highest-priority thread
/// becomes runnable during an interrupt handler. The context switch will
-/// take place at the end of non-critical interrupt processing, and the
+/// take place at the end of interrupt processing, and the
/// interrupt handling code will clear the flag.
UNLESS__PK_CORE_C__(extern)
@@ -146,17 +132,11 @@ volatile
PkMachineContext __pk_thread_machine_context_default;
-/// The size of the noncritical stack (bytes).
-
-UNLESS__PK_CORE_C__(extern)
-volatile
-size_t __pk_noncritical_stack_size;
-
-/// The size of the critical stack (bytes).
+/// The size of the kernel stack (bytes).
UNLESS__PK_CORE_C__(extern)
volatile
-size_t __pk_critical_stack_size;
+size_t __pk_kernel_stack_size;
/// This table maps priorities to threads, and contains PK_THREADS + 1
/// entries. The final entry is for the idle thread and will always be null
diff --git a/pk/kernel/pk_semaphore_core.c b/pk/kernel/pk_semaphore_core.c
index d5e6d30c..0e1e34d4 100644
--- a/pk/kernel/pk_semaphore_core.c
+++ b/pk/kernel/pk_semaphore_core.c
@@ -30,9 +30,6 @@
///
/// \retval 0 Successful completion
///
-/// \retval -PK_ILLEGAL_CONTEXT The API was called from a critical interrupt
-/// context.
-///
/// \retval -PK_INVALID_SEMAPHORE_AT_POST The \a semaphore is a null (0) pointer.
///
/// \retval -PK_SEMAPHORE_OVERFLOW The \a max_count argument supplied when
@@ -58,7 +55,7 @@ pk_semaphore_post(PkSemaphore *semaphore)
__pk_thread_queue_delete(&(semaphore->pending_threads), priority);
__pk_thread_queue_insert(&__pk_run_queue, priority);
- PK_TRACE_THREAD_SEMAPHORE_POST(priority);
+ PK_KERN_TRACE("THREAD_SEMAPHORE_POST(%d)", priority);
__pk_schedule();
@@ -127,9 +124,6 @@ pk_semaphore_post(PkSemaphore *semaphore)
///
/// The following return codes are error codes:
///
-/// \retval -PK_ILLEGAL_CONTEXT The API was called from a critical interrupt
-/// context.
-///
/// \retval -PK_INVALID_SEMAPHORE_AT_PEND The \a semaphore is a null (0)
/// pointer.
///
@@ -181,7 +175,7 @@ pk_semaphore_pend(PkSemaphore *semaphore,
thread->semaphore = semaphore;
thread->flags |= PK_THREAD_FLAG_SEMAPHORE_PEND;
- PK_TRACE_THREAD_SEMAPHORE_PEND(priority);
+ PK_KERN_TRACE("THREAD_SEMAPHORE_PEND(%d)", priority);
if (timeout != PK_WAIT_FOREVER) {
timer = &(thread->timer);
@@ -231,9 +225,6 @@ pk_semaphore_pend(PkSemaphore *semaphore,
///
/// \retval 0 Successful completion
///
-/// \retval -PK_ILLEGAL_CONTEXT The API was called from a critical interrupt
-/// context.
-///
/// \retval -PK_INVALID_SEMAPHORE_AT_RELEASE The \a semaphore is a null (0)
/// pointer.
@@ -271,9 +262,7 @@ pk_semaphore_release_all(PkSemaphore* semaphore)
/// parameter to the null pointer (0) if this information is not required.
///
/// The information returned by this API can only be guaranteed consistent if
-/// the API is called from a critical section. Since the
-/// implementation of this API does not require a critical section, it is not
-/// an error to call this API from a critical interrupt context.
+/// the API is called from a critical section.
///
/// Return values other than PK_OK (0) are errors; see \ref pk_errors
///
@@ -306,7 +295,7 @@ pk_semaphore_info_get(PkSemaphore* semaphore,
/// A simple interrupt handler that posts to a semaphore.
///
/// To implement basic event-driven blocking of a thread, install
-/// pk_semaphore_post_handler() as the handler for a non-critical interrupt
+/// pk_semaphore_post_handler() as the handler for an interrupt
/// and provide a pointer to the semaphore as the \a arg argument in
/// pk_irq_handler_set(). The semaphore should be initialized with
/// pk_semaphore_create(&sem, 0, 1). This handler simply disables (masks)
@@ -319,12 +308,11 @@ pk_semaphore_info_get(PkSemaphore* semaphore,
/// condition in the device before re-enabling the interrupt.
#if 0
void
-pk_semaphore_post_handler_full(void *arg, PkIrqId irq, int priority)
+pk_semaphore_post_handler(void *arg, PkIrqId irq, int priority)
{
pk_irq_disable(irq);
pk_irq_status_clear(irq);
pk_semaphore_post((PkSemaphore *)arg);
}
-PK_IRQ_FAST2FULL(pk_semaphore_post_handler, pk_semaphore_post_handler_full);
#endif
diff --git a/pk/kernel/pk_thread.h b/pk/kernel/pk_thread.h
new file mode 100644
index 00000000..acc32525
--- /dev/null
+++ b/pk/kernel/pk_thread.h
@@ -0,0 +1,56 @@
+#ifndef __PK_THREAD_H__
+#define __PK_THREAD_H__
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2015
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file pk_thread.h
+/// \brief Contains private declarations and definitions needed for threads
+///
+
+void
+__pk_thread_map(PkThread* thread);
+
+void
+__pk_thread_unmap(PkThread *thread);
+
+
+// Interrupts must be disabled at entry.
+
+static inline int
+__pk_thread_is_active(PkThread *thread)
+{
+ return ((thread->state != PK_THREAD_STATE_COMPLETED) &&
+ (thread->state != PK_THREAD_STATE_DELETED));
+}
+
+
+// Interrupts must be disabled at entry.
+
+static inline int
+__pk_thread_is_mapped(PkThread *thread)
+{
+ return (thread->state == PK_THREAD_STATE_MAPPED);
+}
+
+
+// Interrupts must be disabled at entry. This is only called on mapped threads.
+
+static inline int
+__pk_thread_is_runnable(PkThread *thread)
+{
+ return __pk_thread_queue_member(&__pk_run_queue, thread->priority);
+}
+
+
+// Interrupts must be disabled at entry.
+
+static inline PkThread*
+__pk_thread_at_priority(PkThreadPriority priority)
+{
+ return (PkThread*)__pk_priority_map[priority];
+}
+
+#endif /* __PK_THREAD_H__ */
diff --git a/pk/kernel/pk_thread_core.c b/pk/kernel/pk_thread_core.c
index 56e083d4..2966eb9b 100644
--- a/pk/kernel/pk_thread_core.c
+++ b/pk/kernel/pk_thread_core.c
@@ -11,52 +11,12 @@
/// always be present at runtime in any PK application that enables threads.
#include "pk.h"
+#include "pk_thread.h"
#define __PK_THREAD_CORE_C__
-// This routine is only used locally. Noncritical interrupts must be disabled
-// at entry.
-
-static inline int
-__pk_thread_is_active(PkThread *thread)
-{
- return ((thread->state != PK_THREAD_STATE_COMPLETED) &&
- (thread->state != PK_THREAD_STATE_DELETED));
-}
-
-
-// This routine is only used locally. Noncritical interrupts must be disabled
-// at entry.
-
-static inline int
-__pk_thread_is_mapped(PkThread *thread)
-{
- return (thread->state == PK_THREAD_STATE_MAPPED);
-}
-
-
-// This routine is only used locally. Noncritical interrupts must be disabled
-// at entry. This is only called on mapped threads.
-
-static inline int
-__pk_thread_is_runnable(PkThread *thread)
-{
- return __pk_thread_queue_member(&__pk_run_queue, thread->priority);
-}
-
-
-// This routine is only used locally. Noncritical interrupts must be disabled
-// at entry.
-
-static inline PkThread*
-__pk_thread_at_priority(PkThreadPriority priority)
-{
- return (PkThread*)__pk_priority_map[priority];
-}
-
-
-// This routine is only used locally. Noncritical interrupts must be disabled
+// This routine is only used locally. Interrupts must be disabled
// at entry. The caller must also have checked that the priority is free.
// This routine is only called on threads known to be in a suspended state,
// either PK_THREAD_STATE_SUSPENDED_RUNNABLE or
@@ -96,17 +56,17 @@ __pk_thread_map(PkThread* thread)
if (PK_KERNEL_TRACE_ENABLE) {
if (__pk_thread_is_runnable(thread)) {
- PK_TRACE_THREAD_MAPPED_RUNNABLE(priority);
+ PK_KERN_TRACE("THREAD_MAPPED_RUNNABLE(%d)", priority);
} else if (thread->flags & PK_THREAD_FLAG_SEMAPHORE_PEND) {
- PK_TRACE_THREAD_MAPPED_SEMAPHORE_PEND(priority);
+ PK_KERN_TRACE("THREAD_MAPPED_SEMAPHORE_PEND(%d)", priority);
} else {
- PK_TRACE_THREAD_MAPPED_SLEEPING(priority);
+ PK_KERN_TRACE("THREAD_MAPPED_SLEEPING(%d)", priority);
}
}
}
-// This routine is only used locally. Noncritical interrupts must be disabled
+// This routine is only used locally. Interrupts must be disabled
// at entry. This routine is only ever called on threads in the
// PK_THREAD_STATE_MAPPED. Unmapping a thread removes it from the priority
// map, the run queue and any semaphore pend, but does not cancel any
@@ -214,9 +174,9 @@ __pk_thread_delete(PkThread *thread, PkThreadState final_state)
if (PK_KERNEL_TRACE_ENABLE) {
if (final_state == PK_THREAD_STATE_DELETED) {
- PK_TRACE_THREAD_DELETED(thread->priority);
+ PK_KERN_TRACE("THREAD_DELETED(%d)", thread->priority);
} else {
- PK_TRACE_THREAD_COMPLETED(thread->priority);
+ PK_KERN_TRACE("THREAD_COMPLETED(%d)", thread->priority);
}
}
@@ -243,17 +203,17 @@ __pk_thread_delete(PkThread *thread, PkThreadState final_state)
// pk_semaphore_release_all(), cancelling any semaphore timeouts is deferred
// until the thread runs again.
//
-// __pk_thread_timeout() is currenly the only timer interrupt called from a
-// critical section.
-//
// Note that we do not create trace events for unmapped threads since the trace
// tag only encodes the priority, which may be in use by a mapped thread.
void
__pk_thread_timeout(void *arg)
{
+ PkMachineContext ctx;
PkThread *thread = (PkThread *)arg;
+ pk_critical_section_enter(&ctx);
+
switch (thread->state) {
case PK_THREAD_STATE_MAPPED:
@@ -275,19 +235,21 @@ __pk_thread_timeout(void *arg)
default:
PK_PANIC(PK_THREAD_TIMEOUT_STATE);
}
+
+ pk_critical_section_exit(&ctx);
}
// This routine serves as a container for the PK_START_THREADS_HOOK and
// actually starts threads. The helper routine __pk_call_pk_start_threads()
// arranges this routine to be called with interrupts disabled while running
-// on the noncritical interrupt stack.
+// on the kernel stack.
//
// The reason for this roundabout is that we want to be able to run a hook
// routine (transparent to the application) that can hand over every last byte
// of free memory to "malloc()" - including the stack of main(). Since we
// always need to run on some stack, we chose to run the hook on the kernel
-// noncritical interrupt stack. However to do this safely we need to make sure
+// stack. However to do this safely we need to make sure
// that no interrupts will happen during this time. When __pk_thread_resume()
// is finally called all stack-based context is lost but it doesn't matter at
// that point - it's a one-way street into thread execution.
@@ -367,9 +329,6 @@ pk_start_threads(void)
/// \retval 0 Successful completion, including calls on a \a thread that is
/// already mapped.
///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD The API was called
-/// from a critical interrupt context.
-///
/// \retval -PK_INVALID_THREAD_AT_RESUME1 The \a thread is a null (0) pointer.
///
/// \retval -PK_INVALID_THREAD_AT_RESUME2 The \a thread is not active,
@@ -440,9 +399,6 @@ pk_thread_resume(PkThread *thread)
/// \retval 0 Successful completion, including calls on a \a thread that is
/// already suspended.
///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD The API was called from a critical
-/// interrupt context.
-///
/// \retval -PK_INVALID_THREAD_AT_SUSPEND1 The \a thread is a null (0) pointer
///
/// \retval -PK_INVALID_THREAD_AT_SUSPEND2 The \a thread is not active,
@@ -467,7 +423,7 @@ pk_thread_suspend(PkThread *thread)
if (__pk_thread_is_mapped(thread)) {
- PK_TRACE_THREAD_SUSPENDED(thread->priority);
+ PK_KERN_TRACE("THREAD_SUSPENDED(%d)", thread->priority);
__pk_thread_unmap(thread);
__pk_schedule();
}
@@ -497,8 +453,6 @@ pk_thread_suspend(PkThread *thread)
/// \retval 0 Successful completion, including calls on a \a thread that has
/// completed or had already been deleted.
///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD The API was called from a critical
-/// interrupt context.
///
/// \retval -PK_INVALID_THREAD_AT_DELETE The \a thread is a null (0) pointer.
@@ -552,67 +506,6 @@ pk_complete(void)
return PK_OK;
}
-
-/// Sleep a thread until an absolute time
-///
-/// \param time An absolute time as measured by the PK timebase
-///
-/// Threads can use this API to sleep until an absolute time. Sleeping threads
-/// are not scheduled, although they maintain their priorities. This differs
-/// from thread suspension, where the suspended thread relinquishes its
-/// priority. When the sleep timer times out the thread becomes runnable
-/// again, and will run as soon as it becomes the highest-priority mapped
-/// runnable thread.
-///
-/// Sleeping threads may also be later suspended. In this case the Sleep timer
-/// continues to run, and if it times out before the thread is resumed the
-/// thread will be immediately runnable when it is resumed.
-///
-/// See the PK specification for a full discussion of how PK handles
-/// scheduling events at absolute times "in the past". Briefly stated, if the
-/// \a time is in the past, the thread will Sleep for the briefest possible
-/// period supported by the hardware.
-///
-/// Return values other than PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion.
-///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD The API was not called from a thread
-/// context.
-
-// Note: Casting __pk_current_thread removes the 'volatile' attribute.
-
-int
-pk_sleep_absolute(PkTimebase time)
-{
- PkMachineContext ctx;
- PkThread *current;
-
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_UNLESS_THREAD_CONTEXT();
- }
-
- pk_critical_section_enter(&ctx);
-
- current = (PkThread *)__pk_current_thread;
-
- current->timer.timeout = time;
- __pk_timer_schedule(&(current->timer));
-
- current->flags |= PK_THREAD_FLAG_TIMER_PEND;
-
- PK_TRACE_THREAD_SLEEP(current->priority);
-
- __pk_thread_queue_delete(&__pk_run_queue, current->priority);
- __pk_schedule();
-
- current->flags &= ~(PK_THREAD_FLAG_TIMER_PEND | PK_THREAD_FLAG_TIMED_OUT);
-
- pk_critical_section_exit(&ctx);
-
- return PK_OK;
-}
-
/// Sleep a thread for an interval relative to the current time.
///
/// \param interval A time interval relative to the current timebase.
@@ -646,294 +539,35 @@ pk_sleep_absolute(PkTimebase time)
int
pk_sleep(PkInterval interval)
{
- return pk_sleep_absolute(pk_timebase_get() + PK_INTERVAL_SCALE(interval));
-}
-
-
-/// Get information about a thread.
-///
-/// \param thread A pointer to the PkThread to query
-///
-/// \param state The value returned through this pointer is the current state
-/// of the thread; See \ref pk_thread_states. The caller can set this
-/// parameter to the null pointer (0) if this information is not required.
-///
-/// \param priority The value returned through this pointer is the current
-/// priority of the thread. The caller can set this parameter to the null
-/// pointer (0) if this information is not required.
-///
-/// \param runnable The value returned through this pointer is 1 if the thread
-/// is in state PK_THREAD_STATE_MAPPED and is currently in the run queue
-/// (i.e., neither blocked on a semaphore nor sleeping), otherwise 0. The
-/// caller can set this parameter to the null pointer (0) if this information
-/// is not required.
-///
-/// The information returned by this API can only be guaranteed consistent if
-/// the API is called from a critical section. Since the
-/// implementation of this API does not enforce a critical section, it is not
-/// an error to call this API from a critical interrupt context.
-///
-/// Return values other than PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_INVALID_THREAD_AT_INFO The \a thread is a null (0) pointer.
-
-int
-pk_thread_info_get(PkThread *thread,
- PkThreadState *state,
- PkThreadPriority *priority,
- int *runnable)
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF(thread == 0, PK_INVALID_THREAD_AT_INFO);
- }
-
- if (state) {
- *state = thread->state;
- }
- if (priority) {
- *priority = thread->priority;
- }
- if (runnable) {
- *runnable = ((thread->state == PK_THREAD_STATE_MAPPED) &&
- __pk_thread_queue_member(&__pk_run_queue,
- thread->priority));
- }
- return PK_OK;
-}
-
-
-/// Change the priority of a thread.
-///
-/// \param thread The thread whose priority will be changed
-///
-/// \param new_priority The new priority of the thread
-///
-/// \param old_priority The value returned through this pointer is the
-/// old priority of the thread prior to the change. The caller can set
-/// this parameter to the null pointer (0) if this information is not
-/// required.
-///
-/// Thread priorities can be changed by the \c pk_thread_priority_change()
-/// API. This call will fail if the thread pointer is invalid or if the thread
-/// is mapped and the new priority is currently in use. The call will succeed
-/// even if the \a thread is suspended, completed or deleted. The
-/// application-level scheduling algorithm is completely responsible for the
-/// correctness of the application in the event of suspended, completed or
-/// deleted threads.
-///
-/// Return values other than PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion, including the redundant case of
-/// attempting to change the priority of the thread to its current priority.
-///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD the API was called from a critical
-/// interrupt context.
-///
-/// \retval -PK_INVALID_THREAD_AT_CHANGE The \a thread is null (0) or
-/// otherwise invalid.
-///
-/// \retval -PK_INVALID_ARGUMENT_THREAD_CHANGE The \a new_priority is invalid.
-///
-/// \retval -PK_PRIORITY_IN_USE_AT_CHANGE The \a thread is mapped and the \a
-/// new_priority is currently in use by another thread.
-
-int
-pk_thread_priority_change(PkThread *thread,
- PkThreadPriority new_priority,
- PkThreadPriority *old_priority)
-{
+ PkTimebase time;
PkMachineContext ctx;
- PkThreadPriority priority;
-
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF(thread == 0, PK_INVALID_THREAD_AT_CHANGE);
- PK_ERROR_IF(new_priority > PK_THREADS,
- PK_INVALID_ARGUMENT_THREAD_CHANGE);
- }
-
- pk_critical_section_enter(&ctx);
-
- priority = thread->priority;
-
- if (priority != new_priority) {
-
- if (!__pk_thread_is_mapped(thread)) {
-
- thread->priority = new_priority;
-
- } else {
-
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF_CRITICAL(__pk_priority_map[new_priority] != 0,
- PK_PRIORITY_IN_USE_AT_CHANGE,
- &ctx);
- }
-
- __pk_thread_unmap(thread);
- thread->priority = new_priority;
- __pk_thread_map(thread);
- __pk_schedule();
- }
- }
-
- if (old_priority) {
- *old_priority = priority;
- }
-
- pk_critical_section_exit(&ctx);
-
- return PK_OK;
-}
-
-
-/// Return a pointer to the thread (if any) mapped at a given priority.
-///
-/// \param priority The thread priority of interest
-///
-/// \param thread The value returned through this pointer is a pointer to the
-/// thread currently mapped at the given priority level. If no thread is
-/// mapped, or if the \a priority is the priority of the idle thread, the
-/// pointer returned will be null (0).
-///
-/// The information returned by this API can only be guaranteed consistent if
-/// the API is called from a critical section. Since the
-/// implementation of this API does not require a critical section, it is not
-/// an error to call this API from a critical interrupt context.
-///
-/// Return values other than PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion.
-///
-/// \retval -PK_INVALID_ARGUMENT_THREAD_PRIORITY The \a priority is invalid
-/// or the \a thread parameter is null (0).
+ PkThread *current;
-int
-pk_thread_at_priority(PkThreadPriority priority,
- PkThread **thread)
-{
if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF((priority > PK_THREADS) || (thread == 0),
- PK_INVALID_ARGUMENT_THREAD_PRIORITY);
+ PK_ERROR_UNLESS_THREAD_CONTEXT();
}
- *thread = __pk_thread_at_priority(priority);
+ time = pk_timebase_get() + PK_INTERVAL_SCALE(interval);
- return PK_OK;
-}
-
-
-/// Swap thread priorities
-///
-/// \param thread_a A pointer to an initialized PkThread
-///
-/// \param thread_b A pointer to an initialized PkThread
-///
-/// This API swaps the priorities of \a thread_a and \a thread_b. The API is
-/// provided to support general and efficient application-directed scheduling
-/// algorithms. The requirements on the \a thread_a and \a thread_b arguments
-/// are that they are valid pointers to initialized PkThread structures, that
-/// the current thread priorities of both threads are legal, and that if a
-/// thread is currently mapped, that the new thread priority is not otherwise
-/// in use.
-///
-/// The API does not require either thread to be mapped, or even to be active.
-/// It is legal for one or both of the swap partners to be suspended, deleted
-/// or completed threads. The application is completely responsible for the
-/// correctness of scheduling algorithms that might operate on inactive or
-/// suspended threads.
-///
-/// The API does not change the mapped status of a thread. A thread will be
-/// mapped after the call of pk_thread_priority_swap() if and only if it was
-/// mapped prior to the call. If the new priority of a mapped thread is
-/// currently in use (by a thread other than the swap partner), then the
-/// PK_PRIORITY_IN_USE_AT_SWAP error is signalled and the swap does not take
-/// place. This could only happen if the swap partner is not currently mapped.
-///
-/// It is legal for a thread to swap its own priority with another thread. The
-/// degenerate case that \a thread_a and \a thread_b are equal is also legal -
-/// but has no effect.
-///
-/// Return values other than PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion, including the redundant cases that do not
-/// actually change priorities, or the cases that assign new priorities to
-/// suspended, completed or deleted threads.
-///
-/// \retval -PK_ILLEGAL_CONTEXT_THREAD the API was called from a critical
-/// interrupt context.
-///
-/// \retval -PK_INVALID_THREAD_AT_SWAP1 One or both of \a thread_a and
-/// \a thread_b is null (0) or otherwise invalid,
-///
-/// \retval -PK_INVALID_THREAD_AT_SWAP2 the priorities of One or both of
-/// \a thread_a and \a thread_b are invalid.
-///
-/// \retval -PK_INVALID_ARGUMENT One or both of the priorities
-/// of \a thread_a and \a thread_b is invalid.
-///
-/// \retval -PK_PRIORITY_IN_USE_AT_SWAP Returned if a thread is mapped and the
-/// new thread priority is currently in use by another thread (other than the
-/// swap partner).
-
-int
-pk_thread_priority_swap(PkThread* thread_a, PkThread* thread_b)
-{
- PkMachineContext ctx;
- PkThreadPriority priority_a, priority_b;
- int mapped_a, mapped_b;
+ pk_critical_section_enter(&ctx);
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF((thread_a == 0) || (thread_b == 0),
- PK_INVALID_THREAD_AT_SWAP1);
- }
+ current = (PkThread *)__pk_current_thread;
- pk_critical_section_enter(&ctx);
+ current->timer.timeout = time;
+ __pk_timer_schedule(&(current->timer));
- if (thread_a != thread_b) {
+ current->flags |= PK_THREAD_FLAG_TIMER_PEND;
- mapped_a = __pk_thread_is_mapped(thread_a);
- mapped_b = __pk_thread_is_mapped(thread_b);
- priority_a = thread_a->priority;
- priority_b = thread_b->priority;
+ PK_KERN_TRACE("THREAD_SLEEP(%d)", current->priority);
- if (PK_ERROR_CHECK_API) {
- int priority_in_use;
- PK_ERROR_IF_CRITICAL((priority_a > PK_THREADS) ||
- (priority_b > PK_THREADS),
- PK_INVALID_THREAD_AT_SWAP2,
- &ctx);
- priority_in_use =
- (mapped_a && !mapped_b &&
- (__pk_thread_at_priority(priority_b) != 0)) ||
- (!mapped_a && mapped_b &&
- (__pk_thread_at_priority(priority_a) != 0));
- PK_ERROR_IF_CRITICAL(priority_in_use,
- PK_PRIORITY_IN_USE_AT_SWAP, &ctx);
- }
+ __pk_thread_queue_delete(&__pk_run_queue, current->priority);
+ __pk_schedule();
- if (mapped_a) {
- __pk_thread_unmap(thread_a);
- }
- if (mapped_b) {
- __pk_thread_unmap(thread_b);
- }
- thread_a->priority = priority_b;
- thread_b->priority = priority_a;
- if (mapped_a) {
- __pk_thread_map(thread_a);
- }
- if (mapped_b) {
- __pk_thread_map(thread_b);
- }
- __pk_schedule();
- }
+ current->flags &= ~(PK_THREAD_FLAG_TIMER_PEND | PK_THREAD_FLAG_TIMED_OUT);
pk_critical_section_exit(&ctx);
return PK_OK;
}
-
#undef __PK_THREAD_CORE_C__
diff --git a/pk/kernel/pk_thread_init.c b/pk/kernel/pk_thread_init.c
index 686f3512..97bce4c4 100644
--- a/pk/kernel/pk_thread_init.c
+++ b/pk/kernel/pk_thread_init.c
@@ -53,9 +53,6 @@
///
/// \retval -PK_INVALID_THREAD_AT_CREATE The \a thread is a null (0) pointer.
///
-/// \retval -PK_ILLEGAL_CONTEXT The API was called from a critical interrupt
-/// context.
-///
/// \retval -PK_INVALID_ARGUMENT_THREAD1 the \a thread_routine is null (0)
///
/// \retval -PK_INVALID_ARGUMENT_THREAD2 the \a priority is invalid,
@@ -113,9 +110,9 @@ pk_thread_create(PkThread *thread,
thread->state = PK_THREAD_STATE_SUSPENDED_RUNNABLE;
thread->flags = 0;
- pk_timer_create_nonpreemptible(&(thread->timer),
- __pk_thread_timeout,
- (void *)thread);
+ pk_timer_create(&(thread->timer),
+ __pk_thread_timeout,
+ (void *)thread);
__pk_thread_context_initialize(thread, thread_routine, arg);
diff --git a/pk/kernel/pk_thread_util.c b/pk/kernel/pk_thread_util.c
new file mode 100644
index 00000000..bf2e21b7
--- /dev/null
+++ b/pk/kernel/pk_thread_util.c
@@ -0,0 +1,291 @@
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2014
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file pk_thread_util.c
+/// \brief PK thread utility APIs
+///
+/// The entry points in this file are considered extra routines that will
+/// only be included in a PK application that enables threads and uses at
+/// least one of these interfaces.
+
+#include "pk.h"
+#include "pk_thread.h"
+
+/// Get information about a thread.
+///
+/// \param thread A pointer to the PkThread to query
+///
+/// \param state The value returned through this pointer is the current state
+/// of the thread; See \ref pk_thread_states. The caller can set this
+/// parameter to the null pointer (0) if this information is not required.
+///
+/// \param priority The value returned through this pointer is the current
+/// priority of the thread. The caller can set this parameter to the null
+/// pointer (0) if this information is not required.
+///
+/// \param runnable The value returned through this pointer is 1 if the thread
+/// is in state PK_THREAD_STATE_MAPPED and is currently in the run queue
+/// (i.e., neither blocked on a semaphore nor sleeping), otherwise 0. The
+/// caller can set this parameter to the null pointer (0) if this information
+/// is not required.
+///
+/// The information returned by this API can only be guaranteed consistent if
+/// the API is called from a critical section.
+///
+/// Return values other than PK_OK (0) are errors; see \ref pk_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -PK_INVALID_THREAD_AT_INFO The \a thread is a null (0) pointer.
+
+int
+pk_thread_info_get(PkThread *thread,
+ PkThreadState *state,
+ PkThreadPriority *priority,
+ int *runnable)
+{
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF(thread == 0, PK_INVALID_THREAD_AT_INFO);
+ }
+
+ if (state) {
+ *state = thread->state;
+ }
+ if (priority) {
+ *priority = thread->priority;
+ }
+ if (runnable) {
+ *runnable = ((thread->state == PK_THREAD_STATE_MAPPED) &&
+ __pk_thread_queue_member(&__pk_run_queue,
+ thread->priority));
+ }
+ return PK_OK;
+}
+
+
+/// Change the priority of a thread.
+///
+/// \param thread The thread whose priority will be changed
+///
+/// \param new_priority The new priority of the thread
+///
+/// \param old_priority The value returned through this pointer is the
+/// old priority of the thread prior to the change. The caller can set
+/// this parameter to the null pointer (0) if this information is not
+/// required.
+///
+/// Thread priorities can be changed by the \c pk_thread_priority_change()
+/// API. This call will fail if the thread pointer is invalid or if the thread
+/// is mapped and the new priority is currently in use. The call will succeed
+/// even if the \a thread is suspended, completed or deleted. The
+/// application-level scheduling algorithm is completely responsible for the
+/// correctness of the application in the event of suspended, completed or
+/// deleted threads.
+///
+/// Return values other than PK_OK (0) are errors; see \ref pk_errors
+///
+/// \retval 0 Successful completion, including the redundant case of
+/// attempting to change the priority of the thread to its current priority.
+///
+/// \retval -PK_INVALID_THREAD_AT_CHANGE The \a thread is null (0) or
+/// otherwise invalid.
+///
+/// \retval -PK_INVALID_ARGUMENT_THREAD_CHANGE The \a new_priority is invalid.
+///
+/// \retval -PK_PRIORITY_IN_USE_AT_CHANGE The \a thread is mapped and the \a
+/// new_priority is currently in use by another thread.
+
+int
+pk_thread_priority_change(PkThread *thread,
+ PkThreadPriority new_priority,
+ PkThreadPriority *old_priority)
+{
+ PkMachineContext ctx;
+ PkThreadPriority priority;
+
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF(thread == 0, PK_INVALID_THREAD_AT_CHANGE);
+ PK_ERROR_IF(new_priority > PK_THREADS,
+ PK_INVALID_ARGUMENT_THREAD_CHANGE);
+ }
+
+ pk_critical_section_enter(&ctx);
+
+ priority = thread->priority;
+
+ if (priority != new_priority) {
+
+ if (!__pk_thread_is_mapped(thread)) {
+
+ thread->priority = new_priority;
+
+ } else {
+
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF_CRITICAL(__pk_priority_map[new_priority] != 0,
+ PK_PRIORITY_IN_USE_AT_CHANGE,
+ &ctx);
+ }
+
+ __pk_thread_unmap(thread);
+ thread->priority = new_priority;
+ __pk_thread_map(thread);
+ __pk_schedule();
+ }
+ }
+
+ if (old_priority) {
+ *old_priority = priority;
+ }
+
+ pk_critical_section_exit(&ctx);
+
+ return PK_OK;
+}
+
+
+/// Return a pointer to the thread (if any) mapped at a given priority.
+///
+/// \param priority The thread priority of interest
+///
+/// \param thread The value returned through this pointer is a pointer to the
+/// thread currently mapped at the given priority level. If no thread is
+/// mapped, or if the \a priority is the priority of the idle thread, the
+/// pointer returned will be null (0).
+///
+/// The information returned by this API can only be guaranteed consistent if
+/// the API is called from a critical section.
+///
+/// Return values other than PK_OK (0) are errors; see \ref pk_errors
+///
+/// \retval 0 Successful completion.
+///
+/// \retval -PK_INVALID_ARGUMENT_THREAD_PRIORITY The \a priority is invalid
+/// or the \a thread parameter is null (0).
+
+int
+pk_thread_at_priority(PkThreadPriority priority,
+ PkThread **thread)
+{
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF((priority > PK_THREADS) || (thread == 0),
+ PK_INVALID_ARGUMENT_THREAD_PRIORITY);
+ }
+
+ *thread = __pk_thread_at_priority(priority);
+
+ return PK_OK;
+}
+
+
+/// Swap thread priorities
+///
+/// \param thread_a A pointer to an initialized PkThread
+///
+/// \param thread_b A pointer to an initialized PkThread
+///
+/// This API swaps the priorities of \a thread_a and \a thread_b. The API is
+/// provided to support general and efficient application-directed scheduling
+/// algorithms. The requirements on the \a thread_a and \a thread_b arguments
+/// are that they are valid pointers to initialized PkThread structures, that
+/// the current thread priorities of both threads are legal, and that if a
+/// thread is currently mapped, that the new thread priority is not otherwise
+/// in use.
+///
+/// The API does not require either thread to be mapped, or even to be active.
+/// It is legal for one or both of the swap partners to be suspended, deleted
+/// or completed threads. The application is completely responsible for the
+/// correctness of scheduling algorithms that might operate on inactive or
+/// suspended threads.
+///
+/// The API does not change the mapped status of a thread. A thread will be
+/// mapped after the call of pk_thread_priority_swap() if and only if it was
+/// mapped prior to the call. If the new priority of a mapped thread is
+/// currently in use (by a thread other than the swap partner), then the
+/// PK_PRIORITY_IN_USE_AT_SWAP error is signalled and the swap does not take
+/// place. This could only happen if the swap partner is not currently mapped.
+///
+/// It is legal for a thread to swap its own priority with another thread. The
+/// degenerate case that \a thread_a and \a thread_b are equal is also legal -
+/// but has no effect.
+///
+/// Return values other than PK_OK (0) are errors; see \ref pk_errors
+///
+/// \retval 0 Successful completion, including the redundant cases that do not
+/// actually change priorities, or the cases that assign new priorities to
+/// suspended, completed or deleted threads.
+///
+/// \retval -PK_INVALID_THREAD_AT_SWAP1 One or both of \a thread_a and
+/// \a thread_b is null (0) or otherwise invalid,
+///
+/// \retval -PK_INVALID_THREAD_AT_SWAP2 the priorities of One or both of
+/// \a thread_a and \a thread_b are invalid.
+///
+/// \retval -PK_INVALID_ARGUMENT One or both of the priorities
+/// of \a thread_a and \a thread_b is invalid.
+///
+/// \retval -PK_PRIORITY_IN_USE_AT_SWAP Returned if a thread is mapped and the
+/// new thread priority is currently in use by another thread (other than the
+/// swap partner).
+
+int
+pk_thread_priority_swap(PkThread* thread_a, PkThread* thread_b)
+{
+ PkMachineContext ctx;
+ PkThreadPriority priority_a, priority_b;
+ int mapped_a, mapped_b;
+
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF((thread_a == 0) || (thread_b == 0),
+ PK_INVALID_THREAD_AT_SWAP1);
+ }
+
+ pk_critical_section_enter(&ctx);
+
+ if (thread_a != thread_b) {
+
+ mapped_a = __pk_thread_is_mapped(thread_a);
+ mapped_b = __pk_thread_is_mapped(thread_b);
+ priority_a = thread_a->priority;
+ priority_b = thread_b->priority;
+
+ if (PK_ERROR_CHECK_API) {
+ int priority_in_use;
+ PK_ERROR_IF_CRITICAL((priority_a > PK_THREADS) ||
+ (priority_b > PK_THREADS),
+ PK_INVALID_THREAD_AT_SWAP2,
+ &ctx);
+ priority_in_use =
+ (mapped_a && !mapped_b &&
+ (__pk_thread_at_priority(priority_b) != 0)) ||
+ (!mapped_a && mapped_b &&
+ (__pk_thread_at_priority(priority_a) != 0));
+ PK_ERROR_IF_CRITICAL(priority_in_use,
+ PK_PRIORITY_IN_USE_AT_SWAP, &ctx);
+ }
+
+ if (mapped_a) {
+ __pk_thread_unmap(thread_a);
+ }
+ if (mapped_b) {
+ __pk_thread_unmap(thread_b);
+ }
+ thread_a->priority = priority_b;
+ thread_b->priority = priority_a;
+ if (mapped_a) {
+ __pk_thread_map(thread_a);
+ }
+ if (mapped_b) {
+ __pk_thread_map(thread_b);
+ }
+ __pk_schedule();
+ }
+
+ pk_critical_section_exit(&ctx);
+
+ return PK_OK;
+}
+
diff --git a/pk/kernel/pk_timer_core.c b/pk/kernel/pk_timer_core.c
index bc90a3e7..59b9d628 100644
--- a/pk/kernel/pk_timer_core.c
+++ b/pk/kernel/pk_timer_core.c
@@ -16,7 +16,7 @@
/// opens up the possibility of scheduling events "in the past". PK
/// uniformly handles this case by scheduling "past" events to occur 1
/// timebase tick in the future, so that timer callbacks are always run in the
-/// expected noncritical interrupt context.
+/// expected interrupt context.
///
/// PK implements the time queue as a simple unordered list of events, plus a
/// dedicated variable that holds the earliest timeout of any event in the
@@ -65,7 +65,15 @@
#include "pk.h"
-// This routine is only used in this file, and will always be called in
+// Declare the timer bottom half handler
+static PK_BH_HANDLER(__pk_timer_bh_handler);
+
+// Define the timer bottom half handler that the interrupt handler will
+// schedule
+PK_BH_STATIC_CREATE(pk_timer_bh, __pk_timer_bh_handler, 0);
+
+
+// This routine is only used in this file, and will always be called in a
// critical section.
static inline int
@@ -78,7 +86,7 @@ timer_active(PkTimer* timer)
// This is the kernel version of pk_timer_cancel().
//
// This routine is used here and by thread and semaphore routines.
-// Noncritical interrupts must be disabled at entry.
+// External interrupts must be disabled at entry.
//
// If the timer is active, then there is a special case if we are going to
// delete the 'cursor' - that is the timer that __pk_timer_handler() is going
@@ -114,7 +122,7 @@ __pk_timer_cancel(PkTimer *timer)
// This is the kernel version of pk_timer_schedule().
//
// This routine is used here and by thread and semaphore routines.
-// Noncritical interrupts must be disabled at entry.
+// interrupts must be disabled at entry.
//
// Unless the timer is already active it is enqueued in the doubly-linked
// timer list by inserting the timer at the end of the queue. Then the
@@ -144,8 +152,7 @@ __pk_timer_schedule(PkTimer* timer)
// deletions and other factors, there may not actually be a timer in the queue
// that has timed out - but it doesn't matter (other than for efficiency).
//
-// Noncritical interrupts are (must be) disabled at entry, and this invariant
-// is checked. This routine must not be entered reentrantly.
+// This routine must not be entered reentrantly.
//
// First, time out any timers that have expired. Timers in the queue are
// unordered, so we have to check every one. Since passing through the
@@ -158,9 +165,8 @@ __pk_timer_schedule(PkTimer* timer)
// On each pass through the loop tq->next_timeout computes the minimum timeout
// of events remaining in the queue. This is the only part of the kernel that
// searches a list of indefinite length. Kernel interrupt latency is mitigated
-// by running callbacks with interrupts disabled either during or after the
-// call for timed out events, and also after every check for events that have
-// not timed out.
+// by running this function as a bottom half. As such, interrupts are only
+// disabled when explicitly requested.
//
// Because interrupt preemption is enabled during processing, and preempting
// handlers may invoke time queue operations, we need to establish a pointer
@@ -171,9 +177,10 @@ __pk_timer_schedule(PkTimer* timer)
// The main loop iterates on the PkDeque form of the time queue, casting each
// element back up to the PkTimer as it is processed.
-void
-__pk_timer_handler()
+static void
+__pk_timer_bh_handler(void* arg)
{
+ PkMachineContext ctx;
PkTimeQueue* tq;
PkTimebase now;
PkTimer* timer;
@@ -182,20 +189,28 @@ __pk_timer_handler()
tq = &__pk_time_queue;
+ // Check if we entered the function while it was running in another context.
if (PK_ERROR_CHECK_KERNEL) {
if (tq->cursor != 0) {
PK_PANIC(PK_TIMER_HANDLER_INVARIANT);
}
}
- while ((now = pk_timebase_get()) >= tq->next_timeout) {
+ pk_critical_section_enter(&ctx);
+ while ((now = pk_timebase_get()) >= tq->next_timeout) {
tq->next_timeout = PK_TIMEBASE_MAX;
timer_deque = ((PkDeque*)tq)->next;
+ // Iterate through the entire timer list, calling the callback of
+ // timed-out elements and finding the timer that will timeout next,
+ // which is stored in tq->next_timeout.
while (timer_deque != (PkDeque*)tq) {
timer = (PkTimer*)timer_deque;
+
+ // Setting this to a non-zero value indicates we are in the middle
+ // of processing the time queue.
tq->cursor = timer_deque->next;
if (timer->timeout <= now) {
@@ -209,42 +224,49 @@ __pk_timer_handler()
pk_deque_delete(timer_deque);
+ pk_critical_section_exit(&ctx);
+
callback = timer->callback;
if (callback) {
- if (timer->options & PK_TIMER_CALLBACK_PREEMPTIBLE) {
- pk_interrupt_preemption_enable();
- callback(timer->arg);
- } else {
- callback(timer->arg);
- pk_interrupt_preemption_enable();
- }
+ callback(timer->arg);
}
- pk_interrupt_preemption_disable();
} else {
// This timer has not timed out. Its timeout will simply
- // participate in the computation of the next timeout. For
- // interrupt latency reasons we always allow a period of
- // interrupt preemption.
-
+ // participate in the computation of the next timeout.
tq->next_timeout = MIN(timer->timeout, tq->next_timeout);
- pk_interrupt_preemption_enable();
- pk_interrupt_preemption_disable();
+ pk_critical_section_exit(&ctx);
}
timer_deque = tq->cursor;
+ pk_critical_section_enter(&ctx);
}
+
+ // Time has passed since we checked the time. Loop back
+ // to check the time again and see if enough time has passed
+ // that the next timer has timed out too.
}
+ pk_critical_section_exit(&ctx);
+
+ // This marks that we are no longer processing the time queue
tq->cursor = 0;
// Finally, reschedule the next timeout
-
__pk_schedule_hardware_timeout(tq->next_timeout);
}
+void
+__pk_timer_handler(void)
+{
+ //schedule the timer bottom half handler which
+ //is preemptible.
+ pk_bh_schedule(&pk_timer_bh);
+}
+
+
/// Schedule a timer for an interval relative to the current time.
///
/// \param timer The PkTimer to schedule.
@@ -264,8 +286,6 @@ __pk_timer_handler()
/// \retval -PK_INVALID_TIMER_AT_SCHEDULE A a null (0) pointer was provided as
/// the \a timer argument.
///
-/// \retval -PK_ILLEGAL_CONTEXT_TIMER The call was made from a critical
-/// interrupt context.
int
pk_timer_schedule(PkTimer *timer,
@@ -313,9 +333,6 @@ pk_timer_schedule(PkTimer *timer,
///
/// \retval -PK_INVALID_TIMER_AT_CANCEL The \a timer is a null (0) pointer.
///
-/// \retval -PK_ILLEGAL_CONTEXT_TIMER The call was made from a critical
-/// interrupt context.
-///
int
pk_timer_cancel(PkTimer *timer)
@@ -353,9 +370,7 @@ pk_timer_cancel(PkTimer *timer)
/// null pointer (0) if this information is not required.
///
/// The information returned by this API can only be guaranteed consistent if
-/// the API is called from a critical section. Since the
-/// implementation of this API does not require a critical section, it is not
-/// an error to call this API from a critical interrupt context.
+/// the API is called from a critical section.
///
/// Return values other than PK_OK (0) are errors; see \ref pk_errors
///
diff --git a/pk/kernel/pk_timer_init.c b/pk/kernel/pk_timer_init.c
index 9b8f0e1d..457f78c4 100644
--- a/pk/kernel/pk_timer_init.c
+++ b/pk/kernel/pk_timer_init.c
@@ -13,29 +13,8 @@
#include "pk.h"
-// Implementation of timer creation
-static int
-_pk_timer_create(PkTimer *timer,
- PkTimerCallback callback,
- void *arg,
- int options)
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_IF((timer == 0), PK_INVALID_TIMER_AT_CREATE);
- }
-
- pk_deque_element_create((PkDeque*)timer);
- timer->timeout = 0;
- timer->callback = callback;
- timer->arg = arg;
- timer->options = options;
-
- return PK_OK;
-}
-
-
-/// Create (initialize) a preemptible timer.
+/// Create (initialize) a timer.
///
/// \param timer The PkTimer to initialize.
///
@@ -48,10 +27,10 @@ _pk_timer_create(PkTimer *timer,
/// timer in the kernel time queue. Timers can be cancelled by a call of
/// pk_timer_cancel().
///
-/// Timers created with pk_timer_create() are always run as noncritical
-/// interrupt handlers with interrupt preemption enabled. Timer callbacks are
-/// free to enter critical sections of any priorioty if required, but must
-/// always exit with noncritical interrupts enabled.
+/// Timers created with pk_timer_create() are always run as
+/// bottom-half handlers with interrupt preemption enabled. Timer callbacks are
+/// free to enter critical sections if required, but must
+/// always exit with interrupts enabled.
///
/// Caution: PK has no way to know if an PkTimer structure provided to
/// pk_timer_create() is safe to use as a timer, and will silently modify
@@ -63,59 +42,21 @@ _pk_timer_create(PkTimer *timer,
///
/// \retval -PK_INVALID_TIMER_AT_CREATE The \a timer is a null (0) pointer.
-int
+int
pk_timer_create(PkTimer *timer,
- PkTimerCallback callback,
- void *arg)
+ PkTimerCallback callback,
+ void *arg)
{
- return _pk_timer_create(timer, callback, arg,
- PK_TIMER_CALLBACK_PREEMPTIBLE);
-}
-
+ if (PK_ERROR_CHECK_API) {
+ PK_ERROR_IF((timer == 0), PK_INVALID_TIMER_AT_CREATE);
+ }
-/// Create (initialize) a nonpreemptible timer.
-///
-/// \param timer The PkTimer to initialize.
-///
-/// \param callback The timer callback
-///
-/// \param arg Private data provided to the callback.
-///
-/// Once created with pk_timer_create_preemptible() a timer can be scheduled
-/// with pk_timer_schedule() or pk_timer_schedule_absolute(), which queues
-/// the timer in the kernel time queue. Timers can be cancelled by a call of
-/// pk_timer_cancel().
-///
-/// Timers created with pk_timer_create_nonpreemptible() are always run as
-/// noncritical interrupt handlers with interrupt preemption disabled. Timer
-/// callbacks are free to later enable preemption if desired, but must always
-/// exit with noncritical interrupts disabled.
-///
-/// \note The use of pk_timer_create_nonpreemptible() should be rare, and the
-/// timer callbacks should be short and sweet to avoid long interrupt
-/// latencies for other interrupts. This API was initially introduced for use
-/// by the PK kernel itself when scheduling thread-timer callbacks to avoid
-/// potential race conditions with other interrupts that may modify thread
-/// state or the state of the time queue. Applications may also require this
-/// facility to guarantee a consistent state in the event that other
-/// interrupts may cancel the timer.
-///
-/// Caution: PK has no way to know if an PkTimer structure provided to
-/// pk_timer_create() is safe to use as a timer, and will silently modify
-/// whatever memory is provided.
-///
-/// Return values other then PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_INVALID_TIMER_AT_CREATE The \a timer is a null (0) pointer.
+ pk_deque_element_create((PkDeque*)timer);
+ timer->timeout = 0;
+ timer->callback = callback;
+ timer->arg = arg;
-int
-pk_timer_create_nonpreemptible(PkTimer *timer,
- PkTimerCallback callback,
- void *arg)
-{
- return _pk_timer_create(timer, callback, arg, 0);
+ return PK_OK;
}
diff --git a/pk/kernel/pkkernelfiles.mk b/pk/kernel/pkkernelfiles.mk
index 958e4ddf..bb1c310b 100644
--- a/pk/kernel/pkkernelfiles.mk
+++ b/pk/kernel/pkkernelfiles.mk
@@ -21,11 +21,11 @@
##########################################################################
# Object Files
##########################################################################
-PK-C-SOURCES = pk_core.c pk_init.c pk_stack_init.c
+PK-C-SOURCES = pk_core.c pk_init.c pk_stack_init.c pk_bh_core.c
PK-TIMER-C-SOURCES += pk_timer_core.c pk_timer_init.c
-PK-THREAD-C-SOURCES += pk_thread_init.c pk_thread_core.c \
+PK-THREAD-C-SOURCES += pk_thread_init.c pk_thread_core.c pk_thread_util.c \
pk_semaphore_init.c pk_semaphore_core.c
PK_OBJECTS += $(PK-C-SOURCES:.c=.o)
diff --git a/pk/ppe/ppe_common.h b/pk/ppe/ppe_common.h
index bc8519ea..c7df60bd 100644
--- a/pk/ppe/ppe_common.h
+++ b/pk/ppe/ppe_common.h
@@ -27,23 +27,12 @@
#define EXTERNAL_IRQS 64
#ifdef __ASSEMBLER__
-/// This macro contains PPE specific code for determining what IRQ caused the
-/// external exception handler to be invoked by the PPE
-
-/// Load noncritical status 0 and the handler array base address. Check
-/// for interrupts pending in status register 0 while the IRQ is
-/// computed. The IRQ is expected to be stored in r3.
+/// This macro contains PPE specific code.
+/// Since standalone models of the PPE do not support external interrupts
+/// we just set the code to 64 (phantom interrupt)
.macro hwmacro_get_ext_irq
-#_lwzi %r4, %r4, OCB_ONISR0
- cntlzw %r3, %r4
- cmpwible %r3, 31, external_irq_found #branch if irq is lt or eq to 31
-
- ## No IRQ pending in interrupt set 0. Try set 1.
-
-#_lwzi %r4, %r4, OCB_ONISR1
- cntlzw %r3, %r4
- addi %r3, %r3, 32
+ li %r4, 64
.endm
diff --git a/pk/ppe42/ppe42.h b/pk/ppe42/ppe42.h
index 91ef5bf4..ecc990e3 100644
--- a/pk/ppe42/ppe42.h
+++ b/pk/ppe42/ppe42.h
@@ -278,10 +278,8 @@ do {*(volatile uint32_t *)(addr) = (data);} while(0)
/// The default thread machine context has MSR[CE], MSR[EE] and MSR[ME] set,
/// and all other MSR bits cleared.
///
-/// The default definition allows critical, non-critical and machine check
-/// exceptions. Debug interrupts are not enabled by default. This definition
-/// can be overriden by the application. If MMU protection is enabled then
-/// the IR/DR bits are also modeably set.
+/// The default definition allows external and machine check exceptions. This
+/// definition can be overriden by the application.
#ifndef PK_THREAD_MACHINE_CONTEXT_DEFAULT
#define PK_THREAD_MACHINE_CONTEXT_DEFAULT \
@@ -573,57 +571,72 @@ __pk_stack_create_initial_frame(PkAddress *stack, size_t *size) \
/// information. Instead it defines an API that the port must provide to the
/// portable kernel.
///
-/// In the PPE42 port, the kernel context is maintained in USPRG0. This
-/// 32-bit value is treated as 5 distinct fields as indicated in the structure
-/// definition. For certain tests it's also helpful to look at the two
-/// interrupt counters as a single 0/non-0 field.
+/// In the PPE42 port, the kernel context is maintained in SPRG0. This
+/// 32-bit value is treated as 6 distinct fields as indicated in the structure
+/// definition.
typedef union {
uint32_t value;
struct {
- /// The critical interrupt nesting level. If this field is non-zero,
- /// then interrupt priority and preemption rules guarantee that a
- /// critical interrupt handler is running, and the \c irq field will
- /// contain the PkIrqId of the currently active critical interrupt.
- unsigned reserved : 8;
-
- /// The non-critical interrupt nesting level. If this field is
- /// non-zero and the \c critical_interrupts field is 0, then interrupt
- /// priority and preemption rules guarantee that a noncritical
- /// interrupt handler is running, and the \c irq field will contain
- /// the PkIrqId of the currently active noncritical interrupt.
- unsigned noncritical_interrupts : 8;
-
- /// The PkIrqId of the currently running (or last run) handler. If
- /// either of the interrupt nesting levels are non-0, then this is the
- /// PkIrqId of the IRQ that is currently executing.
- unsigned irq : 8;
-
/// A flag indicating that PK is in thread mode after a call of
/// pk_start_threads().
unsigned thread_mode : 1;
+ /// If this field is non-zero then PK is processing an interrupt
+ /// and the \c irq field will contain the PkIrqId of the interrupt
+ /// that kicked off interrupt processing.
+ unsigned processing_interrupt : 1;
+
/// The priority of the currently running thread. In an interrupt
/// context, this is the priority of the thread that was interrupted.
- unsigned thread_priority : 7;
+ unsigned thread_priority : 6;
- } fields;
+ /// This bit tracks whether the current context can be discarded or
+ /// if the context must be saved. If the processor takes an interrupt
+ /// and this bit is set, then the current context will be discarded.
+ /// This bit is set at the end of handling an interrupt and prior
+ /// to entering the wait enabled state.
+ unsigned discard_ctx : 1;
- struct {
+ /// The PkIrqId of the currently running (or last run) handler. If
+ /// \c processing_interrupt is set, then this is the
+ /// PkIrqId of the IRQ that is currently executing.
+ unsigned irq : 7;
- unsigned also_ignore : 8;
+ /// Each PPE application will define (or not) the interpretation of
+ /// this field. Since SPRG0 is saved and restored during during thread
+ /// context switches, this field can be used to record the progress of
+ /// individual threads. The kernel and/or application will provide
+ /// APIs or macros to read and write this field.
+ unsigned app_specific : 16;
- /// Used as a 0/non-0 flag for interrupt context.
- unsigned interrupt_context : 8;
+ } fields;
- /// Ignore
- unsigned ignore : 16;
+} __PkKernelContext;
- } merged_fields;
+// These APIs are provided for applications to get and set the app_specific
+// field of the kernel context which is held in sprg0.
-} __PkKernelContext;
+static inline uint16_t ppe42_app_ctx_get(void)
+{
+ __PkKernelContext __ctx;
+ __ctx.value = mfspr(SPRN_SPRG0);
+ return __ctx.fields.app_specific;
+}
+
+static inline void ppe42_app_ctx_set(uint16_t app_ctx)
+{
+ PkMachineContext mctx;
+ __PkKernelContext __ctx;
+ mctx = mfmsr();
+ wrteei(0);
+ __ctx.value = mfspr(SPRN_SPRG0);
+ __ctx.fields.app_specific = app_ctx;
+ mtspr(SPRN_SPRG0, __ctx.value);
+ mtmsr(mctx);
+}
// These APIs are provided to the PK portable kernel by the port.
@@ -642,7 +655,7 @@ typedef union {
({ \
__PkKernelContext __ctx; \
__ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.thread_mode && !__ctx.merged_fields.interrupt_context;})
+ __ctx.fields.thread_mode && !__ctx.fields.processing_interrupt;})
/// PK is executing an interrupt handler of any priority.
@@ -651,28 +664,9 @@ typedef union {
({ \
__PkKernelContext __ctx; \
__ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.merged_fields.interrupt_context;})
-
+ __ctx.fields.processing_interrupt;})
-/// PK is executing a non-critical interrupt handler.
-
-#define __pk_kernel_context_noncritical_interrupt() \
- ({ \
- __PkKernelContext __ctx; \
- __ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.noncritical_interrupts && \
- !__ctx.fields.critical_interrupts;})
-
-/// Return the noncritical interrupt nesting level
-
-#define __pk_noncritical_level() \
- ({ \
- __PkKernelContext __ctx; \
- __ctx.value = mfspr(SPRN_SPRG0); \
- __ctx.fields.noncritical_interrupts; })
-
-
// PK requires the port to define the type PkThreadQueue, which is a
// priority queue (where 0 is the highest priority). This queue must be able
// to handle PK_THREADS + 1 priorities (the last for the idle thread) The
@@ -740,7 +734,7 @@ __pk_thread_queue_count(volatile PkThreadQueue* queue)
PkMachineContext ctx; \
pk_critical_section_enter(&ctx); \
asm volatile ("mr 1, %0; mtlr %1; blrl" : : \
- "r" (__pk_noncritical_stack), \
+ "r" (__pk_kernel_stack), \
"r" (__pk_start_threads)); \
PK_PANIC(PK_START_THREADS_RETURNED); \
} while (0)
@@ -750,8 +744,13 @@ __pk_thread_queue_count(volatile PkThreadQueue* queue)
/// The __PkKernelContext 'thread_mode' bit as a flag
-#define PPE42_THREAD_MODE 0x80
+#define PPE42_THREAD_MODE 0x8000
+#define PPE42_PROC_IRQ 0x4000
+#define PPE42_DISCARD_CTX 0x0080
+#define PPE42_THREAD_MODE_BIT 0
+#define PPE42_PROC_IRQ_BIT 1
+#define PPE42_DISCARD_CTX_BIT 8
#ifndef __ASSEMBLER__
diff --git a/pk/ppe42/ppe42_boot.S b/pk/ppe42/ppe42_boot.S
index 7e8ecee6..32fdfd3a 100644
--- a/pk/ppe42/ppe42_boot.S
+++ b/pk/ppe42/ppe42_boot.S
@@ -115,7 +115,7 @@ __reset_trap:
#sync
## The MSR to be used during the rest of intialization is
- ## established. This MSR should NOT enable critical or non-critical
+ ## established. This MSR should NOT enable
## interrupts, but could enable machine check exceptions.
_liwa %r3, PPE42_MSR_INITIAL
diff --git a/pk/ppe42/ppe42_context.h b/pk/ppe42/ppe42_context.h
index f13ec8a7..2412565b 100644
--- a/pk/ppe42/ppe42_context.h
+++ b/pk/ppe42/ppe42_context.h
@@ -54,59 +54,24 @@
// system-reserved in ABI applications, and is the base for read-only small data
// in EABI applications.
-// A fair amount of complexity is involved in handling the non-critical and
-// critical interrupt levels, and the emphasis on performance of fast-mode
-// interrupt handlers. Several different approaches and philosophies could
-// have been implemented - this is only one. In this implementation
-// critical/non-critical interrupt levels are treated more or less the same,
-// and the interrupt priority is just that - a kind of preemption priority.
-// Critical interrupt handling does have a little less overhead because it
-// does not have a thread scheduling step at the end.
-
-// A full context save takes place in 3 or 4 steps. Thread switches always do
-// steps 1, 2 and 3.
-// 1. The fast context that is always saved in response to every interrupt;
-// 1a. The optional save/update of the kernel context for interrupts.
-// 2. The (volatile - fast) context that is saved if an interrupt handler
-// switches from fast-mode to full-mode.
-// 3. The non-volatile context that is saved when a thread is switched out.
-
// USPRG0 holds the __PkKernelContext structure (defined in ppe42.h) that
// represents the current kernel context. The layout is as follows:
//
// Bits Meaning
// ==============
-// 0:7 The critical interrupt count
-// 8:15 The non-critical interrupt count
-// 16:23 The IRQ currently being processed
-// 24 The 'thread_mode' flag
-// 25:31 The thread priority of the running thread
+// 0 The 'thread_mode' flag
+// 1 The 'processing_interrupt" flag
+// 2:7 The thread priority of the running thread
+// 8 The 'discard_ctx' flag
+// 9:15 The IRQ currently being processed
+// 16:31 The application specific data
//
// When PK is initialized USPRG0 is initialized to 0. When thread-mode is
-// entered (by pk_start_threads()) bit 24 is set to 1. In order to support
-// PgP/OCC firmware, once initialized (with pk_initialize()) PK can simply
+// entered (by pk_start_threads()) bit 0 is set to 1. If desired,
+// once initialized (with pk_initialize()) PK can simply
// handle interrupts, reverting back to the non-thread-mode idle loop when
// there's nothing to do.
//
-// Note that it would require a serious error for the interrupt counts to ever
-// equal or exceed 2**8 as this would imply runaway reentrancy and stack
-// overflow. In fact it is most likely an error if an interrupt handler is
-// ever re-entered while active.
-
-// Registers SRR2 and SRR3 are always saved in IRQ context because
-// __pk_irq_fast2full must save the (volatile - fast) context to provide
-// working registers before it can look at USPRG0 to determine critical
-// vs. non-critical context. However, when restoring a non-critical interrupt
-// or thread these registers need not be restored. SRR2 and SRR3 are never
-// saved or restored for thread context switches, because threads always
-// operate at noncritical level.
-
-// When MMU protection is enabled, relocation/protection is re-established
-// immediately upon entry to the interrupt handler, before any memory
-// operations (load/store) take place. This requires using SPRG0 and SPGR4
-// for temporary storage for noncritical/critical handlers respectively in
-// accordance with the PK conventions for SPRGn usage by fast-mode
-// interrupts.
## ------------------------------------------------------------
## Unused registers for embedded PPE42`
@@ -119,240 +84,77 @@
.set UNUSED_GPR13, 0xd # Dedicated; (E)ABI read-write small data area
## ------------------------------------------------------------
- ## Flags for context push/pop
- ## ------------------------------------------------------------
-
- .set PK_THREAD_CONTEXT, 0
- .set PK_IRQ_CONTEXT, 1
-
- ## ------------------------------------------------------------
- ## The PK fast context layout for Embedded PPE42
+ ## The PK context layout for Embedded PPE42
## ------------------------------------------------------------
- .set PK_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
- .set PK_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_FAST_CTX_GPR3, 0x08 # Volatile; Parameter; Return Value
- .set PK_FAST_CTX_GPR4, 0x0c # Volatile; Parameter
- .set PK_FAST_CTX_GPR5, 0x10 # Volatile; Parameter
- .set PK_FAST_CTX_GPR6, 0x14 # Volatile; Parameter
- .set PK_FAST_CTX_CR, 0x18 # Condition register
- .set PK_FAST_CTX_LR, 0x1c # Link register SPRN 0x008
- .set PK_FAST_CTX_KERNEL_CTX, 0x20 # Saved __PkKernelContext for IRQ
-
- .set PK_FAST_CTX_SIZE, 0x28 # Must be 8-byte aligned
+ .set PK_CTX_GPR1, 0x00 # Dedicated; Stack pointer
+ .set PK_CTX_LINKAGE, 0x04 # Slot for handler to store LR
+ .set PK_CTX_GPR3, 0x08 # Volatile; Parameter; Return Value
+ .set PK_CTX_GPR4, 0x0c # Volatile; Parameter
+ .set PK_CTX_GPR5, 0x10 # Volatile; Parameter
+ .set PK_CTX_GPR6, 0x14 # Volatile; Parameter
+ .set PK_CTX_CR, 0x18 # Condition register
+ .set PK_CTX_LR, 0x1c # Link register
- ## ------------------------------------------------------------
- ## The PK (volatile - fast) context layout for Embedded PPE42
- ## ------------------------------------------------------------
+ .set PK_CTX_GPR7, 0x20 # Volatile; Parameter
+ .set PK_CTX_GPR8, 0x24 # Volatile; Parameter
+ .set PK_CTX_GPR9, 0x28 # Volatile; Parameter
+ .set PK_CTX_GPR10, 0x2c # Volatile; Parameter
+ .set PK_CTX_GPR28, 0x30 # Non-volatile
+ .set PK_CTX_GPR29, 0x34 # Non-volatile
+ .set PK_CTX_GPR30, 0x38 # Non-volatile
+ .set PK_CTX_GPR31, 0x3c # Non-volatile
- .set PK_VOL_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
- .set PK_VOL_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_VOL_FAST_CTX_GPR7, 0x08 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR8, 0x0c # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR9, 0x10 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_GPR10, 0x14 # Volatile; Parameter
- .set PK_VOL_FAST_CTX_XER, 0x18 # Fixed-point exception register SPRN 0x001
- .set PK_VOL_FAST_CTX_CTR, 0x1c # Count register SPRN 0x009
- .set PK_VOL_FAST_CTX_SRR0, 0x20 # Save/restore register 0 SPRN 0x01a
- .set PK_VOL_FAST_CTX_SRR1, 0x24 # Save/restore register 1 SPRN 0x01b
- .set PK_VOL_FAST_CTX_GPR0, 0x28 # Volatile; Language specific
+ .set PK_CTX_XER, 0x40 # Fixed-point exception register
+ .set PK_CTX_CTR, 0x44 # Count register
+ .set PK_CTX_SRR0, 0x48 # Save/restore register 0
+ .set PK_CTX_SRR1, 0x4c # Save/restore register 1
+ .set PK_CTX_GPR0, 0x50 # Volatile; Language specific
+ .set PK_CTX_KERNEL_CTX, 0x54 # Saved __PkKernelContext for IRQ
- .set PK_VOL_FAST_CTX_SIZE, 0x30 # Must be 8-byte aligned
+ .set PK_CTX_SIZE, 0x58 # Must be 8-byte aligned
## ------------------------------------------------------------
- ## The PK non-volatile context layout for Embedded PowerPC
- ## ------------------------------------------------------------
-
- ## The 'preferred form' for stmw is for the LSB of R31 to fall into the
- ## end of a 16-byte aligned block.
-
- .set PK_NON_VOL_CTX_GPR1, 0x00 # Dedicated; Stack Pointer
- .set PK_NON_VOL_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
- .set PK_NON_VOL_CTX_GPR28, 0x08 # Non-volatile
- .set PK_NON_VOL_CTX_GPR29, 0x0c # Non-volatile
- .set PK_NON_VOL_CTX_GPR30, 0x10 # Non-volatile
- .set PK_NON_VOL_CTX_GPR31, 0x14 # Non-volatile
-
- .set PK_NON_VOL_CTX_SIZE, 0x18 # Must be 8-byte aligned
-
- ## ------------------------------------------------------------
- ## Save/restore the fast context
+ ## Push the interrupted context if necessary
+ ##
+ ## This macro saves off some context in preparation for calling
+ ## the pk_ctx_check_discard routine. This is an attempt to use
+ ## the 32 byte cache more efficiently.
##
- ## 11 Instructions, 8 Loads/Stores : If MMU is disabled
- ## 17 Instructions, 8 Loads/Stores : If MMU is enabled
+ ## 8 Instructions
## ------------------------------------------------------------
##
- ## Without MMU support, an EIEIO is always executed at the entry point
- ## to gauarantee that all memory operations (especially MMIO
- ## operations) have completed prior to execution of the interrupt
- ## handler.
- ##
- ## If MMU support is enabled, address translation is re-established
- ## immediately at the entry of each interrupt, prior to performing any
- ## loads or stores. PK currently only supports using the MMU for
- ## protection, not for address translation. Therfore it is 'legal'
- ## to change translation modes a with an MTMSR followed by an
- ## ISYNC. This is much simpler then the complex instruction sequence
- ## that would be required if we had to set up RFI/RFCI sequences to
- ## change the execution context at this point.
- ##
- ## Note that since we are not really doing address translation, it
- ## would also be in keeping with the 'fast interrupt' idea to defer
- ## reenabling translation (protection) until the fast-to-full sequence
- ## was executed for full-mode interrupts, and run fast-mode interrupts
- ## unprotected. However here we chose to run all interrupts with MMU
- ## protection.
- ##
- ## Unfortunately the simple MTMSR;ISYNC sequence exposes a serious bug
- ## in the PPE42 core that causes the stack-pointer store instruction
- ## to generate a seemingly random, *real-mode* address in certain cases
- ## when this instruction in a noncritical interrupt prologue is
- ## interrupted by a critical interrupt. This bug is described in
- ## HW239446. The workaround is to follow the ISYNC sith a SYNC - which
- ## eliminates the problem for reasons still unknown. On the bright side
- ## this SYNC might also serve the same purpose as the EIEIO in the
- ## non-MMU case, guaranteeing that all MMIO has completed prior to the
- ## interrupt handler. However without the initial EIEIO we still
- ## experience failures, so this seemingly redundant instruction also
- ## remains in place. This requirement is assumed to be related to the
- ## HW239446 issue.
-
- .macro _pk_fast_ctx_push
-
- stwu %r1, -PK_FAST_CTX_SIZE(%r1)
-
- stvd %d3, PK_FAST_CTX_GPR3(%r1)
- stvd %d5, PK_FAST_CTX_GPR5(%r1)
+
+ .macro _pk_ctx_push_as_needed branch_addr:req
+ stwu %r1, -PK_CTX_SIZE(%r1)
+ stvd %d3, PK_CTX_GPR3(%r1)
mfcr %r3
mflr %r4
-
- stvd %d3, PK_FAST_CTX_CR(%r1)
-
+ stvd %d3, PK_CTX_CR(%r1)
+ _liw %r3, \branch_addr
+ b ctx_check_discard
.endm
- .macro _pk_fast_ctx_pop
-
- lvd %d3, PK_FAST_CTX_CR(%r1)
-
- mtcr0 %r3
- mtlr %r4
-
- lvd %d3, PK_FAST_CTX_GPR3(%r1)
- lvd %d5, PK_FAST_CTX_GPR5(%r1)
-
- lwz %r1, 0(%r1)
-
- .endm
-
## ------------------------------------------------------------
- ## Save/update the kernel context in response to an interrupt. This is
- ## not part of the fast context save because for external interupts the
- ## IRQ is not determined until later.
+ ## update the kernel context in response to an interrupt.
## ------------------------------------------------------------
- ## The kernel context is saved, then updated with the currently active
- ## IRQ in bits 16:23. The correct interrupt count is incremented and
- ## the context is returned to SPRG0.
-
- .macro _save_update_kernel_context irqreg, ctxreg
-
- #PK_TRACE_NONCRITICAL_IRQ_ENTRY \irqreg, \ctxreg
+ ## The kernel context is updated with the currently active
+ ## IRQ in bits 9:15.
- mfsprg0 \ctxreg
- stw \ctxreg, PK_FAST_CTX_KERNEL_CTX(%r1)
- #rlwimi \ctxreg, \irqreg, 24, 9, 15 //set the irq #
- rlwimi \ctxreg, \irqreg, 8, 16, 23 //set the irq #
- #oris \ctxreg, \ctxreg, 0x4000 //set the 'processing interrupt' PI bit
- addis \ctxreg, \ctxreg, 0x0001 //increment the irq count
+ .macro _update_kernel_context irqreg, ctxreg
+ rlwimi \ctxreg, \irqreg, 16, 9, 15 //set the irq #
+ oris \ctxreg, \ctxreg, 0x4000 //set the 'processing_interrupt' flag
mtsprg0 \ctxreg
- .endm
-
- ## ------------------------------------------------------------
- ## Fast-mode context pop and RF(C)I. This is only used by
- ## interrupt handlers - the thread context switch has its own
- ## code to handle updating USPRG0 for thread mode.
- ## ------------------------------------------------------------
-
- .macro _pk_fast_ctx_pop_exit
-
- .if PK_KERNEL_TRACE_ENABLE
- bl __pk_trace_noncritical_irq_exit
- .endif
-
- lwz %r3, PK_FAST_CTX_KERNEL_CTX(%r1)
- mtsprg0 %r3
- _pk_fast_ctx_pop
- rfi
-
- .endm
-
- ## ------------------------------------------------------------
- ## Save/restore the (volatile - fast) context
- ##
- ## Thread - 15 Instructions, 11 Loads/Stores
- ## IRQ - 19(15) Instructions, 13(11) Loads/Stores
- ## ------------------------------------------------------------
-
- .macro _pk_vol_fast_ctx_push
-
- stwu %r1, -PK_VOL_FAST_CTX_SIZE(%r1)
-
- stw %r0, PK_VOL_FAST_CTX_GPR0(%r1)
- stvd %d7, PK_VOL_FAST_CTX_GPR7(%r1)
- stvd %d9, PK_VOL_FAST_CTX_GPR9(%r1)
-
- mfxer %r7
- mfctr %r8
- mfsrr0 %r9
- mfsrr1 %r10
-
- stvd %d7, PK_VOL_FAST_CTX_XER(%r1)
- stvd %d9, PK_VOL_FAST_CTX_SRR0(%r1)
-
- .endm
-
-
- .macro _pk_vol_fast_ctx_pop
-
- lvd %d7, PK_VOL_FAST_CTX_XER(%r1)
- lvd %d9, PK_VOL_FAST_CTX_SRR0(%r1)
-
- mtxer %r7
- mtctr %r8
- mtsrr0 %r9
- mtsrr1 %r10
-
- lwz %r0, PK_VOL_FAST_CTX_GPR0(%r1)
- lvd %d7, PK_VOL_FAST_CTX_GPR7(%r1)
- lvd %d9, PK_VOL_FAST_CTX_GPR9(%r1)
-
- lwz %r1, 0(%r1)
-
- .endm
-
- ## ------------------------------------------------------------
- ## Save/restore the non-volatile context on the stack
- ##
- ## 2 Instructions, 19 Loads/Stores
- ## ------------------------------------------------------------
-
- .macro _pk_non_vol_ctx_push
-
- stwu %r1, -PK_NON_VOL_CTX_SIZE(%r1)
- stvd %d28, PK_NON_VOL_CTX_GPR28(%r1)
- stvd %d30, PK_NON_VOL_CTX_GPR30(%r1)
-
- .endm
-
-
- .macro _pk_non_vol_ctx_pop
-
- lvd %d28, PK_NON_VOL_CTX_GPR28(%r1)
- lvd %d30, PK_NON_VOL_CTX_GPR30(%r1)
- lwz %r1, 0(%r1)
+#if PK_KERNEL_TRACE_ENABLE
+ mr %r31, \irqreg
+ srwi \ctxreg, \ctxreg, 16
+ PK_KERN_TRACE_ASM16("INTERRUPT_CONTEXT(0x%04x)", \ctxreg)
+ mr \irqreg, %r31
+#endif
.endm
@@ -364,89 +166,33 @@
/// thread->saved_stack_pointer when a thread is fully context-switched out.
typedef struct {
-
- uint32_t r1_nv;
- uint32_t link_nv;
- uint32_t r28;
- uint32_t r29;
- uint32_t r30;
- uint32_t r31;
- uint32_t r1_vf;
- uint32_t link_vf;
- uint32_t r7;
- uint32_t r8;
- uint32_t r9;
- uint32_t r10;
- uint32_t xer;
- uint32_t ctr;
- uint32_t srr0;
- uint32_t srr1;
- uint32_t r0;
- uint32_t pad;
uint32_t r1;
- uint32_t link_fast;
+ uint32_t linkage;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t cr;
uint32_t lr;
- uint32_t sprg0;
-
-} PkThreadContext;
-
-/// PK thread context of an interrupted thread (full-mode handler)
-///
-/// When a thread is interrupted by a full-mode interrupt handler, this is the
-/// layout of the stack area pointed to by either __pk_saved_sp_noncritical
-/// or __pk_saved_sp_critical.
-typedef struct {
-
- uint32_t r1_vf;
- uint32_t link_vf;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
+ uint32_t r28;
+ uint32_t r29;
+ uint32_t r30;
+ uint32_t r31;
+
uint32_t xer;
uint32_t ctr;
uint32_t srr0;
uint32_t srr1;
uint32_t r0;
- uint32_t pad;
- uint32_t r1;
- uint32_t link_fast;
- uint32_t r3;
- uint32_t r4;
- uint32_t r5;
- uint32_t r6;
- uint32_t cr;
- uint32_t lr;
uint32_t sprg0;
-} PkThreadContextFullIrq;
-
-
-/// PK thread context of an interrupted thread (fast-mode handler)
-///
-/// When a thread is interrupted by a fast-mode interrupt handler, this is the
-/// layout of the stack area pointed to by R1 - unless the fast-mode interrupt
-/// handler extends the stack.
-
-typedef struct {
-
- uint32_t r1;
- uint32_t link_fast;
- uint32_t r3;
- uint32_t r4;
- uint32_t r5;
- uint32_t r6;
- uint32_t cr;
- uint32_t lr;
- uint32_t sprg0;
+} PkThreadContext;
-} PkThreadContextFastIrq;
#endif /* __ASSEMBLER__ */
diff --git a/pk/ppe42/ppe42_core.c b/pk/ppe42/ppe42_core.c
index b49b8855..2fa86e68 100644
--- a/pk/ppe42/ppe42_core.c
+++ b/pk/ppe42/ppe42_core.c
@@ -29,64 +29,6 @@ ppe42_timebase_data_t ppe42_tb_data = {0};
PkTimebase ppe42_64bit_timebase = 0;
-/// Enable interrupt preemption
-///
-/// This API can only be called from an interrupt context. Threads will
-/// always be preempted by interrupts unless they explicitly disable
-/// interrupts with the \c pk_interrupt_disable() API. It is legal to call
-/// this API redundantly.
-///
-/// Be careful when enabling interrupt handler preemption that the interrupt
-/// being handled does not/can not trigger again, as this could rapidly lead
-/// to stack overflows.
-///
-/// Return values other then PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_ILLEGAL_CONTEXT The API call was not made from an interrupt
-/// context.
-
-int
-pk_interrupt_preemption_enable()
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
- }
-
- wrteei(1);
-
- return PK_OK;
-}
-
-
-/// Disable interrupt preemption
-///
-/// This API can only be called from an interrupt context. Threads will
-/// always be preempted by interrupts unless they explicitly disable
-/// interrupts with the \c pk_interrupt_disable() API. It is legal to call
-/// this API redundantly.
-///
-/// Return values other then PK_OK (0) are errors; see \ref pk_errors
-///
-/// \retval 0 Successful completion
-///
-/// \retval -PK_ILLEGAL_CONTEXT The API call was not made from an interrupt
-/// context.
-
-int
-pk_interrupt_preemption_disable()
-{
- if (PK_ERROR_CHECK_API) {
- PK_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
- }
-
- wrteei(0);
-
- return PK_OK;
-}
-
-
#if PK_TIMER_SUPPORT
// The tickless kernel timer mechanism for PPE42
@@ -160,10 +102,7 @@ __pk_schedule_hardware_timeout(PkTimebase timeout)
}
else
{
- //FIXME: We have to multiply the difference by 16
- //to workaround missing support for selecting the
- //external dec_timer clock source for the decrementer.
- diff = (timeout - now) << 4;
+ diff = (timeout - now);
if (diff > 0xfffffffful)
{
diff --git a/pk/ppe42/ppe42_exceptions.S b/pk/ppe42/ppe42_exceptions.S
index 19100e1e..b3ce3551 100644
--- a/pk/ppe42/ppe42_exceptions.S
+++ b/pk/ppe42/ppe42_exceptions.S
@@ -74,9 +74,7 @@ __instruction_storage:
.org __vectors + 0x00A0
__external_interrupt_vector:
- _pk_fast_ctx_push
-
- b __external_interrupt
+ _pk_ctx_push_as_needed __get_ext_irq
############################################################
# 0x00C0 : Alignment Exception
@@ -99,9 +97,7 @@ __alignment_exception:
### switches between threads. They can also be used by the code
### to signal an exception in an error scenario.
__program_exception:
- _pk_fast_ctx_push
-
- b program_exception_handler
+ _pk_ctx_push_as_needed program_exception_handler
############################################################
@@ -110,9 +106,7 @@ __program_exception:
.org __vectors + 0x0100
__dec_interrupt:
- _pk_fast_ctx_push
- li %r3, PPE42_IRQ_DEC
- b dec_handler
+ _pk_ctx_push_as_needed dec_handler
############################################################
# 0x0120 : FIT Interrupts
@@ -120,8 +114,7 @@ __dec_interrupt:
.org __vectors + 0x0120
__fit_interrupt:
- #b fit_handler
- b .
+ _pk_ctx_push_as_needed fit_handler
############################################################
# 0x0140 : Watchdog Interrupts
@@ -129,183 +122,7 @@ __fit_interrupt:
.org __vectors + 0x0140
__watchdog_interrupt:
- #b watchdog_handler
- b .
-
-
-
-
-
- .global __pk_irq_fast2full
-__pk_irq_fast2full:
-
- ## Convert a fast-mode to a full-mode interrupt by saving the
- ## (volatile - fast) context, and switching to the appropriate system
- ## stack.
-
- ## Entry invariants:
- ## 1. The SP/stack must be exactly as it was when the fast-mode
- ## handler was entered.
- ## 2. No changes have been made to the MSR - the interrupt level must
- ## remain disabled.
- ## 3. The handler owns the fast context and has not modified the other
- ## register context. This routine can only use the (volatile -
- ## fast) register context.
-
- ## 41 (linear) instructions plus alignmenmt
-
- ## Start by pushing the (volatile - fast) context. Technically we also
- ## need to save the CR as our contract with the handler is not to
- ## disturb any of its register state.
-
- _pk_vol_fast_ctx_push
- mfcr %r10
- mfsprg0 %r8
-
- ## At entry here the (volatile - fast) context has been pushed,
- ## R8 has SPRG0 and R10 contains the saved CR.
-
- ## Note that it would violate a kernel/API invariant if this routine
- ## were entered from outside an interrupt context.
-
-fast2full_noncritical:
-
- ## switch to the kernel stack if we haven't already done so. (SPRG0[RI] = 0)
- #bb1wi %r8, RI_BIT, 1f //branches if the RI_BIT is '1'
-
- extrwi %r9, %r8, 8, 8
- cmpwi %r9, 1
- bne 1f
-
- _stwsd %r1, __pk_saved_sp_noncritical
- _lwzsd %r1, __pk_noncritical_stack
-
-1:
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r8, PI_BIT, 2f //branches if PI_BIT is '1'
- cmpwi %r9, 0
- bne 2f
- _pk_panic PPE42_IRQ_FAST2FULL_INVARIANT
-2:
- .endif
-
- mtcr0 %r10
- blr
-
- .global __pk_irq_full_mode_exit
-__pk_irq_full_mode_exit:
-
- ## Exit a full-mode handler.
-
- ## Entry invariants:
- ## 1. The SP/stack must be in exactly the same state it was left in at
- ## the exit of __pk_irq_fast2full.
- ## 2. It is assumed the the preemption rules of PK have been followed
- ## - in particular that critical handlers have not enabled
- ## non-critical interrupts.
-
- ## We can freely modify the volatile context here - the handler is done
- ## and we will restore the interrupted volatile context.
-
- ## 22 linear instructions
-
- ## If the critical count is non-zero, then the PK preemption rules
- ## guarantee that we are exiting from a critical interrupt
- ## handler. This test is safe to make even if critical interrupts are
- ## enabled, because the variable is set exactly once in a critical
- ## section.
-
- mfsprg0 %r3
-
- ## Exiting a full-mode non-critical handler is more complex than the
- ## critical case, because the handler may have made a new
- ## highest-priority thread runnable and we may need to go through a
- ## delayed scheduling step.
-
- ## Note that the idle thread is treated as a special case. The idle
- ## thread has no permanent register context. To avoid having to
- ## allocate a stack area for the idle thread, the idle thread
- ## 'uses' the non-critical stack. When the idle thread is interrupted
- ## the (redundant) context is pushed, but is then effectively lost.
- ## Whenever we restore the idle thread we simply reenter the idle
- ## thread entry point.
-
- ## At entry:
- ## 1. R3 holds the value of SPRG0 (__PkKernelContext)
-
- ## 33 linear instructions.
-
-full_exit_noncritical:
-
- ## Enter a critical section for the return from interrupt, in the event
- ## that the handler enabled preemption.
-
- _pk_critical_section_enter %r4, %r5
-
- ## If the non-critical count is > 1 then this is a nested interrupt
- ## and we can simply pop the context and RFI.
-
- extrwi. %r4, %r3, 8, 8
-
- ## If SPRG0[RI] = 1 then this is a recursive interrupt
- ## and we can simply pop the context and RFI. Note that it would
- ## violate a kernel/API invariant if this routine were entered from
- ## outside an interrupt context (interrupt level == 0).
-
- .if (PK_ERROR_CHECK_KERNEL | PK_ERROR_CHECK_API)
- #bb1wi %r3, PI_BIT, 1f //branch if the PI bit is set
- bne 1f
- _pk_panic PPE42_IRQ_FULL_EXIT_INVARIANT
-1:
- .endif
-
- cmpwi %r4, 1
- bne exit_noncritical_without_switch
-
- ## Otherwise, restore the saved stack pointer and continue.
-
- _lwzsd %r1, __pk_saved_sp_noncritical
-
- ## If we are not in thread mode (i.e., we took an interrupt in an
- ## interupt-only configuration of PK or after pk_initialize() but
- ## before pk_start_threads) simply pop the context and RFI - in this
- ## case we'll most likely be returning to main() or the non-thread-mode
- ## idle thread.
-
- andi. %r4, %r3, PPE42_THREAD_MODE
- beq exit_noncritical_without_switch
-
- ## Now, check for a delayed context switch. If none is pending, we can
- ## exit (after a check for the idle thread special case).
-
- _lwzsd %r3, __pk_delayed_switch
- cmpwi %r3, 0
- bne noncritical_switch
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
-
-exit_noncritical_without_switch:
- _pk_vol_fast_ctx_pop
- b fast_exit_noncritical
-
- ## The non-critical interrupt activated a delayed context switch. The
- ## C-level code has taken care of the scheduling decisions - we simply
- ## need to implement them here.
-
-noncritical_switch:
-
- ## Clear the delayed switch flag and go to the context switch code to
- ## finish the switch.
-
- li %r3, 0
- _stwsd %r3, __pk_delayed_switch
-
- b thread_save_non_volatile_and_switch
-
-
+ _pk_ctx_push_as_needed watchdog_handler
@@ -317,30 +134,33 @@ noncritical_switch:
__pk_idle_thread:
- ## The idle thread 'uses' the non-critical stack. Any register context
+ ## The idle thread 'uses' the kernel stack. Any register context
## pushed here is redundant and is wiped out/ignored every time the
## idle thread is re-scheduled.
## The idle thread simply establishes a default machine context and
## enters the wait-enable state. The idle thread is always entered
- ## with non-critical interrupts disabled.
+ ## with interrupts disabled.
##
## The kernel context is initialized to indicate that the idle thread
- ## is running - the idle thread priority is PK_THREADS, and the
- ## 'thread-mode' bit is asserted as well.
+ ## is running - the idle thread priority is PK_THREADS, the
+ ## 'thread-mode' bit is asserted and so is the 'discard-ctx" bit.
+ ## In addition, the previous kernel context is stored in the lower
+ ## 16 bits.
##
## This loop can also be called from the PK bootloader if main()
## returns - in which case we don't muck with the SPRG0 or the stack
## pointer.
-
- li %r3, (PK_THREADS | PPE42_THREAD_MODE)
+ mfsprg0 %r3
+ srwi %r3, %r3, 16
+ oris %r3, %r3, (PK_THREADS << 8) | PPE42_THREAD_MODE | PPE42_DISCARD_CTX
mtsprg0 %r3
- _lwzsd %r1, __pk_noncritical_stack
+ _lwzsd %r1, __pk_kernel_stack
-__pk_idle_thread_from_bootloader:
+__pk_idle_thread_from_bootloader:
+
+ PK_KERN_TRACE_ASM16("ENTER_IDLE_STATE")
- #li %r3, PK_THREADS
- #PK_TRACE_THREAD_SWITCH %r3, %r4
_lwzsd %r3, __pk_thread_machine_context_default
_oriwa %r3, %r3, MSR_WE
mtmsr %r3
@@ -364,27 +184,14 @@ dec_handler:
## interrupt by writing the DIS back into the TSR before calling the
## handler. The timer handler does not take any arguments.
- _save_update_kernel_context %r3, %r4
+ li %r4, PPE42_IRQ_DEC
+ _update_kernel_context %r4, %r3
_liwa %r3, TSR_DIS
mttsr %r3
- _pk_irq_fast2full __pk_timer_handler
-
-
-
-
-
- ## Exit traces are moved here because the code area (0x100 bytes)
- ## reserved for individual interrupts is overflowing when tracing is
- ## enabled. This is kind of a hack: We know that this trace only
- ## occurs when we're about to exit the fast context, at a place
- ## where we can use any of the fast registers.
-#if 0
-__pk_trace_noncritical_irq_exit:
- #PK_TRACE_NONCRITICAL_IRQ_EXIT %r3, %r4
- blr
-#endif
+ bl __pk_timer_handler
+ b check_for_ext_interrupt
program_exception_handler:
## first check if exception was caused by an illegal 'sc' instruction
@@ -393,46 +200,14 @@ program_exception_handler:
cmpwbeq %r3, %r4, __sc_helper
_pk_panic PPE42_ILLEGAL_INSTRUCTION
- ## SRR0 is currently pointing to the 'sc' instruction. We need to advance it
+ ## Saved SRR0 is currently pointing to the 'sc' instruction. We need to advance it
## to the next instruction so that we don't end up in an endless loop (something
## that the ppc sc instruction does automatically).
__sc_helper:
- mfsrr0 %r3
- addi %r3, %r3, 4
- mtsrr0 %r3
-
-__system_call:
-
- ## The program exception is used by PK as a handy way to start a
- ## context switch, as the continuation address and MSR of the thread to
- ## be swapped out are saved in SRR0 and SRR1.
-
- ## Non-critical interrupts are disabled at entry.
-
- ## Begin by saving the volatile context of the current thread.
- ## NOTE: fast context has already been saved prior to branching here.
-
- _pk_vol_fast_ctx_push
-
-thread_save_non_volatile_and_switch:
-
- ## Finish the thread context save by pushing the non-volatile context
- ## and saving the resulting stack pointer in the thread structure. If
- ## the current thread is the idle thread this step is bypassed.
-
- ## This symbol is also used as an entry point by the non-critical
- ## interrupt handler - non-critical interrupts are disabled here.
-
- _lwzsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_next_thread_resume
-
- _pk_non_vol_ctx_push
- stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
-
- ## The next thread becomes the current thread, and we switch to its
- ## stack - unless the new thread is the idle thread, in which case it
- ## (the idle thread) is simply resumed.
+ mfsrr0 %r4
+ _lwzsd %r3, __pk_saved_sp
+ addi %r4, %r4, 4
+ stw %r4, PK_CTX_SRR0(%r3)
.global __pk_next_thread_resume
__pk_next_thread_resume:
@@ -440,38 +215,36 @@ __pk_next_thread_resume:
_lwzsd %r3, __pk_next_thread
_stwsd %r3, __pk_current_thread
- cmpwi %r3, 0
- beq __pk_idle_thread
+ ## Enter the wait enabled state if the thread pointer is null
+ bwz %r3, __pk_idle_thread
+ ## switch to the new thread stack
lwz %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
- ## Restore the thread context and resume the new thread. The kernel
- ## context in thread mode is simply the thread priority OR'ed with the
- ## thread-mode flag. All other fields are cleared.
+ ## load sprg0 from the stack and update the thread priority
+ ## in case it changed.
+restore_and_update_sprg0:
+ _lbzsd %r31, __pk_next_priority
- _pk_non_vol_ctx_pop
- _pk_vol_fast_ctx_pop
-
- _lbzsd %r3, __pk_next_priority
- #PK_TRACE_THREAD_SWITCH %r3, %r4
- ori %r3, %r3, PPE42_THREAD_MODE
- mtsprg0 %r3
+ PK_KERN_TRACE_ASM16("RESUME_THREAD(%d)", %r31)
- _pk_fast_ctx_pop
- rfi
+ lwz %r3, PK_CTX_KERNEL_CTX(%r1)
+ rlwimi %r3, %r31, 24, 2, 7
+ mtsprg0 %r3
+ b ctx_pop
+
fit_handler:
- ## The FIT handler is user defined, and is a fast-mode handler. By
+ ## The FIT handler is user defined. By
## convention the kernel clears the interrupt by writing the FIS back
## into the TSR.
- _pk_fast_ctx_push
-
- _lwzsd %r3, __ppe42_fit_arg
li %r4, PPE42_IRQ_FIT
- _save_update_kernel_context %r4, %r6
+ _update_kernel_context %r4, %r3
+
+ _lwzsd %r3, __ppe42_fit_arg
_liwa %r6, TSR_FIS
mttsr %r6
@@ -480,18 +253,15 @@ fit_handler:
mtlr %r6
blrl
- b fast_exit_noncritical
+ b check_for_ext_interrupt
watchdog_handler:
## Watchdog setup is described in the PK Specification.
## The kernel clears TSR[WIS] prior to calling the handler.
- ## The watchdog handler is a critical, fast-mode handler.
- _pk_fast_ctx_push
-
- li %r3, PPE42_IRQ_WATCHDOG
+ li %r4, PPE42_IRQ_WATCHDOG
- _save_update_kernel_context %r3, %r6
+ _update_kernel_context %r4, %r3
_liwa %r6, TSR_WIS
mttsr %r6
@@ -500,68 +270,99 @@ watchdog_handler:
mtlr %r6
blrl
- b .
+ b check_for_ext_interrupt
-#if 0
-debug_handler:
+ ## Check if we can disard the interrupted context.
+ ## This routine expects r3, r4, lr, and cr to already be pushed.
+ ## It also expects r3 to hold the address of the function to jump
+ ## to after the interrupted context has been pushed (if necessary).
- ## PK does nothing upon reception of the debug interrupt other
- ## than calling the handler (if non-0). The debug handler is a
- ## fast-mode handler.
+ .align 5
+ctx_check_discard:
- _pk_fast_ctx_push
+ ## Prepare to jump to the branch function that was passed in
+ mtlr %r3
- li %r3, PPE42_IRQ_DEBUG
-
- _save_update_kernel_context %r3, %r6
+ ## Check if the DISCARD_CTX bit is set in the kernel context
+ mfsprg0 %r3
+ bb0wi %r3, PPE42_DISCARD_CTX_BIT, ctx_continue_push
- _lwzsd %r6, __ppe42_debug_routine
- cmpwi %r6, 0
- mtlr %r6
- beq debug_exit
- blrl
-
-debug_exit:
- b fast_exit_critical
-#endif
-
- .align 5
-__external_interrupt:
+ctx_discard:
+ ## DISCARD_CTX bit was set. Discard stack and branch to interrupt
+ ## handler code
+ addi %r1, %r1, PK_CTX_SIZE
+ blr
- ## The non-critical interrupt handler entry point is re-entrant - A
- ## handler may allow preemption, which could cause another entry here.
+ ## DISCARD_CTX bit was not set. Continue saving full context.
+ ## (r3, r4, lr, and cr have already been saved for us) and
+ ## r3 contains the interrupted kernel context
+
+ctx_continue_push:
+
+ stvd %d5, PK_CTX_GPR5(%r1)
+ stvd %d7, PK_CTX_GPR7(%r1)
+ stvd %d9, PK_CTX_GPR9(%r1)
+ stvd %d28, PK_CTX_GPR28(%r1)
+ stvd %d30, PK_CTX_GPR30(%r1)
+ mfxer %r5
+ mfctr %r6
+ stvd %d5, PK_CTX_XER(%r1)
+ mfsrr0 %r7
+ mfsrr1 %r8
+ stvd %d7, PK_CTX_SRR0(%r1)
+ stw %r0, PK_CTX_GPR0(%r1)
+ stw %r3, PK_CTX_KERNEL_CTX(%r1)
- ## Entry invariants:
- ## 1. Non-critical interupts are disabled;
- ## 2. The SP points to a thread stack or the non-critical stack.
+ ## If the 'processing interrupt' bit is set then we were already
+ ## using the kernel stack and don't need to modify or save the current
+ ## stack pointer.
+ bb1wi %r3, PPE42_PROC_IRQ_BIT, ctx_push_completed
- ## Since fast-mode handlers can not use PK services or alter the
- ## machine context, the exit of a fast mode handler is a simple RF(C)I.
+ ## load the pointer to the current thread control block
+ _lwzsd %r4, __pk_current_thread
- ## Begin by pushing the fast context on the current stack.
-
- ## _pk_fast_ctx_push was called prior to branching here. No need to call it here.
+ ## don't save the stack pointer in the thread control block
+ ## if the current thread was the idle thread (null pointer)
+ bwz %r4, switch_to_kernel_stack
+
+ ## we interrupted a bonafide thread, so save off the stack
+ ## pointer
+ stw %r1, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r4)
+
+switch_to_kernel_stack:
+ _stwsd %r1, __pk_saved_sp
+ _lwzsd %r1, __pk_kernel_stack
+
+ctx_push_completed:
+ blr
- ## Load the base address for the external interrupt table
+__get_ext_irq:
+
+ ## Entry invariants:
+ ## 1. external interupts are disabled;
+ ## 2. previous context has ben saved off
+ ## 3. r3 contains the kernel context
+ ## 4. r1 points to the kernel stack
- ## TODO: This is HW Macro specific code that is responsible for finding the
+ ## This is HW Macro specific code that is responsible for finding the
## IRQ # and storing it in r4 (phantom IRQ's are assigned a value of EXTERNAL_IRQS).
hwmacro_get_ext_irq
## An active or phantom IRQ was found.
+ ## R3 has the context of the interrupted thread or bottom half
## R4 has the IRQ number.
## The IRQ is converted into a pointer to an 8-byte handler
## structure, and the handler is dispatched. The call is made with the
## parameters:
- ## R3 = private
+ ## R3 = private data ptr
## R4 = irq
-external_irq_found:
+call_external_irq_handler:
- _save_update_kernel_context %r4, %r5
+ _update_kernel_context %r4, %r3
slwi %r3, %r4, 3 //multiply the irq# by 8
_liw %r6, __ppe42_irq_handlers
lwzx %r5, %r6, %r3
@@ -570,12 +371,120 @@ external_irq_found:
mtlr %r5
blrl
- ## Pop the stack/RFI when (if) it returns here.
+ ## Once the interrupt handler returns, check if any interrupts are
+ ## waiting and handle them now.
+
+check_for_ext_interrupt:
+
+ ## Set the CTX_DISCARD bit in the kernel context so that if there is
+ ## an interrupt it will not bother saving the full context.
+ mfsprg0 %r31
+ oris %r31, %r31, PPE42_DISCARD_CTX
+ mtsprg0 %r31
+
+ ###### Enable/Disable External Interrupts #####
+ wrteei 1
+ wrteei 0
+
+ ## If we made it this far, there must not be any interrupts pending.
+ ## If bottom half processing was interrupted we need to restore it
+check_interrupted_bh:
+
+ ## If the thread ID is 33 then the bottom half handler was interrupted
+ ## and needs to be restored.
+ extrwi %r4, %r31, 6, 2
+ cmpwi %r4, 33
+ beq ctx_pop_with_sprg0
+
+check_for_bh:
+ ## if the bottom half queue is pointing to itself then the queue is
+ ## empty and there are no bottom halves that need processing.
+ _lwzsd %r4, _pk_bh_queue
+ lwz %r5, 0(%r4)
+ cmplwbeq %r4, %r5, restore_interrupted_sp
+
+process_bottom_halves:
+ ## Clear the CTX_DISCARD bit so that interrupted bottom half context
+ ## will be saved in case an interrupt occurs after this point. Also
+ ## set the thread ID to 33 so that we know to restore the bottom half
+ ## context that was interrupted.
+ rlwinm %r3, %r31, 0, 9, 1 //clear thread id + discard bit
+ oris %r3, %r3, 0x2100 //set thread id to 33
+ mtsprg0 %r3 //set bottom half context
+
+ ## branch to a C function that processes bottom halves
+ wrteei 1
+ bl _pk_process_bh
+ wrteei 0
+
+ ## restore the previous kernel context (with discard bit set)
+ mtsprg0 %r31
+
+restore_interrupted_sp:
+ ## restore the interrupted thread stack pointer
+ _lwzsd %r1, __pk_saved_sp
+
+ ## If we are not in thread mode (i.e., we took an interrupt in an
+ ## interupt-only configuration of PK or after pk_initialize() but
+ ## before pk_start_threads) simply pop the context and RFI - in this
+ ## case we'll most likely be returning to main() or the non-thread-mode
+ ## idle thread.
+
+check_thread_mode:
+ bb0wi %r31, PPE42_THREAD_MODE_BIT, ctx_pop_with_sprg0
-fast_exit_noncritical:
+ ## Check if external interrupt activated a delayed context switch. The
+ ## C-level code has taken care of the scheduling decisions - we simply
+ ## need to implement them here.
+check_for_ctx_switch:
+
+ _lwzsd %r3, __pk_delayed_switch
+ bwz %r3, check_for_idle_thread
+
+ ## Clear the delayed switch flag and go to the context switch code to
+ ## finish the switch.
+
+ li %r3, 0
+ _stwsd %r3, __pk_delayed_switch
- _pk_fast_ctx_pop_exit
+ b __pk_next_thread_resume
+
+ ## check if we should switch to the wait enabled state (idle)
+check_for_idle_thread:
+ _lwzsd %r3, __pk_current_thread
+ bwz %r3, __pk_idle_thread
+
+ctx_pop_with_sprg0:
+ ## we must ensure that interrupts are disabled while restoring context
+ ##
+ ## restore sprg0 from the saved context
+ lwz %r0, PK_CTX_KERNEL_CTX(%r1)
+ mtsprg0 %r0
+#if PK_KERNEL_TRACE_ENABLE
+ srwi %r0, %r0, 16
+ PK_KERN_TRACE_ASM16("RESUME_CONTEXT(0x%04x)", %r0)
+#endif
+ctx_pop:
+ lwz %r0, PK_CTX_GPR0(%r1)
+ lvd %d7, PK_CTX_SRR0(%r1)
+ mtsrr1 %r8
+ mtsrr0 %r7
+ lvd %d5, PK_CTX_XER(%r1)
+ mtctr %r6
+ mtxer %r5
+ lvd %d30, PK_CTX_GPR30(%r1)
+ lvd %d28, PK_CTX_GPR28(%r1)
+ lvd %d9, PK_CTX_GPR9(%r1)
+ lvd %d7, PK_CTX_GPR7(%r1)
+ lvd %d5, PK_CTX_GPR5(%r1)
+ lvd %d3, PK_CTX_CR(%r1)
+ mtlr %r4
+ mtcr %r3
+ lvd %d3, PK_CTX_GPR3(%r1)
+ addi %r1, %r1, PK_CTX_SIZE
+ rfi
+
/// \endcond
diff --git a/pk/ppe42/ppe42_init.c b/pk/ppe42/ppe42_init.c
index 52659aab..a832e620 100644
--- a/pk/ppe42/ppe42_init.c
+++ b/pk/ppe42/ppe42_init.c
@@ -16,7 +16,7 @@
// Note that __ppe42_system_setup() is called from the PK bootloader early
// in the initialization, at a point before the aplication has enabled
-// critical or external interruts.
+// interrupts.
// This function is expected to be defined by the macro specific code (GPE, CME, SBE)
void __hwmacro_setup(void);
@@ -38,13 +38,14 @@ __ppe42_system_setup()
__ppe42_irq_handlers[irq].handler = __ppe42_phantom_irq_handler;
// Initialize special interrupt handlers
-/*
+
__ppe42_fit_routine = __ppe42_default_irq_handler;
__ppe42_fit_arg = 0;
__ppe42_watchdog_routine = __ppe42_default_irq_handler;
__ppe42_watchdog_arg = 0;
+/*
__ppe42_debug_routine = __ppe42_default_irq_handler;
__ppe42_debug_arg = 0;
*/
diff --git a/pk/ppe42/ppe42_irq.h b/pk/ppe42/ppe42_irq.h
index 6567af64..89948d60 100644
--- a/pk/ppe42/ppe42_irq.h
+++ b/pk/ppe42/ppe42_irq.h
@@ -139,79 +139,6 @@ UNLESS__PPE42_IRQ_CORE_C__(extern)
volatile
void* __ppe42_debug_arg;
-
-// Note: Why PK_IRQ_FAST2FULL (below) is implemented so strangely.
-//
-// I am adamant that I want to have a a macro in the 'C' environment to create
-// these bridge functions. However the limitations of the C preprocessor and
-// the intelligence of the GCC 'asm' facility consipre against a
-// straightforward solution. The only way that I was able to find to get
-// naked assembly code into the output stream is to use 'asm' with simple
-// strings - I couldn't make it work with any kind of argument, as 'asm' would
-// reinterpret the arguments and resulting assembler code in various ways.
-//
-// There is another alternative that I tried wherby I created a subroutine
-// call and then filled in the subroutine body with 'asm' code. However, the
-// subroutine wrapper that GCC creates only works for PowerPC fast-mode
-// handlers if GCC is invoked with optimization, which ensures that the
-// wrapper doesn't touch the stack pointer or other registers. True, we'll
-// always use optimization, but I did not want to have to make this
-// requirement for using this macro.
-
-/// This macro creates a 'bridge' handler that converts the initial fast-mode
-/// IRQ dispatch into a call of a full-mode IRQ handler. The full-mode
-/// handler is defined by the user (presumably as a \c C subroutine) and has
-/// the same prototype (type PkIrqHandler) as the fast handler.
-///
-/// \param fast_handler This will be the global function name of the fast
-/// IRQ handler created by this macro. This is the symbol
-/// that should be passed in as the \a handler argument
-/// of \c pk_irq_setup() and \c pk_irq_handler_set().
-///
-/// \param full_handler This is the name of the user-defined full-mode
-/// handler which is invoked through this bridge.
-///
-/// \e BUG \e ALERT : Beware of passing the \c full_handler to IRQ setup
-/// APIs. This won't be caught by the compiler (because the \c full_handler
-/// has the correct prototype) and will lead to nasty bugs. Always pass in
-/// the \c fast_handler symbol to IRQ setup APIS.
-///
-/// The code stream injected into the GCC assembler output in response to
-///
-/// PK_IRQ_FAST2FULL(fast_handler, full_handler)
-///
-/// is (comments added for clarification) :
-///
-/// \code
-/// .text
-/// .global fast_handler
-/// .align 5 # Hard-coded PPE42 cache-line alignment
-/// fast_handler = . # Can't macro expand LABEL: - this is equivalent
-/// bl __pk_irq_fast2full # The fast-mode to full-mode conversion sequence
-/// bl full_handler
-/// b __pk_irq_full_mode_exit
-/// \endcode
-///
-/// The macro also declares the prototype of the fast handler:
-///
-/// \code
-/// PK_IRQ_HANDLER(fast_handler);
-/// \endcode
-///
-
-#define PK_IRQ_FAST2FULL(fast_handler, full_handler) \
- PK_IRQ_HANDLER(fast_handler); \
- __PK_IRQ_FAST2FULL(.global fast_handler, fast_handler = ., bl full_handler)
-
-#define __PK_IRQ_FAST2FULL(global, label, call) \
-asm(".text"); \
-asm(#global); \
-asm(".align 5"); \
-asm(#label); \
-asm("bl __pk_irq_fast2full"); \
-asm(#call); \
-asm("b __pk_irq_full_mode_exit");
-
#endif /* __ASSEMBLER__ */
// It's hard to be portable and get all of the definitions and headers in the
@@ -228,45 +155,6 @@ asm("b __pk_irq_full_mode_exit");
/// \page ppe42_irq_macros_page PPE42 PK IRQ Assembler Macros
///
///
-/// \section fast2full_asm Fast-Mode to Full-Mode Handler Conversion
-///
-/// This macro produces the calling sequence required to convert a
-/// fast-mode interrupt handler to a full-mode interrupt handler. The
-/// full-mode handler is implemented by another subroutine. The
-/// requirements for invoking this macro are:
-///
-/// \li The stack pointer and stack must be exactly as they were when the
-/// fast-mode handler was entered.
-///
-/// \li No changes have been made to the MSR - the interrupt level must
-/// remain disabled.
-///
-/// \li The handler owns the fast context and has not modified the other
-/// register context. The conversion process will not modify any
-/// register in the fast context (other than the LR used for
-/// subroutine linkage).
-///
-/// The final condition above means that the \a full_handler will
-/// begin with the fast-mode context exactly as it was (save for LR)
-/// at conversion, including the contents of GPR3-7 (the first 5
-/// PowerPC ABI paramater passing registers) and the entire CR.
-///
-/// Forms:
-///
-/// \c _pk_irq_fast2full \a full_handler
-/// \cond
-
-#ifdef __ASSEMBLER__
-
- .macro _pk_irq_fast2full full_handler
- bl __pk_irq_fast2full
- bl \full_handler
- b __pk_irq_full_mode_exit
- .endm
-
-#endif /* __ASSEMBLER__ */
-
-/// \endcond
#ifndef __ASSEMBLER__
diff --git a/pk/ppe42/ppe42_irq_core.c b/pk/ppe42/ppe42_irq_core.c
index 8e8f29f5..791f935d 100644
--- a/pk/ppe42/ppe42_irq_core.c
+++ b/pk/ppe42/ppe42_irq_core.c
@@ -32,8 +32,8 @@ __ppe42_default_irq_handler(void* arg, PkIrqId irq)
/// This function is installed by default to handle the case that the
-/// interrupt dispatch code is entered in response to an external critical or
-/// non-critical interrupt, but no interrupt is found pending in the interrupt
+/// interrupt dispatch code is entered in response to an external
+/// interrupt, but no interrupt is found pending in the interrupt
/// controller. This should never happen, as it would indicate that a
/// 'glitch' occurred on the external interrupt input
/// to the PPE42 core.
diff --git a/pk/ppe42/ppe42_thread_init.S b/pk/ppe42/ppe42_thread_init.S
index 6e6c34fe..7185f7c7 100644
--- a/pk/ppe42/ppe42_thread_init.S
+++ b/pk/ppe42/ppe42_thread_init.S
@@ -48,54 +48,56 @@ __pk_thread_context_initialize:
stw %r7, \prefix\reg(%r6)
.endm
- ## Initialize a fast context on the thread stack. The CR is cleared,
+ ## Initialize volatile context on the thread stack. The CR is cleared,
## the LR = pk_complete(), R3 has the private parameter.
lwz %r6, PK_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
- stwu %r6, -PK_FAST_CTX_SIZE(%r6)
+ stwu %r6, -PK_CTX_SIZE(%r6)
li %r7, 0
- stw %r7, PK_FAST_CTX_CR(%r6)
+ stw %r7, PK_CTX_CR(%r6)
_liw %r7, pk_complete
- stw %r7, PK_FAST_CTX_LR(%r6)
+ stw %r7, PK_CTX_LR(%r6)
- stw %r5, PK_FAST_CTX_GPR3(%r6)
+ stw %r5, PK_CTX_GPR3(%r6)
- _gpr_init PK_FAST_CTX_GPR, 4, 0x0404
- _gpr_init PK_FAST_CTX_GPR, 5, 0x0505
- _gpr_init PK_FAST_CTX_GPR, 6, 0x0606
+ _gpr_init PK_CTX_GPR, 4, 0x0404
+ _gpr_init PK_CTX_GPR, 5, 0x0505
+ _gpr_init PK_CTX_GPR, 6, 0x0606
- ## Initialize the (volatile - fast) context on the thread stack. XER
- ## and CTR are clear, SRR0 = thread_routine, SRR1 = default machine
+ ## XER and CTR are clear, SRR0 = thread_routine, SRR1 = default machine
## context.
- stwu %r6, -PK_VOL_FAST_CTX_SIZE(%r6)
-
li %r7, 0
- stw %r7, PK_VOL_FAST_CTX_XER(%r6)
- stw %r7, PK_VOL_FAST_CTX_CTR(%r6)
+ stw %r7, PK_CTX_XER(%r6)
+ stw %r7, PK_CTX_CTR(%r6)
- stw %r4, PK_VOL_FAST_CTX_SRR0(%r6)
+ stw %r4, PK_CTX_SRR0(%r6)
_lwzsd %r7, __pk_thread_machine_context_default
- stw %r7, PK_VOL_FAST_CTX_SRR1(%r6)
+ stw %r7, PK_CTX_SRR1(%r6)
- _gpr_init PK_VOL_FAST_CTX_GPR, 0, 0x0000
- _gpr_init PK_VOL_FAST_CTX_GPR, 7, 0x0707
- _gpr_init PK_VOL_FAST_CTX_GPR, 8, 0x0808
- _gpr_init PK_VOL_FAST_CTX_GPR, 9, 0x0909
- _gpr_init PK_VOL_FAST_CTX_GPR, 10, 0x1010
+ _gpr_init PK_CTX_GPR, 0, 0x0000
+ _gpr_init PK_CTX_GPR, 7, 0x0707
+ _gpr_init PK_CTX_GPR, 8, 0x0808
+ _gpr_init PK_CTX_GPR, 9, 0x0909
+ _gpr_init PK_CTX_GPR, 10, 0x1010
## Initialize the non-volatile context on the thread stack.
- stwu %r6, -PK_NON_VOL_CTX_SIZE(%r6)
+ _gpr_init PK_CTX_GPR, 28, 0x2828
+ _gpr_init PK_CTX_GPR, 29, 0x2929
+ _gpr_init PK_CTX_GPR, 30, 0x3030
+ _gpr_init PK_CTX_GPR, 31, 0x3131
+
+ ## Initialize the kernel context on the thread stack.
+ ## Note: Thread priority is set later each time the thread is
+ ## resumed.
- _gpr_init PK_NON_VOL_CTX_GPR, 28, 0x2828
- _gpr_init PK_NON_VOL_CTX_GPR, 29, 0x2929
- _gpr_init PK_NON_VOL_CTX_GPR, 30, 0x3030
- _gpr_init PK_NON_VOL_CTX_GPR, 31, 0x3131
+ lis %r7, PPE42_THREAD_MODE
+ stw %r7, PK_CTX_KERNEL_CTX(%r6)
## Initialization is done - the stack pointer is stored back in the
## thread.
diff --git a/pk/std/std_common.h b/pk/std/std_common.h
index 3c8fda2d..38bc1621 100644
--- a/pk/std/std_common.h
+++ b/pk/std/std_common.h
@@ -26,14 +26,14 @@
/// Check for interrupts pending in the interrupt status register while the IRQ
/// is computed. The IRQ is expected to be stored in r4. If no IRQ is
/// pending then load the phantom irq # (EXTERNAL_IRQS).
-/// Only the following registers have been saved off and can be used:
-/// r3, r4, r5, r6, cr0, lr
+///
+/// r1, r2, r3, and r13 must not be modified. All other registers may be used.
///
.macro hwmacro_get_ext_irq
_lvdg d5, STD_LCL_EISTR #load the 64bit interrupt status into d5
cntlzw r4, r5
- cmpwible r4, 31, external_irq_found #branch if irq is lt or eq to 31
+ cmpwible r4, 31, call_external_irq_handler #branch if irq is lt or eq to 31
## No IRQ pending in r5. Try r6.
## Note: irq # will be 64 (phantom irq) if no bits were set in either register
diff --git a/pk/std/std_irq_init.c b/pk/std/std_irq_init.c
index 3c383caf..80ae0f19 100644
--- a/pk/std/std_irq_init.c
+++ b/pk/std/std_irq_init.c
@@ -21,8 +21,7 @@
/// interrupt status in the controller.
///
/// Note that PK allows this API to be called from any context, and changes
-/// to the interrupt controller are made from a critical
-/// section.
+/// to the interrupt controller are made from a critical section.
///
/// Return values other then PK_OK (0) are errors; see \ref pk_errors
///
@@ -71,8 +70,7 @@ pk_irq_setup(PkIrqId irq,
/// Return values other then PK_OK (0) are errors; see \ref pk_errors
///
/// Note that PK allows this API to be called from any context, and changes
-/// to the interrupt controller are made from a critical
-/// section.
+/// to the interrupt controller are made from a critical section.
///
/// \retval 0 Successful completion
///
diff --git a/pk/trace/pk_trace.h b/pk/trace/pk_trace.h
index bcb1ce06..b3b18014 100644
--- a/pk/trace/pk_trace.h
+++ b/pk/trace/pk_trace.h
@@ -74,6 +74,8 @@
#define PK_TRACE_MAX_PARMS 4
+//This is the maximum number of bytes allowed to be traced in a binary trace
+//entry.
//The trace version needs to change if this changes.
#define PK_TRACE_MAX_BINARY 256
diff --git a/pk/trace/pk_trace_core.c b/pk/trace/pk_trace_core.c
index 007337bc..b70f7ed3 100644
--- a/pk/trace/pk_trace_core.c
+++ b/pk/trace/pk_trace_core.c
@@ -23,12 +23,10 @@ void pk_trace_timer_callback(void* arg);
//Static initialization of the trace timer
PkTimer g_pk_trace_timer = {
- .deque.next = 0,
- .deque.previous = 0,
+ .deque = PK_DEQUE_ELEMENT_INIT(),
.timeout = 0,
.callback = pk_trace_timer_callback,
.arg = 0,
- .options = PK_TIMER_CALLBACK_PREEMPTIBLE,
};
//Static initialization of the pk trace buffer
OpenPOWER on IntegriCloud