summaryrefslogtreecommitdiffstats
path: root/src/ssx/ppc405
diff options
context:
space:
mode:
authorStephan Broyles <sbroyles@us.ibm.com>2014-11-05 19:09:37 -0600
committerStephan Broyles <sbroyles@us.ibm.com>2014-11-05 19:22:32 -0600
commit9976c207cdb20871880bd2f4cf123cf4cb6a8b0f (patch)
tree1cf9ed8f23085e6fe3e0e6046fc30dcb7e02ccf2 /src/ssx/ppc405
parent2f8ce357b89d361b5091d88aea91416011b73ccb (diff)
downloadtalos-occ-9976c207cdb20871880bd2f4cf123cf4cb6a8b0f.tar.gz
talos-occ-9976c207cdb20871880bd2f4cf123cf4cb6a8b0f.zip
Added remaining occ files.
Change-Id: I91a748d3dcf3161a6a3eedcb376fcaf1e4dfe655
Diffstat (limited to 'src/ssx/ppc405')
-rwxr-xr-xsrc/ssx/ppc405/Makefile40
-rwxr-xr-xsrc/ssx/ppc405/ppc405.h771
-rwxr-xr-xsrc/ssx/ppc405/ppc405_boot.S242
-rw-r--r--src/ssx/ppc405/ppc405_breakpoint.S103
-rwxr-xr-xsrc/ssx/ppc405/ppc405_cache.h113
-rwxr-xr-xsrc/ssx/ppc405/ppc405_cache_core.c115
-rwxr-xr-xsrc/ssx/ppc405/ppc405_cache_init.S314
-rwxr-xr-xsrc/ssx/ppc405/ppc405_context.h594
-rwxr-xr-xsrc/ssx/ppc405/ppc405_core.c183
-rwxr-xr-xsrc/ssx/ppc405/ppc405_dcr.h55
-rwxr-xr-xsrc/ssx/ppc405/ppc405_exceptions.S832
-rwxr-xr-xsrc/ssx/ppc405/ppc405_init.c80
-rwxr-xr-xsrc/ssx/ppc405/ppc405_irq.h342
-rwxr-xr-xsrc/ssx/ppc405/ppc405_irq_core.c49
-rwxr-xr-xsrc/ssx/ppc405/ppc405_irq_init.c169
-rwxr-xr-xsrc/ssx/ppc405/ppc405_lib_core.c42
-rwxr-xr-xsrc/ssx/ppc405/ppc405_mmu.c474
-rwxr-xr-xsrc/ssx/ppc405/ppc405_mmu.h170
-rwxr-xr-xsrc/ssx/ppc405/ppc405_mmu_asm.S73
-rwxr-xr-xsrc/ssx/ppc405/ppc405_msr.h85
-rwxr-xr-xsrc/ssx/ppc405/ppc405_spr.h319
-rwxr-xr-xsrc/ssx/ppc405/ppc405_thread_init.S126
-rwxr-xr-xsrc/ssx/ppc405/ssx_port_types.h44
-rwxr-xr-xsrc/ssx/ppc405/ssxppc405files.mk53
24 files changed, 5388 insertions, 0 deletions
diff --git a/src/ssx/ppc405/Makefile b/src/ssx/ppc405/Makefile
new file mode 100755
index 0000000..89c2c58
--- /dev/null
+++ b/src/ssx/ppc405/Makefile
@@ -0,0 +1,40 @@
+# $Id: Makefile,v 1.2 2013/12/12 16:12:38 bcbrock Exp $
+
+# This Makefile is designed to be invoked with the -I argument set to
+# the location of the "ssx.mk" for the build
+
+# >> gitprep
+# Path cleanup for GNU builds
+SSX = ..
+PGP = $(SSX)/pgp
+
+include $(PGP)/ssx.mk
+# << gitprep
+include ssxppc405files.mk
+
+ifeq "$(SSX_TIMER_SUPPORT)" "1"
+PPC405_OBJECTS += ${PPC405-TIMER-C-SOURCES:.c=.o} ${PPC405-TIMER-S-SOURCES:.S=.o}
+endif
+
+ifeq "$(SSX_THREAD_SUPPORT)" "1"
+PPC405_OBJECTS += ${PPC405-THREAD-C-SOURCES:.c=.o} ${PPC405-THREAD-S-SOURCES:.S=.o}
+endif
+
+ifeq "$(PPC405_MMU_SUPPORT)" "1"
+PPC405_OBJECTS += ${PPC405-MMU-C-SOURCES:.c=.o} ${PPC405-MMU-S-SOURCES:.S=.o}
+endif
+
+
+all: local
+ $(MAKE) -I ../pgp -C ../ppc32
+
+local: $(PPC405_OBJECTS)
+
+.PHONY : clean
+clean:
+ rm -f *.o *.d *.d.*
+ $(MAKE) -I ../pgp -C ../ppc32 clean
+
+ifneq ($(MAKECMDGOALS),clean)
+include $(PPC405_OBJECTS:.o=.d)
+endif \ No newline at end of file
diff --git a/src/ssx/ppc405/ppc405.h b/src/ssx/ppc405/ppc405.h
new file mode 100755
index 0000000..18b0c53
--- /dev/null
+++ b/src/ssx/ppc405/ppc405.h
@@ -0,0 +1,771 @@
+#ifndef __PPC405_H__
+#define __PPC405_H__
+
+// $Id: ppc405.h,v 1.3 2014/02/03 01:30:42 daviddu Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405.h
+/// \brief PowerPC 405 port header for SSX
+
+// The 405 has a 32-byte line and 2-way set associative caches. The cache
+// configuration varies by chip/ASIC.
+//
+// Regarding the DCACHE_TAG_MASK, used by dcache_flush_all: The IBM
+// documentation on the D-cache tag sizes doesn't make any sense to me - it
+// claims the tag size is constant regardless of the size of the cache.
+// However the Xilinx documentation for their 405 core (which has the same
+// 16KB cache as PgP) is consistent with the way the DCACHE_TAG_MASK is
+// defined here.
+
+#define CACHE_LINE_SIZE 32
+#define LOG_CACHE_LINE_SIZE 5
+
+#define ICACHE_WAYS 2
+#define DCACHE_WAYS 2
+
+#define LOG_ICACHE_WAYS 1
+#define LOG_DCACHE_WAYS 1
+
+#define ICACHE_LINES (ICACHE_SIZE / CACHE_LINE_SIZE)
+#define DCACHE_LINES (DCACHE_SIZE / CACHE_LINE_SIZE)
+
+#define DCACHE_TAG_MASK \
+ ((0xffffffff << (LOG_DCACHE_SIZE - LOG_DCACHE_WAYS)) & 0xffffffff)
+
+#ifdef CHIP_PGP
+
+#define ICACHE_SIZE (16 * 1024)
+#define DCACHE_SIZE (16 * 1024)
+
+#define LOG_ICACHE_SIZE 14
+#define LOG_DCACHE_SIZE 14
+
+#else
+#error "Please define the cache configuration of the processor"
+#endif
+
+// Macros to define where declared code is actually compiled
+
+#ifdef __PPC405_C__
+#define IF__PPC405_CORE_C__(x) x
+#define UNLESS__PPC405_CORE_C__(x)
+#else
+#define IF__PPC405_CORE_C__(x)
+#define UNLESS__PPC405_CORE_C__(x) x
+#endif
+
+#ifdef __PPC405_IRQ_CORE_C__
+#define IF__PPC405_IRQ_CORE_C__(x) x
+#define UNLESS__PPC405_IRQ_CORE_C__(x)
+#else
+#define IF__PPC405_IRQ_CORE_C__(x)
+#define UNLESS__PPC405_IRQ_CORE_C__(x) x
+#endif
+
+
+#ifdef CHIP_PGP
+#include "pgp.h"
+#endif
+
+#include "ppc32.h"
+#include "ppc405_dcr.h"
+#include "ppc405_spr.h"
+#include "ppc405_msr.h"
+#include "ppc405_irq.h"
+#include "ppc405_cache.h"
+
+#if PPC405_MMU_SUPPORT
+
+#include "ppc405_mmu.h"
+
+#ifndef PPC405_IR_SUPPORT
+#define PPC405_IR_SUPPORT 1
+#endif
+
+#ifndef PPC405_DR_SUPPORT
+#define PPC405_DR_SUPPORT 1
+#endif
+
+#define PPC405_RELOCATION_MODE \
+ ((PPC405_IR_SUPPORT * MSR_IR) | (PPC405_DR_SUPPORT * MSR_DR))
+
+#ifndef __ASSEMBLER__
+void*
+memcpy_real(void* dest, const void* src, size_t n);
+#endif
+
+#else /* PPC405_MMU_SUPPORT */
+
+#define PPC405_RELOCATION_MODE 0
+
+#ifndef __ASSEMBLER__
+static inline void*
+memcpy_real(void* dest, const void* src, size_t n)
+{
+ memcpy(dest, src, n);
+ icache_invalidate_all();
+ return dest;
+}
+#endif
+
+#endif /* PPC405_MMU_SUPPORT */
+
+/// By default, in MMU mode free space is read/write only, just like normal
+/// data. Some applications may need to execute from free space however, and
+/// can override this default.
+
+#ifndef EXECUTABLE_FREE_SPACE
+#define EXECUTABLE_FREE_SPACE 0
+#endif
+
+#include "ppc405_context.h"
+
+// PPC405 stack characteristics for SSX. The pre-pattern pattern is selected
+// to be easily recognizable yet be an illegal instruction.
+
+#define SSX_STACK_DIRECTION -1
+#define SSX_STACK_PRE_DECREMENT 1
+#define SSX_STACK_ALIGNMENT 8
+#define SSX_STACK_TYPE unsigned int
+#define SSX_STACK_PATTERN 0x03abcdef
+
+// Kernel data structure offsets for assembler code
+
+#define SSX_THREAD_OFFSET_SAVED_STACK_POINTER 0
+#define SSX_THREAD_OFFSET_STACK_LIMIT 4
+#define SSX_THREAD_OFFSET_STACK_BASE 8
+
+// SSX boot loader panic codes
+
+#define PPC405_BOOT_CCR0_MODIFY_FAILED 0x00405000
+#define PPC405_BOOT_VECTORS_NOT_ALIGNED 0x00405001
+
+// Interrupt handler panic codes
+
+#define PPC405_DEFAULT_IRQ_HANDLER 0x00405010
+#define PPC405_DEFAULT_SPECIAL_HANDLER 0x00405011
+#define PPC405_PHANTOM_INTERRUPT 0x00405012
+#define PPC405_PROGRAM_HALT 0x00405013
+
+
+// Exception handling invariant panic codes
+
+#define PPC405_IRQ_FULL_EXIT_INVARIANT 0x00405020
+#define PPC405_IRQ_FAST2FULL_INVARIANT 0x00405021
+
+
+// API error panic codes
+
+#define PPC405_CACHE_ALIGNMENT 0x00405030
+
+// Application-overrideable definitions
+
+/// The default thread machine context has MSR[CE], MSR[EE] and MSR[ME] set,
+/// and all other MSR bits cleared.
+///
+/// The default definition allows critical, non-critical and machine check
+/// exceptions. Debug interrupts are not enabled by default. This definition
+/// can be overridden by the application. If MMU protection is enabled then
+/// the IR/DR bits are also set as configured (see PPC405_RELOCATION_MODE).
+
+#ifndef SSX_THREAD_MACHINE_CONTEXT_DEFAULT
+#define SSX_THREAD_MACHINE_CONTEXT_DEFAULT \
+ (MSR_CE | MSR_EE | MSR_ME | PPC405_RELOCATION_MODE)
+#endif
+
+
+#ifndef __ASSEMBLER__
+
+/// The SSX kernel default panic sequence for C code
+///
+/// By default a kernel panic from C code forces external debug mode then
+/// generates a \c trap instruction followed by the error code. The \a code
+/// argument must be a compile-time integer immediate. This definition can be
+/// overridden by the application.
+///
+/// The OCC may be running in internal debug mode for various reasons, and
+/// TRAP-ing in internal debug mode would lead to an infinite loop in the
+/// default Program Interrupt handler - which itself would be a TRAP (since
+/// that's the default implementation of SSX_PANIC()). Therefore by default
+/// the panic is implemented as a special code sequence that forces the core
+/// into external debug mode before issuing a TRAP which will halt the core.
+/// To preserve the state we use the special global variables
+/// __ssx_panic_save_dbcr0 and __ssx_panic_save_r3 defined in ppc405_core.c.
+/// The original value of DBCR0 is destroyed, but can be recovered from the
+/// global. In the end %r3 is reloaded from temporary storage and will be
+/// unchanged at the halt.
+///
+/// Note that there is a small chance that an interrupt will fire and
+/// interrupt this code before the halt - in general there is no way around
+/// this.
+///
+/// The Simics environment does not model Debug events correctly. It executes
+/// the TRAP as an illegal instruction and branches to the Program Interrupt
+/// handler, destroying the contents of SRR0 and SRR1. Therefore we always
+/// insert a special Simics magic breakpoint (which is an effective NOP)
+/// before the hardware trap. The special-form magic instruction is
+/// recognized by our Simics support scripts which decode the kernel state and
+/// try to help the user interpret what happened based on the TRAP code.
+
+#ifndef SSX_PANIC
+
+#define SSX_PANIC(code) \
+ do { \
+ barrier(); \
+ asm volatile ("stw %r3, __ssx_panic_save_r3@sda21(0)"); \
+ asm volatile ("mfdbcr0 %r3"); \
+ asm volatile ("stw %r3, __ssx_panic_save_dbcr0@sda21(0)"); \
+ asm volatile ("lwz %r3, __ssx_panic_dbcr0@sda21(0)"); \
+ asm volatile ("mtdbcr0 %r3"); \
+ asm volatile ("isync"); \
+ asm volatile ("lwz %r3, __ssx_panic_save_r3@sda21(0)"); \
+ asm volatile ("rlwimi 1,1,0,0,0"); \
+ asm volatile ("trap"); \
+ asm volatile (".long %0" : : "i" (code)); \
+ } while (0)
+
+// These variables are used by the SSX_PANIC() definition above to save and
+// restore state. __ssx_panic_dbcr0 is the value loaded into DBCR0 to force
+// traps to halt the OCC and freeze the timers.
+
+#ifdef __PPC405_CORE_C__
+uint32_t __ssx_panic_save_r3;
+uint32_t __ssx_panic_save_dbcr0;
+uint32_t __ssx_panic_dbcr0 = DBCR0_EDM | DBCR0_TDE | DBCR0_FT;
+#endif
+
+#endif // SSX_PANIC
+
+/// This is the Simics 'magic breakpoint' instruction.
+///
+/// Note that this form does not include a memory barrier, as doing so might
+/// change the semantics of the program. There is an alternative form
+/// SIMICS_MAGIC_BREAKPOINT_BARRIER that does include a barrier.
+
+#define SIMICS_MAGIC_BREAKPOINT asm volatile ("rlwimi 0,0,0,0,0")
+
+/// This is the Simics 'magic breakpoint' instruction including a memory
+/// barrier.
+///
+/// Note that the memory barrier guarantees that all variables held in
+/// registers are flushed to memory before the breakpoint, however this might
+/// change the semantics of the program. There is an alternative form of
+/// SIMICS_MAGIC_BREAKPOINT that does not include a barrier. If the idea is
+/// to use the breakpoint for tracing code execution in Simics, the barrier
+/// form may be preferred so that variable values will be visible in memory.
+
+#define SIMICS_MAGIC_BREAKPOINT_BARRIER \
+ asm volatile ("rlwimi 0,0,0,0,0" : : : "memory")
+
+
+#else // __ASSEMBLER__
+
+/// This is the Simics 'magic breakpoint' instruction. An assembler macro
+/// form is also provided for use within macros.
+
+#define SIMICS_MAGIC_BREAKPOINT rlwimi 0,0,0,0,0
+
+ .macro _simics_magic_breakpoint
+ rlwimi 0,0,0,0,0
+ .endm
+
+/// The SSX kernel panic default panic sequence for assembler code
+///
+/// By default a kernel panic from assembler forces external debug mode then
+/// generates a \c trap instruction followed by the error code. The \a code
+/// argument must be a compile-time integer immediate. This definition can be
+/// overridden by the application.
+///
+/// See the comments for the non-ASSEMBLER version for further details. Note
+/// that the code space reserved for exception handlers is only 8
+/// instructions, so in the assembler context we don't save DBCR0 as doing so
+/// would require 10.
+
+#ifndef SSX_PANIC
+
+#define SSX_PANIC(code) _ssx_panic code
+
+ .macro _ssx_panic, code
+ _stwsd %r3, __ssx_panic_save_r3
+ _lwzsd %r3, __ssx_panic_dbcr0
+ mtdbcr0 %r3
+ isync
+ _lwzsd %r3, __ssx_panic_save_r3
+ rlwimi 1,1,0,0,0
+ trap
+ .long \code
+ .endm
+
+#endif // SSX_PANIC
+
+#endif // __ASSEMBLER__
+
+
+// Application-overridable definitions for the SSX boot loader
+
+/// In order to enable the default kernel panic (a trap) to halt the machine,
+/// the Debug Control Register 0 (DBCR0) is initialized in external debug
+/// mode, with the Trap Debug Event enabled so that the trap will not cause a
+/// program exception, and the FT bit set so that the timers will freeze.
+/// This definition can be overridden by the application.
+///
+/// NB: It is expected that a reliable production system will redefine all of
+/// the 'panic' macros and the default DBCR0 setup.
+
+#ifndef PPC405_DBCR0_INITIAL
+#define PPC405_DBCR0_INITIAL (DBCR0_EDM | DBCR0_TDE | DBCR0_FT)
+#endif
+
+/// This is the value of the MSR used during initialization. Once SSX threads
+/// are started (with \c ssx_start_threads()), all machine contexts derive
+/// from the default thread context \c
+/// SSX_THREAD_MACHINE_CONTEXT_DEFAULT. This definition can be overridden by
+/// the application.
+///
+/// The default is to enable machine checks only.
+
+#ifndef PPC405_MSR_INITIAL
+#define PPC405_MSR_INITIAL MSR_ME
+#endif
+
+/// This is the initial value of Cache Control Register 0 (CCR0). This
+/// definition can be overridden by the application.
+///
+/// The default sets the CCR0 to give priority to DCU and ICU operations. The
+/// user should consider setting other options in this register that affect
+/// performance, e.g., ICU prefetching. Other options can be set at run time
+/// with the API \c ppc405_ccr0_modify().
+
+#ifndef PPC405_CCR0_INITIAL
+#define PPC405_CCR0_INITIAL (CCR0_DPP1 | CCR0_IPP0 | CCR0_IPP1)
+#endif
+
+/// The \a argc argument passed to \c main(). This definition can be overridden
+/// by the application.
+
+#ifndef PPC405_ARGC_INITIAL
+#define PPC405_ARGC_INITIAL 0
+#endif
+
+/// The \a argv argument passed to \c main(). This definition can be overridden
+/// by the application.
+
+#ifndef PPC405_ARGV_INITIAL
+#define PPC405_ARGV_INITIAL 0
+#endif
+
+/// Optionally trap the reset for the debugger, which means that the PPC405
+/// will simply spin at the symbol \c __reset_trap after a chip reset. Set R0
+/// to a non-zero value in the debugger to continue execution. This definition
+/// can be overridden by the application.
+
+#ifndef PPC405_RESET_TRAP
+#define PPC405_RESET_TRAP 0
+#endif
+
+#ifndef __ASSEMBLER__
+
+/// The PPC405 SSX machine context is simply the MSR, a 32-bit integer.
+
+typedef uint32_t SsxMachineContext;
+
+/// Disable interrupts at the given priority level, and return the current
+/// context.
+///
+/// \param priority The interrupt priority level to disable, either \c
+/// SSX_NONCRITICAL, \c SSX_CRITICAL or \c SSX_SUPERCRITICAL. For best
+/// efficiency, the \a priority parameter should be a manifest constant.
+///
+/// \param context A pointer to an SsxMachineContext, this is the context that
+/// existed before interrupts were disabled. Typically this
+/// context is restored at the end of a critical section.
+///
+/// The PPC405 supports a 'super-critical' context in which every possible
+/// maskable exception is disabled.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_INTERRUPT An illegal priority was specified.
+
+UNLESS__PPC405_CORE_C__(extern)
+inline int
+ssx_interrupt_disable(int priority, SsxMachineContext *context)
+{
+ *context = mfmsr();
+
+ if (priority == SSX_NONCRITICAL) {
+
+ wrteei(0);
+
+ } else if (priority == SSX_CRITICAL) {
+
+ mtmsr(*context & ~(MSR_EE | MSR_CE));
+
+ } else if (priority == SSX_SUPERCRITICAL) {
+
+ mtmsr(*context & ~(MSR_APE | MSR_WE | MSR_CE | MSR_EE | MSR_ME |
+ MSR_DWE | MSR_DE));
+
+ } else if (SSX_ERROR_CHECK_API) {
+ SSX_ERROR(SSX_INVALID_ARGUMENT_INTERRUPT);
+ }
+
+ return SSX_OK;
+}
+
+
+/// Set the machine context.
+///
+/// \param context A pointer to an SsxMachineContext
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_CONTEXT_SET A null pointer was provided as
+/// the \a context argument or an illegal machine context was specified.
+
+UNLESS__PPC405_CORE_C__(extern)
+inline int
+ssx_machine_context_set(SsxMachineContext *context)
+{
+ if (SSX_ERROR_CHECK_API) {
+ SSX_ERROR_IF(context == 0, SSX_INVALID_ARGUMENT_CONTEXT_SET);
+ }
+
+ mtmsr(*context);
+
+ return SSX_OK;
+}
+
+
+/// Get the machine context.
+///
+/// \param context A pointer to an SsxMachineContext.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_CONTEXT_GET A null pointer was provided as
+/// the \a context argument.
+
+UNLESS__PPC405_CORE_C__(extern)
+inline int
+ssx_machine_context_get(SsxMachineContext *context)
+{
+ if (SSX_ERROR_CHECK_API) {
+ SSX_ERROR_IF(context == 0, SSX_INVALID_ARGUMENT_CONTEXT_GET);
+ }
+
+ *context = mfmsr();
+
+ return SSX_OK;
+}
+
+
+/// The SSX kernel thread context switch - PPC405 uses the system call
+/// exception.
+
+#define __ssx_switch() asm volatile ("sc")
+
+
+/// In the PowerPC EABI all initial stack frames require 8 bytes - the 4 bytes
+/// at the SP are zeroed to indicate the end of the stack, and the 4 bytes
+/// behind the SP are for the initial subroutine's LR.
+
+static inline void
+__ssx_stack_create_initial_frame(SsxAddress *stack, size_t *size) \
+{
+ *stack -= 8;
+ *size -= 8;
+ *((SSX_STACK_TYPE *)(*stack)) = 0;
+}
+
+
+/// The SSX Kernel Context for PPC405
+///
+/// The SSX portable kernel does not define how the kernel keeps track of
+/// whether SSX is running, interrupt levels, and other debug
+/// information. Instead it defines an API that the port must provide to the
+/// portable kernel.
+///
+/// In the PPC405 port, the kernel context is maintained in USPRG0. This
+/// 32-bit value is treated as 5 distinct fields as indicated in the structure
+/// definition. For certain tests it's also helpful to look at the two
+/// interrupt counters as a single 0/non-0 field.
+
+typedef union {
+
+ uint32_t value;
+
+ struct {
+
+ /// The critical interrupt nesting level. If this field is non-zero,
+ /// then interrupt priority and preemption rules guarantee that a
+ /// critical interrupt handler is running, and the \c irq field will
+ /// contain the SsxIrqId of the currently active critical interrupt.
+ unsigned critical_interrupts : 8;
+
+ /// The non-critical interrupt nesting level. If this field is
+ /// non-zero and the \c critical_interrupts field is 0, then interrupt
+ /// priority and preemption rules guarantee that a noncritical
+ /// interrupt handler is running, and the \c irq field will contain
+ /// the SsxIrqId of the currently active noncritical interrupt.
+ unsigned noncritical_interrupts : 8;
+
+ /// The SsxIrqId of the currently running (or last run) handler. If
+ /// either of the interrupt nesting levels are non-0, then this is the
+ /// SsxIrqId of the IRQ that is currently executing.
+ unsigned irq : 8;
+
+ /// A flag indicating that SSX is in thread mode after a call of
+ /// ssx_start_threads().
+ unsigned thread_mode : 1;
+
+ /// The priority of the currently running thread. In an interrupt
+ /// context, this is the priority of the thread that was interrupted.
+ unsigned thread_priority : 7;
+
+ } fields;
+
+ struct {
+
+ /// Used as a 0/non-0 flag for interrupt context.
+ unsigned interrupt_context : 16;
+
+ /// Ignore
+ unsigned ignore : 16;
+
+ } merged_fields;
+
+} __SsxKernelContext;
+
+
+// These APIs are provided to the SSX portable kernel by the port.
+
+/// SSX threads have been started by a call of ssx_start_threads().
+
+#define __ssx_kernel_mode_thread() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.thread_mode;})
+
+
+/// SSX is executing in a thread context (not an interrupt handler).
+
+#define __ssx_kernel_context_thread() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.thread_mode && !__ctx.merged_fields.interrupt_context;})
+
+
+/// SSX is executing an interrupt handler of any priority.
+
+#define __ssx_kernel_context_any_interrupt() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.merged_fields.interrupt_context;})
+
+
+/// SSX is executing a critical interrupt handler.
+
+#define __ssx_kernel_context_critical_interrupt() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.critical_interrupts;})
+
+
+/// SSX is executing a non-critical interrupt handler.
+
+#define __ssx_kernel_context_noncritical_interrupt() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.noncritical_interrupts && \
+ !__ctx.fields.critical_interrupts;})
+
+
+/// Return the noncritical interrupt nesting level
+
+#define __ssx_noncritical_level() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.noncritical_interrupts; })
+
+
+/// Return the critical interrupt nesting level
+
+#define __ssx_critical_level() \
+ ({ \
+ __SsxKernelContext __ctx; \
+ __ctx.value = mfspr(SPRN_USPRG0); \
+ __ctx.fields.critical_interrupts; })
+
+
+// SSX requires the port to define the type SsxThreadQueue, which is a
+// priority queue (where 0 is the highest priority). This queue must be able
+// to handle SSX_THREADS + 1 priorities (the last for the idle thread) The
+// port must also define methods for clearing, insertion, deletion and min
+// (with assumed legal priorities). The min operation returns SSX_THREADS if
+// the queue is empty (or a queue could be initialized with that entry always
+// present - SSX code never tries to delete the idle thread from a thread
+// queue).
+//
+// These queues are used both for the run queue and the pending queue
+// associated with every semaphore.
+//
+// On PPC405 with 32 threads (implied), this is a job for a uint32_t and
+// cntlzw().
+
+static inline void
+__ssx_thread_queue_clear(volatile SsxThreadQueue *queue)
+{
+ *queue = 0;
+}
+
+static inline void
+__ssx_thread_queue_insert(volatile SsxThreadQueue *queue, SsxThreadPriority priority)
+{
+ *queue |= (0x80000000u >> priority);
+}
+
+static inline void
+__ssx_thread_queue_delete(volatile SsxThreadQueue *queue, SsxThreadPriority priority)
+{
+ *queue &= ~(0x80000000u >> priority);
+}
+
+static inline SsxThreadPriority
+__ssx_thread_queue_min(volatile SsxThreadQueue *queue)
+{
+ return cntlzw(*queue);
+}
+
+static inline int
+__ssx_thread_queue_member(volatile SsxThreadQueue *queue, SsxThreadPriority priority)
+{
+ return ((*queue >> (31 - priority)) & 1);
+}
+
+static inline void
+__ssx_thread_queue_union(volatile SsxThreadQueue *queue0,
+ volatile SsxThreadQueue *queue1)
+{
+ *queue0 |= *queue1;
+}
+
+static inline int
+__ssx_thread_queue_count(volatile SsxThreadQueue* queue)
+{
+ return __builtin_popcount(*queue);
+}
+
+
+/// This macro is used to call __ssx_start_threads() using the kernel stack,
+/// in a SSX_NONCRITICAL critical section.
+
+#define __ssx_call_ssx_start_threads() \
+ do { \
+ SsxMachineContext ctx; \
+ ssx_critical_section_enter(SSX_NONCRITICAL, &ctx); \
+ asm volatile ("mr 1, %0; mtlr %1; blrl" : : \
+ "r" (__ssx_noncritical_stack), \
+ "r" (__ssx_start_threads)); \
+ SSX_PANIC(SSX_START_THREADS_RETURNED); \
+ } while (0)
+
+
+#endif /* __ASSEMBLER__ */
+
+/// The __SsxKernelContext 'thread_mode' bit as a flag
+
+#define PPC405_THREAD_MODE 0x80
+
+
+#ifndef __ASSEMBLER__
+
+/// Code breakpoints for PPC405
+///
+/// This macro inserts a special PPC405-only breakpoint into the object code
+/// at the place the macro invocation appears. This facility is designed for
+/// VBU/VPO procedure debugging. This type of breakpoint may not be required
+/// on real hardware as we will then have the full power of RISCWatch, gdb,
+/// etc. Once inserted into the code, code breakpoints can be enabled or
+/// disabled by manipulating the global variable _code_breakpoint_enable,
+/// which defaults to 1.
+///
+/// The code breakpoint is implemented as a setup routine and a teardown
+/// routine, executed in an SSX_CRITICAL critical section. The actual break
+/// will occur at the address of the call of the teardown routine, in the
+/// context of the calling code. The setup routine saves the state of DBCR0/1
+/// and IAC4, then programs the DBCR for an external debug mode, IAC4
+/// breakpoint. The IAC4 breakpoint is set for the address of the call of the
+/// teardown routine. The teardown routine simply restores the state of the
+/// debug registers that existed before the code breakpoint.
+///
+/// Once hit, restarting from the break requires clearing IAC4 and restarting
+/// instructions:
+///
+/// \code
+///
+/// putspr pu.occ iac4 0
+/// cipinstruct pu.occ start
+///
+/// \endcode
+///
+/// The above restart process is also encapsulated as the p8_tclEcmd
+/// procedure 'unbreakOcc'.
+///
+/// In code built for the Simics environment (i.e., with the preprocessor
+/// macro SIMICS_ENVIRONMENT=1) this macro simply expands into
+/// SIMICS_MAGIC_BREAKPOINT, and simulation can be continued from the break as
+/// normal. This Simics magic breakpoint is also under the control of
+/// _code_breakpoint_enable. In code not built with SIMICS_ENVIRONMENT=1, note
+/// that the CODE_BREAKPOINT is ignored by the Simics PPC405 model as it does
+/// not model debug events.
+
+#if defined(SIMICS_ENVIRONMENT) && (SIMICS_ENVIRONMENT != 0)
+#define CODE_BREAKPOINT \
+ do { \
+ if (_code_breakpoint_enable) { \
+ SIMICS_MAGIC_BREAKPOINT; \
+ } \
+ } while (0)
+#else
+#define CODE_BREAKPOINT \
+ do { \
+ if (_code_breakpoint_enable) { \
+ SsxMachineContext __ctx; \
+ ssx_critical_section_enter(SSX_CRITICAL, &__ctx); \
+ _code_breakpoint_prologue(); \
+ _code_breakpoint_epilogue(); \
+ ssx_critical_section_exit(&__ctx); \
+ } \
+ } while (0)
+#endif
+
+void
+_code_breakpoint_prologue(void);
+
+void
+_code_breakpoint_epilogue(void);
+
+extern uint32_t _code_breakpoint_enable;
+
+#endif // __ASSEMBLER__
+
+
+#endif /* __PPC405_H__ */
diff --git a/src/ssx/ppc405/ppc405_boot.S b/src/ssx/ppc405/ppc405_boot.S
new file mode 100755
index 0000000..1f4a4f9
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_boot.S
@@ -0,0 +1,242 @@
+// $Id: ppc405_boot.S,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_boot.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_boot.S
+/// \brief SSX bootloader for PPC405
+
+ .nolist
+#include "ssx.h"
+ .list
+
+### SSX Bootloader for PPC405
+###
+### This is the basic restart initialization of the processor.
+### Parts of this code were derived from examples in the IBM OSopen
+### OpenBIOS for the 405GP written by James Burke.
+###
+### This code does not really do very much, just makes sure that there will
+### be a reasonable state in the machine when control is turned over to
+### the SSX application. Any core setup that requires SPR access will be done
+### here. All other setup is expected to take place in system-specific
+### routines.
+###
+### From the PowerPC 405-S Embedded Core User's manual:
+###
+### "In general, the contents of SPRs are undefined after a core, chip or
+### system reset. Some SPRs retain the content they had before the reset
+### occurred."
+###
+### Registers fully reset:
+### DBCR1 - Data compares disabled
+### DCCR - Data cache disabled
+### DCWR - Data cache write-through disabled
+### ESR - No exception syndromes
+### ICCR - Instruction cache disabled
+### MSR - No exceptions/interrupts are allowed
+### SLER - Storage is big-endian
+### SU0R - Storage is uncompressed
+###
+### Registers partially reset:
+### CCR0 = 0x00700000 - Sets ICU and DCU PLB Priority
+### DBCR0 [EDM] = 0 - External debug mode disabled
+### [RST] = 0 - No reset action
+### DBSR [MRR] = x - x indicates most recent reset action
+### SGR = 0xffffffff - Storage is guarded
+### TCR [WRC] = 0 - Watchdog timer reset disabled
+### TSR [WRS] = x - x is a copy of TCR[WRC] Watchdog reset status
+### [PIS] = x - undefined
+
+ .global_function __ssx_boot
+ .global __reset_trap
+
+__ssx_boot:
+
+ ## Trap the reset for the debugger. Set R0 to a non-zero value in the
+ ## debugger to continue.
+
+ .if PPC405_RESET_TRAP
+ li %r0, 0
+__reset_trap:
+ cmpwi %r0, 0
+ beq __reset_trap
+ .endif
+
+ ## First the real-mode memory parameters are set up as configured
+ ## for the system and/or application. There are no defaults for these 6
+ ## configuration options - they must always be supplied.
+
+ _liwa %r3, PPC405_ICCR_INITIAL
+ mticcr %r3
+ _liwa %r3, PPC405_DCCR_INITIAL
+ mtdccr %r3
+ _liwa %r3, PPC405_DCWR_INITIAL
+ mtdcwr %r3
+ _liwa %r3, PPC405_SGR_INITIAL
+ mtsgr %r3
+ _liwa %r3, PPC405_SU0R_INITIAL
+ mtsu0r %r3
+ _liwa %r3, PPC405_SLER_INITIAL
+ mtsler %r3
+
+ ## Next the I and D caches are invalidated.
+ ## NB: The only reason we can do a BL at this point is because we
+ ## know that these routines do not try to save the LR on the stack.
+
+ bl icache_invalidate_all
+ bl dcache_invalidate_all
+
+ ## Set up PowerPC EABI constant registers. These registers are never
+ ## again touched by the SSX kernel or the application (if they are
+ ## behaving).
+
+ _liw %r2, _SDA2_BASE_
+ _liw %r13, _SDA_BASE_
+
+ ## Clear the timer control register. This masks all timer interrupts.
+
+ mttcr %r3
+
+ ## Several options are available for dynamic memory ...
+ ## If none of these options are defined, then it is assumed that
+ ## the memory area is already set up.
+
+#if defined(SSX_RUN_FROM_DCACHE)
+
+ ## SSX can run from the DCACHE. In order for this to work, the
+ ## the linker script must set the symbol __ssx_ram_vma to the
+ ## beginning of a 128MB memory region marked as cacheable in the
+ ## application-defined PPC405_DCCR_INITIAL.
+ ##
+ ## Note that there is typically no backing store for this data. Before
+ ## going further, all data cache blocks must be allocated with 'dcbz'.
+ ## Should a subsequent programming error cause the eviction of
+ ## a dirty line, a machine check exception will result.
+
+ _liw %r3, __ssx_ram_vma
+ _liwa %r4, DCACHE_LINES
+ mtctr %r4
+
+dcbz_loop:
+ dcbz %r0, %r3
+ addi %r3, %r3, CACHE_LINE_SIZE
+ bdnz dcbz_loop
+
+#elif defined(SSX_RUN_FROM_MEMORY)
+
+ ## Nothing to do here...
+
+#else
+
+#error "One of SSX_RUN_FROM_DCACHE or SSX_RUN_FROM_MEMORY must be defined"
+
+#endif /* SSX_RUN_FROM_xxx */
+
+ ## The stack pointer is initialized for use by the remainder of the
+ ## initialization, including the application main(). The linker script
+ ## defines the initial stack area.
+ ##
+ ## Stacks are always 8-byte aligned. A '0' is stored at the
+ ## stack pointer to indicate the end of the stack chain. Stack frames
+ ## always consist of at least 8 bytes - the backchain pointer and the
+ ## slot above the backchain pointer for the callee's LR.
+
+ _liw %r1, _SSX_INITIAL_STACK
+ _clrfield %r1, %r1, 3, 29 # 8-byte align
+ li %r3, 0
+ stwu %r3, -8(%r1)
+
+ ## USPRG0 (__SsxKernelContext) is initialized to 0
+ ## indicating that the SSX kernel is not in thread mode, and no
+ ## interrupts are active.
+
+ li %r3, 0
+ mtusprg0 %r3
+
+ ## Set up the initial value of Debug Control Register 0. Note that
+ ## DBCR1 is specified to be cleared at reset. VBU simulation requested
+ ## an option that this register not be modified so that they could
+ ## completely control debug behavior from reset of the 405.
+
+#ifndef NO_INIT_DBCR0
+ _liwa %r3, PPC405_DBCR0_INITIAL
+ mtdbcr0 %r3
+#endif
+
+ ## The exception vector prefix is set - it must be 64KB aligned.
+
+ _liw %r3, __vectors
+ andi. %r4, %r3, 0xffff
+ beq 1f
+ _ssx_panic PPC405_BOOT_VECTORS_NOT_ALIGNED
+1:
+ mtevpr %r3
+ isync
+
+ ## The MSR to be used during the rest of initialization is
+ ## established. This MSR should NOT enable critical or non-critical
+ ## interrupts, but could enable machine check exceptions.
+
+ _liwa %r3, PPC405_MSR_INITIAL
+ mtmsr %r3
+ isync
+
+ ## Initialize the CCR0. If it returns non-zero, panic.
+
+ _liwa %r4, PPC405_CCR0_INITIAL # bits_to_set
+ not %r3, %r4 # bits_to_clear
+ bl ppc405_ccr0_modify
+ cmpwi %r3, 0
+ beq 1f
+ _ssx_panic PPC405_BOOT_CCR0_MODIFY_FAILED
+1:
+
+#ifdef SSX_BOOT_FROM_ROM
+
+ ## NB: I don't think the old linker scripts were necessarily the most
+ ## optimal. We need to revisit this if we actually do ROM boots in SSX
+ ## Version 2. Not sure the comments are correct.
+
+ ## Data is copied from the initial ROM image to the RAM. The
+ ## address symbols are defined in the linker command file. The linker
+ ## will have zeroed this area in the ROM image.
+
+ liw %r3, __ssx_ram_lma - 4 # src
+ liw %r4, __ssx_ram_vma - 4 # dest
+ liw %r5, __ssx_ram_size
+ liw %r6, 2
+ srw %r5, %r5, %r6 # Number of word transfers
+ mtctr %r5
+
+copy_loop:
+ lwzu %r5, 4(%r3)
+ stwu %r5, 4(%r4)
+ bdnz copy_loop
+
+#endif /* SSX_BOOT_FROM_ROM */
+
+
+ ## Call the system setup code.
+
+ bl __ppc405_system_setup
+
+ ## Call the application. If for some reason we return from
+ ## the call of the application we call an alternate entry point of the
+ ## idle thread.
+ ##
+ ## An initial argc/argv can be passed into main(). argc is expected to
+ ## be a 32-bit immediate integer, and argv is expected to be a 32-bit
+ ## absolute or relocatable expression.
+
+ _liwa %r3, PPC405_ARGC_INITIAL
+ _liw %r4, PPC405_ARGV_INITIAL
+ bl __ssx_main
+
+ b __ssx_idle_thread_from_bootloader
+
+ .epilogue __ssx_boot
+
diff --git a/src/ssx/ppc405/ppc405_breakpoint.S b/src/ssx/ppc405/ppc405_breakpoint.S
new file mode 100644
index 0000000..3adbd13
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_breakpoint.S
@@ -0,0 +1,103 @@
+// $Id: ppc405_breakpoint.S,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_breakpoint.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_breakpoint.S
+/// \brief Implementation of code breakpoints for VBU/VPO
+///
+/// The operation of these short assembler routines is introduced as part of
+/// the documentation for the CODE_BREAKPOINT macro.
+
+ .nolist
+#include "ssx.h"
+ .list
+
+#ifdef DOXYGEN_ONLY
+void _code_breakpoint_prologue(void);
+#endif
+
+/// \cond
+
+ _sprinstrs iac4, SPRN_IAC4
+
+ .global_function _code_breakpoint_prologue
+_code_breakpoint_prologue:
+
+ // Save the DBCR0/1 and IAC4
+
+ mfdbcr0 %r3
+ mfdbcr1 %r4
+ mfiac4 %r5
+ _stwsd %r3, _saved_dbcr0
+ _stwsd %r4, _saved_dbcr1
+ _stwsd %r5, _saved_iac4
+
+ // Set IAC4 to the contents of LR, which will cause a break in the
+ // caller's context.
+
+ mflr %r3
+ mtiac4 %r3
+
+ // Reprogram to external debug mode, trigger on IAC4 compare, freeze
+ // timers. Clear data compare setup for good measure.
+
+ _liwa %r3, DBCR0_EDM | DBCR0_IA4 | DBCR0_FT
+ mtdbcr0 %r3
+ _liwa %r3, 0
+ mtdbcr1 %r3
+
+ isync
+ blr
+
+ .epilogue _code_breakpoint_prologue
+
+ .section .sdata
+ .balign 4
+
+ .global _code_breakpoint_enable
+_code_breakpoint_enable:
+ .long 1
+
+_saved_dbcr0:
+ .long 0
+_saved_dbcr1:
+ .long 0
+_saved_iac4:
+ .long 0
+
+/// \endcond
+
+
+#ifdef DOXYGEN_ONLY
+void _code_breakpoint_epilogue(void);
+#endif
+
+/// \cond
+
+ .global_function _code_breakpoint_epilogue
+_code_breakpoint_epilogue:
+
+ // Restore the DBCR0/1 and IAC4
+
+ _lwzsd %r3, _saved_dbcr0
+ _lwzsd %r4, _saved_dbcr1
+ _lwzsd %r5, _saved_iac4
+ mtdbcr0 %r3
+ mtdbcr1 %r4
+ mtiac4 %r5
+
+ // Clear IA4 status
+
+ _liwa %r3, DBSR_IA4
+ mtdbsr %r3
+
+ isync
+ blr
+
+ .epilogue _code_breakpoint_epilogue
+
+/// \endcond
diff --git a/src/ssx/ppc405/ppc405_cache.h b/src/ssx/ppc405/ppc405_cache.h
new file mode 100755
index 0000000..02bd4f5
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_cache.h
@@ -0,0 +1,113 @@
+#ifndef __PPC405_CACHE_H__
+#define __PPC405_CACHE_H__
+
+// $Id: ppc405_cache.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_cache.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_cache.h
+/// \brief PowerPC 405 cache management header for SSX
+///
+/// The data cache flush/invalidate macros defined here create a compiler
+/// memory barrier that will cause GCC to flush/invalidate all memory data
+/// held in registers before the macro.
+
+#ifndef __ASSEMBLER__
+
+/// Determine cache-alignment of a pointer or byte-count
+#define cache_aligned(x) \
+ ((((unsigned long)(x)) & (POW2_32(LOG_CACHE_LINE_SIZE) - 1)) == 0)
+
+/// Cache-align a pointer or byte count. If the 'direction' is <= 0 then we
+/// round down, else round up (an aligned value is returned unchanged).
+#define cache_align(x, direction) \
+    ({ \
+        unsigned long __x = (unsigned long)(x); \
+        unsigned long __r = __x; \
+        if ((direction) <= 0) { \
+            __r = __x & ~(((unsigned long)CACHE_LINE_SIZE) - 1); \
+        } else { \
+            if (__x % CACHE_LINE_SIZE) { \
+                __r = __x + (CACHE_LINE_SIZE - (__x % CACHE_LINE_SIZE)); \
+            } \
+        } \
+        (void *)__r; \
+    })
+
+/// Data Cache Block Flush
+#define dcbf(p) asm volatile ("dcbf 0, %0" : : "r" (p) : "memory")
+
+/// Data Cache Block Touch
+#define dcbt(p) asm volatile ("dcbt 0, %0" : : "r" (p) : "memory")
+
+/// Data Cache Block Invalidate (Privileged)
+#define dcbi(p) asm volatile ("dcbi 0, %0" : : "r" (p) : "memory")
+
+/// Instruction Cache Block Invalidate
+#define icbi(p) asm volatile ("icbi 0, %0" : : "r" (p) : "memory")
+
+/// Instruction Cache Block Touch
+#define icbt(p) asm volatile ("icbt 0, %0" : : "r" (p) : "memory")
+
+void
+icache_invalidate_all(void);
+
+void
+dcache_invalidate_all(void);
+
+void
+dcache_flush_all(void);
+
+void
+dcache_invalidate(void *p, size_t bytes);
+
+void
+dcache_flush(void *p, size_t bytes);
+
+/// Invalidate a line in the D-cache
+///
+/// \param p An address within the cache line to be invalidated.
+///
+/// The dcache_invalidate_line() API is used to invalidate a single cache line
+/// containing the address \a p. Note that invalidation is a destructive
+/// operation that may cause the loss of information. It is the caller's
+/// responsibility to ensure that no useful data is inadvertently invalidated.
+/// D-cache invalidation is more-or-less a no-op for data either not in the
+/// cache or marked as non-cacheable.
+///
+/// This API always issues a sync() after the invalidation.
+
+static inline void
+dcache_invalidate_line(void *p)
+{
+ dcbi(p);
+ sync();
+}
+
+/// Flush and invalidate a line from the D-cache
+///
+/// \param p An address within the cache line to be flushed.
+///
+/// The dcache_flush_line() API can be used as a shortcut to flush and
+/// invalidate a single cache line. Note that flushing is not a destructive
+/// operation in the sense that no information is lost, however the caller
+/// must make sure that the entirety of the data to be flushed is contained in
+/// the line that includes the address \a p. D-cache flush is more-or-less a
+/// no-op for data either not in the cache or marked as non-cacheable.
+///
+/// This API always issues a sync() after the flush.
+
+static inline void
+dcache_flush_line(void *p)
+{
+ dcbf(p);
+ sync();
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PPC405_CACHE_H__ */
diff --git a/src/ssx/ppc405/ppc405_cache_core.c b/src/ssx/ppc405/ppc405_cache_core.c
new file mode 100755
index 0000000..a03f5e2
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_cache_core.c
@@ -0,0 +1,115 @@
+// $Id: ppc405_cache_core.c,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_cache_core.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_cache_core.c
+/// \brief Core cache management routines required of any PPC405 configuration
+/// of SSX that interacts with DMA devices using cacheable memory.
+///
+/// The entry points in this file are considered 'core' routines that will
+/// always be present at runtime in any SSX application.
+///
+/// \todo The compiler generates weird assembly language for these cache
+/// management APIs - probably due to the "volatile" asm - it may be best to
+/// recode them directly in assembler.
+
+#include "ssx.h"
+
+/// Invalidate a range of addresses from the D-cache
+///
+/// \param p A pointer to the memory area to be invalidated.
+///
+/// \param bytes The size (in bytes) of the area to invalidate.
+///
+/// The dcache_invalidate() API is used to invalidate an arbitrary range of
+/// memory in the cache. Note that invalidation is a destructive operation
+/// that may cause the loss of information. This API will invalidate all
+/// cache lines from the line containing the address \a p, to the line
+/// containing the address \a p + \a bytes - 1. (If \a bytes == 0 this call is
+/// a NOP.) It is the caller's responsibility to ensure that no useful data is
+/// inadvertently invalidated. D-cache invalidation is more-or-less a no-op
+/// for data either not in the cache or marked as non-cacheable.
+///
+/// This API always issues a sync() after the invalidation, even in the event
+/// of \a bytes == 0.
+///
+/// \note For invalidating small blocks of data where some alignment
+/// constraints are known it may be more efficient to use
+/// dcache_invalidate_line() rather than this API.
+
+void
+dcache_invalidate(void *p, size_t bytes)
+{
+ size_t lines;
+
+ if (bytes != 0) {
+ lines = 1;
+ bytes -=
+ MIN((CACHE_LINE_SIZE - ((unsigned long)p % CACHE_LINE_SIZE)),
+ bytes);
+ lines += bytes / CACHE_LINE_SIZE;
+ if (!cache_aligned(bytes)) {
+ lines++;
+ }
+ while (lines--) {
+ dcbi(p);
+ p += CACHE_LINE_SIZE;
+ }
+ }
+ sync();
+}
+
+
+/// Flush and invalidate a range of addresses from the D-cache
+///
+/// \param p A pointer to a memory area to be invalidated.
+///
+/// \param bytes The size (in bytes) of the area to invalidate.
+///
+/// The dcache_flush() API is used to flush and invalidate an arbitrary range
+/// of memory from the D-cache. Note that flushing is not a destructive
+/// operation in the sense that no information is lost. This API will flush
+/// and invalidate all cache lines from the line containing the address \a p,
+/// to the line containing the address \a p + \a bytes - 1. (If \a bytes == 0
+/// this call is a NOP.) D-cache flush is more-or-less a no-op for data
+/// either not in the cache or marked as non-cacheable.
+///
+/// This API always issues a sync() after the flush, even in the event of \a
+/// bytes == 0.
+///
+/// \note For flushing small blocks of data where some alignment constraints
+/// are known it may be more efficient to use dcache_flush_line() rather than
+/// this API.
+
+
+void
+dcache_flush(void *p, size_t bytes)
+{
+ size_t lines;
+
+ if (bytes != 0) {
+ lines = 1;
+ bytes -=
+ MIN((CACHE_LINE_SIZE - ((unsigned long)p % CACHE_LINE_SIZE)),
+ bytes);
+ lines += bytes / CACHE_LINE_SIZE;
+ if (!cache_aligned(bytes)) {
+ lines++;
+ }
+ while (lines--) {
+ dcbf(p);
+ p += CACHE_LINE_SIZE;
+ }
+ }
+ sync();
+}
+
+
+
+
+
+
diff --git a/src/ssx/ppc405/ppc405_cache_init.S b/src/ssx/ppc405/ppc405_cache_init.S
new file mode 100755
index 0000000..c4bed0e
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_cache_init.S
@@ -0,0 +1,314 @@
+// $Id: ppc405_cache_init.S,v 1.3 2014/06/26 12:59:35 cmolsen Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_cache_init.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_cache_init.S
+/// \brief PPC405-specific procedures required for cache initialization
+///
+/// The routines in this file are not part of SSX per se, but are included
+/// with SSX because they are used during initialization and boot.
+///
+/// These are 'initialization' routines used by the SSX bootloader and other
+/// application initialization code and may not be needed after
+/// boot/initialization of the SSX application.
+///
+/// See the documents './doc/PowerPC 405-S Core Errata' and './doc/PowerPC
+/// 405-S Core Errata Impact on PGP OCC Firmware' for information on the
+/// CPU_xxx errata.
+
+ .nolist
+#include "ssx.h"
+#include "ppc405_context.h"
+ .list
+
+/// \fn void icache_invalidate_all()
+/// \brief Invalidate the entire I-Cache
+///
+/// In the PPC405 a single instruction invalidates the entire I-Cache,
+/// therefore this routine need not be executed from a critical section.
+///
+///
+/// This API always issues an isync() after the invalidation.
+#ifdef DOXYGEN_ONLY
+void icache_invalidate_all();
+#endif
+/// \cond
+
+ .global_function icache_invalidate_all
+
+icache_invalidate_all:
+
+ _ssx_critical_section_enter SSX_SUPERCRITICAL, %r5, %r6
+
+ # Workaround for CPU_121: The iccci instruction may errantly
+ # cause a Data TLB exception. Workaround avoids this erratum by
+ # temporarily disabling data address translation.
+
+ mfmsr %r3
+ andi. %r4, %r3, 0xffef
+ mtmsr %r4
+ isync
+
+ ## On the 405, iccci invalidates the entire I-Cache.
+
+ iccci %r0,%r0
+
+ # Restore MSR/DR bit
+ mtmsr %r3
+ isync
+
+ _ssx_critical_section_exit %r5
+
+ blr
+
+ .epilogue icache_invalidate_all
+
+/// \endcond
+
+
+/// \fn void dcache_invalidate_all()
+/// \brief Invalidate the entire D-Cache
+///
+/// This API operates in an SSX_SUPERCRITICAL critical section. This API always
+/// issues a sync() after the invalidation.
+#ifdef DOXYGEN_ONLY
+void dcache_invalidate_all();
+#endif
+/// \cond
+
+ .global_function dcache_invalidate_all
+
+dcache_invalidate_all:
+
+ _ssx_critical_section_enter SSX_SUPERCRITICAL, %r5, %r6
+
+ ## We loop through addresses 0 ... (DCACHE_SIZE / DCACHE_WAYS) - 1
+ ## invalidating the D-Cache. The dccci instruction on the 405
+ ## invalidates both ways.
+
+ li %r3, 0
+ _liwa %r4, (DCACHE_LINES / DCACHE_WAYS)
+ mtctr %r4
+
+1:
+ dccci %r0, %r3
+ addi %r3, %r3, CACHE_LINE_SIZE
+ bdnz 1b
+
+ sync
+
+ _ssx_critical_section_exit %r5
+
+ blr
+
+ .epilogue dcache_invalidate_all
+
+/// \endcond
+
+
+#ifndef USE_GENERIC_DCACHE_FLUSH_ALL
+#define USE_GENERIC_DCACHE_FLUSH_ALL 1
+#endif
+
+/// \fn void dcache_flush_all()
+/// \brief Flush the entire D-Cache (Generic)
+///
+/// This API is necessary whenever it is required to change data cacheability
+/// after boot. The D-cache is scanned, and any lines present in the cache are
+/// flushed. This API operates in an SSX_SUPERCRITICAL critical section. This
+/// API always issues a sync() after the flush.
+///
+/// This is a generic implementation that should work on all PPC405
+/// systems. If something is known about the memory configuration then SSX
+/// ports can implement better/more efficient flush-all algorithms.
+///
+/// This API runs with data translation disabled. This is necessary for
+/// correctness, and also obviates the need to check whether a cache entry is
+/// valid before flushing the entry.
+///
+/// This API is currently a NOP in Simics simulations which complain that the
+/// dcread instruction is implemented as a NOP.
+#ifdef DOXYGEN_ONLY
+void dcache_flush_all();
+#endif
+/// \cond
+
+#if USE_GENERIC_DCACHE_FLUSH_ALL
+
+ .global_function dcache_flush_all
+dcache_flush_all:
+
+#if !SIMICS_ENVIRONMENT
+
+ ## %r3 used as temp throughout
+ ## %r4 holds the original MSR throughout
+ ## %r8 used as a temp throughout
+ ## %r9 used as a temp throughout
+
+ _ssx_critical_section_enter SSX_SUPERCRITICAL, %r4, %r3
+
+ mfmsr %r3
+ _clrbit %r3, %r3, MSR_DR_BIT
+ mtmsr %r3
+ isync
+
+ ## %r5 counts the congruence class address
+ ## CTR counts the number of congruence classes
+
+ li %r5, 0
+ _liwa %r9, (DCACHE_LINES / DCACHE_WAYS)
+ mtctr %r9
+
+ ## %r6 has the CCR0 value for reading DCACHE tag on way A
+ ## %r7 has the CCR0 value for reading DCACHE tag on way B
+ ## %r8 has the cache tag mask
+
+ mfccr0 %r6
+ _liwa %r9, 0x00000011
+ andc %r6, %r6, %r9 # Clear DCREAD control bits
+ ori %r6, %r6, 0x0010
+ ori %r7, %r6, 0x0001
+ _liwa %r8, DCACHE_TAG_MASK # Cache tag mask
+
+dflush_loop:
+
+ ## Way A
+
+ mtccr0 %r6 # Set CCR0 for DCREAD
+ dcread %r9, %r0, %r5
+ and %r9, %r9, %r8 # Mask out cache tag
+ or %r9, %r9, %r5 # OR in the congruence class address
+ dcbf %r0, %r9 # Flush the line
+
+ ## Way B
+
+ mtccr0 %r7 # Set CCR0 for DCREAD
+ dcread %r9, %r0, %r5
+ and %r9, %r9, %r8 # Mask out cache tag
+ or %r9, %r9, %r5 # OR in the congruence class address
+ dcbf %r0, %r9 # Flush the line
+
+ addi %r5, %r5, CACHE_LINE_SIZE
+ bdnz dflush_loop
+
+ ## Restore the MSR and return
+
+ _ssx_critical_section_exit %r4
+
+#endif // SIMICS_ENVIRONMENT
+
+ blr
+
+ .epilogue dcache_flush_all
+
+#endif // USE_GENERIC_DCACHE_FLUSH_ALL
+
+/// \endcond
+
+/// \fn int ppc405_ccr0_modify(uint32_t bits_to_clear, uint32_t bits_to_set)
+/// \brief A special code sequence to modify the Cache Control Register 0 (CCR0)
+///
+/// \param bits_to_clear A positive mask of bits in CCR0 that will be cleared
+/// by this call.
+///
+/// \param bits_to_set A positive mask of bits in CCR0 that will be set by
+/// this call.
+///
+/// The code that implements this procedure \e must be in I-cacheable memory.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion.
+///
+/// \retval -SSX_ILLEGAL_CONTEXT_PPC405_CACHE The code implementing the API
+/// is not cacheable.
+///
+#ifdef DOXYGEN_ONLY
+int ppc405_ccr0_modify(uint32_t bits_to_clear, uint32_t bits_to_set);
+#endif
+/// \cond
+
+ ## The implementation is based on an example from the 405GP manual.
+
+ .global_function ppc405_ccr0_modify
+
+ppc405_ccr0_modify:
+
+ ## %r3 is parameter bits_to_clear
+ ## %r4 is parameter bits_to_set
+ ## %r5 will hold the original MSR
+ ## %r6 will hold the address of the update code.
+ ## %r7, %r8, %r9 are used for computation.
+
+ ## Disable external and critical interrupts
+
+ mfmsr %r5
+ _liwa %r6, (MSR_CE | MSR_EE)
+ andc %r6, %r5, %r6
+ mtmsr %r6 # Interrupts disabled
+ sync
+
+ .if SSX_ERROR_CHECK_API
+
+ ## Check for cacheability of the code sequence
+
+ _liw %r6, ccr0_modify_doit
+ extrwi %r7, %r6, 5, 0 # Upper 5 bits are address range
+ mficcr %r8
+ _liwa %r9, 0x80000000
+ srw %r9, %r9, %r7 # %r9 holds bit to check in %r8
+ and. %r9, %r9, %r8
+ bne ccr0_modify_continue
+
+ ## Whoops! Code not cacheable; restore MSR and return error code.
+
+ .if SSX_ERROR_PANIC
+
+ _ssx_panic SSX_ILLEGAL_CONTEXT_PPC405_CACHE
+
+ .else
+
+ _liwa %r3, -SSX_ILLEGAL_CONTEXT_PPC405_CACHE
+ mtmsr %r5
+ isync
+
+ .endif
+
+ blr
+
+ .endif
+
+ ## Touch the code sequence into the I-Cache and do it.
+
+ccr0_modify_continue:
+ icbt %r0, %r6
+ isync # Erratum CPU_208
+ b ccr0_modify_doit
+
+ ## The ICBT is repeated to insure that the code is really in
+ ## the cache. The bits are cleared, set, updated and we exit.
+
+ .cache_align # Must be cache-line aligned
+ccr0_modify_doit:
+ icbt %r0, %r6
+ isync
+ mfccr0 %r7
+ andc %r7, %r7, %r3 # Clear some bits
+ or %r7, %r7, %r4 # Set some bits
+ mtccr0 %r7
+ isync
+
+ ## Restore MSR and return 0.
+
+ li %r3, 0
+ mtmsr %r5
+ isync
+ blr
+
+ .epilogue ppc405_ccr0_modify
+
+/// \endcond
diff --git a/src/ssx/ppc405/ppc405_context.h b/src/ssx/ppc405/ppc405_context.h
new file mode 100755
index 0000000..3019358
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_context.h
@@ -0,0 +1,594 @@
+#ifndef __PPC405_CONTEXT_H__
+#define __PPC405_CONTEXT_H__
+
+// $Id: ppc405_context.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_context.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_context.h
+/// \brief PPC405 Machine and Thread context for SSX
+
+/// \page ppc405_machine_context PPC405 Assembler Macros for SSX Machine
+/// Context (Critical Sections)
+///
+/// \section _ssx_enter_critical \b _ssx_critical_section_enter/exit
+///
+/// These macro encapsulates the instruction sequences required to enter and
+/// exit critical sections, along with the machine context save for later
+/// exiting the critical section.
+///
+/// \arg \c priority Either \c SSX_CRITICAL, \c SSX_NONCRITICAL or
+/// SSX_SUPERCRITICAL (for \c ssx_critical_section_enter).
+///
+/// \arg \c ctxreg A register that will hold (holds) the machine context (MSR)
+/// prior to entering the critical section (to be restored) for \c
+/// _ssx_critical_section_enter (\c _ssx_critical_section_exit).
+///
+/// \arg \c scrreg A scratch register required for the computation of
+/// \c _ssx_critical_section_enter.
+///
+/// Forms:
+///
+/// \b _ssx_critical_section_enter \a priority, \a ctxreg, \a scrreg - Enter a
+/// critical section \n
+/// \b _ssx_critical_section_exit \a ctxreg - Exit a critical section
+
+#ifdef __ASSEMBLER__
+
+ .set _msr_ee_bit, MSR_EE_BIT
+ .set _msr_ce_bit, MSR_CE_BIT
+
+ .macro _ssx_critical_section_enter priority, ctxreg, scrreg
+ mfmsr \ctxreg
+ .if ((\priority) == SSX_CRITICAL)
+ _clrbit \scrreg, \ctxreg, _msr_ee_bit
+ _clrbit \scrreg, \scrreg, _msr_ce_bit
+ mtmsr \scrreg
+ .elseif ((\priority) == SSX_SUPERCRITICAL)
+ _liwa \scrreg, (MSR_APE | MSR_WE | MSR_CE | MSR_EE | MSR_ME | MSR_DWE | MSR_DE)
+ andc \scrreg, \ctxreg, \scrreg
+ mtmsr \scrreg
+ .elseif ((\priority) == SSX_NONCRITICAL)
+ wrteei 0
+ .else
+ .error "'priority' was not one of SSX_CRITICAL, SSX_NONCRITICAL or SSX_SUPERCRITICAL"
+ .endif
+ isync
+ .endm
+
+ .macro _ssx_critical_section_exit ctxreg
+ mtmsr \ctxreg
+ isync
+ .endm
+
+// ****************************************************************************
+// SSX context save/restore macros for 32-bit Embedded PowerPC
+// ****************************************************************************
+
+// All stack frames are 8-byte aligned in conformance with the EABI. SSX
+// never saves or restores GPR2 or GPR13. GPR13 is constant in (E)ABI
+// applications - the base of the read-write small data area. GPR2 is
+// system-reserved in ABI applications, and is the base for read-only small data
+// in EABI applications.
+
+// A fair amount of complexity is involved in handling the non-critical and
+// critical interrupt levels, and the emphasis on performance of fast-mode
+// interrupt handlers. Several different approaches and philosophies could
+// have been implemented - this is only one. In this implementation
+// critical/non-critical interrupt levels are treated more or less the same,
+// and the interrupt priority is just that - a kind of preemption priority.
+// Critical interrupt handling does have a little less overhead because it
+// does not have a thread scheduling step at the end.
+
+// A full context save takes place in 3 or 4 steps. Thread switches always do
+// steps 1, 2 and 3.
+// 1. The fast context that is always saved in response to every interrupt;
+// 1a. The optional save/update of the kernel context for interrupts.
+// 2. The (volatile - fast) context that is saved if an interrupt handler
+// switches from fast-mode to full-mode.
+// 3. The non-volatile context that is saved when a thread is switched out.
+
+// USPRG0 holds the __SsxKernelContext structure (defined in ppc405.h) that
+// represents the current kernel context. The layout is as follows:
+//
+// Bits Meaning
+// ==============
+// 0:7 The critical interrupt count
+// 8:15 The non-critical interrupt count
+// 16:23 The IRQ currently being processed
+// 24 The 'thread_mode' flag
+// 25:31 The thread priority of the running thread
+//
+// When SSX is initialized USPRG0 is initialized to 0. When thread-mode is
+// entered (by ssx_start_threads()) bit 24 is set to 1. In order to support
+// PgP/OCC firmware, once initialized (with ssx_initialize()) SSX can simply
+// handle interrupts, reverting back to the non-thread-mode idle loop when
+// there's nothing to do.
+//
+// Note that it would require a serious error for the interrupt counts to ever
+// equal or exceed 2**8 as this would imply runaway reentrancy and stack
+// overflow. In fact it is most likely an error if an interrupt handler is
+// ever re-entered while active.
+
+// Registers SRR2 and SRR3 are always saved in IRQ context because
+// __ssx_irq_fast2full must save the (volatile - fast) context to provide
+// working registers before it can look at USPRG0 to determine critical
+// vs. non-critical context. However, when restoring a non-critical interrupt
+// or thread these registers need not be restored. SRR2 and SRR3 are never
+// saved or restored for thread context switches, because threads always
+// operate at noncritical level.
+
+// When MMU protection is enabled, relocation/protection is re-established
+// immediately upon entry to the interrupt handler, before any memory
+// operations (load/store) take place. This requires using SPRG0 and SPGR4
+// for temporary storage for noncritical/critical handlers respectively in
+// accordance with the SSX conventions for SPRGn usage by fast-mode
+// interrupts.
+
+ ## ------------------------------------------------------------
+ ## Unused registers for embedded PowerPC
+ ## ------------------------------------------------------------
+
+ ## Registers GPR2 and GPR13 are never saved or restored. In ABI and
+ ## EABI applications these registers are constant.
+
+ .set UNUSED_GPR2, 0x2 # Dedicated; EABI read-only small data area
+ .set UNUSED_GPR13, 0xd # Dedicated; (E)ABI read-write small data area
+
+ ## ------------------------------------------------------------
+ ## Flags for context push/pop
+ ## ------------------------------------------------------------
+
+ .set SSX_THREAD_CONTEXT, 0
+ .set SSX_IRQ_CONTEXT, 1
+
+ ## ------------------------------------------------------------
+ ## The SSX fast context layout for Embedded PowerPC
+ ## ------------------------------------------------------------
+
+ .set SSX_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
+ .set SSX_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
+ .set SSX_FAST_CTX_GPR3, 0x08 # Volatile; Parameter; Return Value
+ .set SSX_FAST_CTX_GPR4, 0x0c # Volatile; Parameter
+ .set SSX_FAST_CTX_GPR5, 0x10 # Volatile; Parameter
+ .set SSX_FAST_CTX_GPR6, 0x14 # Volatile; Parameter
+ .set SSX_FAST_CTX_GPR7, 0x18 # Volatile; Parameter
+ .set SSX_FAST_CTX_CR, 0x1c # Condition register
+ .set SSX_FAST_CTX_LR, 0x20 # Link register SPRN 0x008
+ .set SSX_FAST_CTX_KERNEL_CTX, 0x24 # Saved __SsxKernelContext for IRQ
+
+ .set SSX_FAST_CTX_SIZE, 0x28 # Must be 8-byte aligned
+
+ ## ------------------------------------------------------------
+ ## The SSX (volatile - fast) context layout for Embedded PowerPC
+ ## ------------------------------------------------------------
+
+ .set SSX_VOL_FAST_CTX_GPR1, 0x00 # Dedicated; Stack pointer
+ .set SSX_VOL_FAST_CTX_HANDLER_LR, 0x04 # Slot for handler to store LR
+ .set SSX_VOL_FAST_CTX_GPR0, 0x08 # Volatile; Language specific
+ .set SSX_VOL_FAST_CTX_GPR8, 0x0c # Volatile; Parameter
+ .set SSX_VOL_FAST_CTX_GPR9, 0x10 # Volatile; Parameter
+ .set SSX_VOL_FAST_CTX_GPR10, 0x14 # Volatile; Parameter
+ .set SSX_VOL_FAST_CTX_GPR11, 0x18 # Volatile
+ .set SSX_VOL_FAST_CTX_GPR12, 0x1c # Volatile
+ .set SSX_VOL_FAST_CTX_XER, 0x20 # Fixed-point exception register SPRN 0x001
+ .set SSX_VOL_FAST_CTX_CTR, 0x24 # Count register SPRN 0x009
+ .set SSX_VOL_FAST_CTX_SRR0, 0x28 # Save/restore register 0 SPRN 0x01a
+ .set SSX_VOL_FAST_CTX_SRR1, 0x2c # Save/restore register 1 SPRN 0x01b
+ .set SSX_VOL_FAST_CTX_SRR2, 0x30 # Save/restore register 2 SPRN 0x3de
+ .set SSX_VOL_FAST_CTX_SRR3, 0x34 # Save/restore register 3 SPRN 0x3df
+
+ .set SSX_VOL_FAST_CTX_SIZE, 0x38 # Must be 8-byte aligned
+
+ ## ------------------------------------------------------------
+ ## The SSX non-volatile context layout for Embedded PowerPC
+ ## ------------------------------------------------------------
+
+ ## The 'preferred form' for stmw is for the LSB of R31 to fall into the
+ ## end of a 16-byte aligned block.
+
+ .set SSX_NON_VOL_CTX_GPR1, 0x0 # Dedicated; Stack Pointer
+ .set SSX_NON_VOL_CTX_HANDLER_LR, 0x4 # Slot for handler to store LR
+ .set SSX_NON_VOL_CTX_GPR14, 0x8 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR15, 0xc # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR16, 0x10 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR17, 0x14 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR18, 0x18 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR19, 0x1c # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR20, 0x20 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR21, 0x24 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR22, 0x28 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR23, 0x2c # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR24, 0x30 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR25, 0x34 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR26, 0x38 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR27, 0x3c # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR28, 0x40 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR29, 0x44 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR30, 0x48 # Non-volatile
+ .set SSX_NON_VOL_CTX_GPR31, 0x4c # Non-volatile
+
+ .set SSX_NON_VOL_CTX_SIZE, 0x50 # Must be 8-byte aligned
+
+ ## ------------------------------------------------------------
+ ## Save/restore the fast context
+ ##
+ ## 11 Instructions, 8 Loads/Stores : If MMU is disabled
+ ## 17 Instructions, 8 Loads/Stores : If MMU is enabled
+ ## ------------------------------------------------------------
+ ##
+ ## Without MMU support, an EIEIO is always executed at the entry point
+ ## to gauarantee that all memory operations (especially MMIO
+ ## operations) have completed prior to execution of the interrupt
+ ## handler.
+ ##
+ ## If MMU support is enabled, address translation is re-established
+ ## immediately at the entry of each interrupt, prior to performing any
+ ## loads or stores. SSX currently only supports using the MMU for
+ ## protection, not for address translation. Therfore it is 'legal'
+ ## to change translation modes a with an MTMSR followed by an
+ ## ISYNC. This is much simpler then the complex instruction sequence
+ ## that would be required if we had to set up RFI/RFCI sequences to
+ ## change the execution context at this point.
+ ##
+ ## Note that since we are not really doing address translation, it
+ ## would also be in keeping with the 'fast interrupt' idea to defer
+ ## reenabling translation (protection) until the fast-to-full sequence
+ ## was executed for full-mode interrupts, and run fast-mode interrupts
+ ## unprotected. However here we chose to run all interrupts with MMU
+ ## protection.
+ ##
+ ## Unfortunately the simple MTMSR;ISYNC sequence exposes a serious bug
+ ## in the 405-S core that causes the stack-pointer store instruction
+ ## to generate a seemingly random, *real-mode* address in certain cases
+ ## when this instruction in a noncritical interrupt prologue is
+ ## interrupted by a critical interrupt. This bug is described in
+ ## HW239446. The workaround is to follow the ISYNC sith a SYNC - which
+ ## eliminates the problem for reasons still unknown. On the bright side
+ ## this SYNC might also serve the same purpose as the EIEIO in the
+ ## non-MMU case, guaranteeing that all MMIO has completed prior to the
+ ## interrupt handler. However without the initial EIEIO we still
+ ## experience failures, so this seemingly redundant instruction also
+ ## remains in place. This requirement is assumed to be related to the
+ ## HW239446 issue.
+
+ .macro _ssx_fast_ctx_push, critical
+
+ .if !PPC405_MMU_SUPPORT
+
+ eieio
+
+ .elseif \critical
+
+ eieio # HW239446?
+ mtsprg4 %r3
+ mfmsr %r3
+ ori %r3, %r3, PPC405_RELOCATION_MODE
+ mtmsr %r3
+ isync
+#ifndef ALLOW_HW239446
+ sync # HW239446!
+#endif
+ mfsprg4 %r3
+
+ .else
+
+ eieio # HW239446?
+ mtsprg0 %r3
+ mfmsr %r3
+ ori %r3, %r3, PPC405_RELOCATION_MODE
+ mtmsr %r3
+ isync
+#ifndef ALLOW_HW239446
+ sync # HW239446!
+#endif
+ mfsprg0 %r3
+
+ .endif
+
+ stwu %r1, -SSX_FAST_CTX_SIZE(%r1) # May be corrupted w/o HW239446
+
+ stw %r3, SSX_FAST_CTX_GPR3(%r1)
+ stw %r4, SSX_FAST_CTX_GPR4(%r1)
+ stw %r5, SSX_FAST_CTX_GPR5(%r1)
+ stw %r6, SSX_FAST_CTX_GPR6(%r1)
+ stw %r7, SSX_FAST_CTX_GPR7(%r1)
+
+ mfcr %r3
+ mflr %r4
+
+ stw %r3, SSX_FAST_CTX_CR(%r1)
+ stw %r4, SSX_FAST_CTX_LR(%r1)
+
+ .endm
+
+
+ .macro _ssx_fast_ctx_pop
+
+ lwz %r3, SSX_FAST_CTX_CR(%r1)
+ lwz %r4, SSX_FAST_CTX_LR(%r1)
+
+ mtcr %r3
+ mtlr %r4
+
+ lwz %r3, SSX_FAST_CTX_GPR3(%r1)
+ lwz %r4, SSX_FAST_CTX_GPR4(%r1)
+ lwz %r5, SSX_FAST_CTX_GPR5(%r1)
+ lwz %r6, SSX_FAST_CTX_GPR6(%r1)
+ lwz %r7, SSX_FAST_CTX_GPR7(%r1)
+
+ lwz %r1, 0(%r1)
+
+ .endm
+
+ ## ------------------------------------------------------------
+ ## Save/update the kernel context in response to an interrupt. This is
+ ## not part of the fast context save because for external interrupts the
+ ## IRQ is not determined until later.
+ ## ------------------------------------------------------------
+
+ ## The kernel context is saved, then updated with the currently active
+ ## IRQ in bits 16:23. The correct interrupt count is incremented and
+ ## the context is returned to USPRG0.
+
+        .macro _save_update_kernel_context critical, irqreg, ctxreg
+
+        ## Trace entry, save the current kernel context (USPRG0) into the
+        ## fast-context frame, insert the active IRQ number (\irqreg) into
+        ## bits 16:23, bump the appropriate interrupt nesting count and
+        ## write the updated context back to USPRG0.
+
+        .if \critical
+        SSX_TRACE_CRITICAL_IRQ_ENTRY \irqreg, \ctxreg
+        .else
+        SSX_TRACE_NONCRITICAL_IRQ_ENTRY \irqreg, \ctxreg
+        .endif
+
+        mfusprg0 \ctxreg
+        stw \ctxreg, SSX_FAST_CTX_KERNEL_CTX(%r1)
+        rlwimi \ctxreg, \irqreg, 8, 16, 23      # IRQ number -> bits 16:23
+        .if \critical
+        addis \ctxreg, \ctxreg, 0x0100          # ++critical count (bits 0:7)
+        .else
+        addis \ctxreg, \ctxreg, 0x0001          # ++noncritical count (bits 8:15)
+        .endif
+        mtusprg0 \ctxreg
+
+        .endm
+
+ ## ------------------------------------------------------------
+ ## Fast-mode context pop and RF(C)I. This is only used by
+ ## interrupt handlers - the thread context switch has its own
+ ## code to handle updating USPRG0 for thread mode.
+ ## ------------------------------------------------------------
+
+        .macro _ssx_fast_ctx_pop_exit critical
+
+        ## Trace exit (if enabled), restore the pre-interrupt kernel
+        ## context to USPRG0 from the fast-context frame, pop the fast
+        ## context and return with the interrupt-level-appropriate RF(C)I.
+
+        .if SSX_KERNEL_TRACE_ENABLE
+        .if \critical
+        bl __ssx_trace_critical_irq_exit
+        .else
+        bl __ssx_trace_noncritical_irq_exit
+        .endif
+        .endif
+
+        lwz %r3, SSX_FAST_CTX_KERNEL_CTX(%r1)
+        mtusprg0 %r3
+        _ssx_fast_ctx_pop
+        .if \critical
+        rfci
+        .else
+        rfi
+        .endif
+
+        .endm
+
+ ## ------------------------------------------------------------
+ ## Save/restore the (volatile - fast) context
+ ##
+ ## Thread - 15 Instructions, 11 Loads/Stores
+ ## IRQ - 19(15) Instructions, 13(11) Loads/Stores
+ ## ------------------------------------------------------------
+
+        .macro _ssx_vol_fast_ctx_push, irq_context, critical=1
+
+        ## Push the (volatile - fast) context: R0, R8-R12, XER, CTR and
+        ## SRR0/SRR1.  SRR2/SRR3 are saved only when this is a critical
+        ## interrupt context (both flags nonzero), since only critical
+        ## interrupts use those save/restore registers.
+
+        stwu %r1, -SSX_VOL_FAST_CTX_SIZE(%r1)
+
+        stw %r0, SSX_VOL_FAST_CTX_GPR0(%r1)
+        stw %r8, SSX_VOL_FAST_CTX_GPR8(%r1)
+        stw %r9, SSX_VOL_FAST_CTX_GPR9(%r1)
+        stw %r10, SSX_VOL_FAST_CTX_GPR10(%r1)
+        stw %r11, SSX_VOL_FAST_CTX_GPR11(%r1)
+        stw %r12, SSX_VOL_FAST_CTX_GPR12(%r1)
+
+        mfxer %r8
+        mfctr %r9
+        mfsrr0 %r10
+        mfsrr1 %r11
+
+        stw %r8, SSX_VOL_FAST_CTX_XER(%r1)
+        stw %r9, SSX_VOL_FAST_CTX_CTR(%r1)
+        stw %r10, SSX_VOL_FAST_CTX_SRR0(%r1)
+        stw %r11, SSX_VOL_FAST_CTX_SRR1(%r1)
+
+        .if (\irq_context & \critical)
+        mfsrr2 %r8
+        mfsrr3 %r9
+
+        stw %r8, SSX_VOL_FAST_CTX_SRR2(%r1)
+        stw %r9, SSX_VOL_FAST_CTX_SRR3(%r1)
+        .endif
+
+        .endm
+
+
+        .macro _ssx_vol_fast_ctx_pop, irq_context, critical
+
+        ## Reverse _ssx_vol_fast_ctx_push.  SRR2/SRR3 are restored first
+        ## (critical IRQ context only), then XER/CTR/SRR0/SRR1 staged
+        ## through R8-R11, then the GPRs, and finally R1 is unwound from
+        ## the back-chain word.
+
+        .if (\irq_context & \critical)
+        lwz %r8, SSX_VOL_FAST_CTX_SRR2(%r1)
+        lwz %r9, SSX_VOL_FAST_CTX_SRR3(%r1)
+
+        mtsrr2 %r8
+        mtsrr3 %r9
+        .endif
+
+        lwz %r8, SSX_VOL_FAST_CTX_XER(%r1)
+        lwz %r9, SSX_VOL_FAST_CTX_CTR(%r1)
+        lwz %r10, SSX_VOL_FAST_CTX_SRR0(%r1)
+        lwz %r11, SSX_VOL_FAST_CTX_SRR1(%r1)
+
+        mtxer %r8
+        mtctr %r9
+        mtsrr0 %r10
+        mtsrr1 %r11
+
+        lwz %r0, SSX_VOL_FAST_CTX_GPR0(%r1)
+        lwz %r8, SSX_VOL_FAST_CTX_GPR8(%r1)
+        lwz %r9, SSX_VOL_FAST_CTX_GPR9(%r1)
+        lwz %r10, SSX_VOL_FAST_CTX_GPR10(%r1)
+        lwz %r11, SSX_VOL_FAST_CTX_GPR11(%r1)
+        lwz %r12, SSX_VOL_FAST_CTX_GPR12(%r1)
+
+        lwz %r1, 0(%r1)
+
+        .endm
+
+ ## ------------------------------------------------------------
+ ## Save/restore the non-volatile context on the stack
+ ##
+ ## 2 Instructions, 19 Loads/Stores
+ ## ------------------------------------------------------------
+
+        .macro _ssx_non_vol_ctx_push
+
+        ## Push the non-volatile context: R14-R31 in one store-multiple.
+
+        stwu %r1, -SSX_NON_VOL_CTX_SIZE(%r1)
+        stmw %r14, SSX_NON_VOL_CTX_GPR14(%r1)
+
+        .endm
+
+
+        .macro _ssx_non_vol_ctx_pop
+
+        ## Restore R14-R31 and unwind the frame.
+
+        lmw %r14, SSX_NON_VOL_CTX_GPR14(%r1)
+        lwz %r1, 0(%r1)
+
+        .endm
+
+#else /* __ASSEMBLER__ */
+
+/// SSX thread context layout as a C structure.
+///
+/// This is the structure of the stack area pointed to by
+/// thread->saved_stack_pointer when a thread is fully context-switched out.
+
+typedef struct {
+
+    /* Non-volatile context (cf. _ssx_non_vol_ctx_push) */
+    uint32_t r1_nv;
+    uint32_t link_nv;
+    uint32_t r14;
+    uint32_t r15;
+    uint32_t r16;
+    uint32_t r17;
+    uint32_t r18;
+    uint32_t r19;
+    uint32_t r20;
+    uint32_t r21;
+    uint32_t r22;
+    uint32_t r23;
+    uint32_t r24;
+    uint32_t r25;
+    uint32_t r26;
+    uint32_t r27;
+    uint32_t r28;
+    uint32_t r29;
+    uint32_t r30;
+    uint32_t r31;
+    /* (Volatile - fast) context (cf. _ssx_vol_fast_ctx_push) */
+    uint32_t r1_vf;
+    uint32_t link_vf;
+    uint32_t r0;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t r12;
+    uint32_t xer;
+    uint32_t ctr;
+    uint32_t srr0;
+    uint32_t srr1;
+    uint32_t srr2;
+    uint32_t srr3;
+    /* Fast context (cf. _ssx_fast_ctx_push) */
+    uint32_t r1;
+    uint32_t link_fast;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t cr;
+    uint32_t lr;
+    uint32_t usprg0;    /* Saved kernel context (SSX_FAST_CTX_KERNEL_CTX) */
+
+} SsxThreadContext;
+
+
+/// SSX thread context of an interrupted thread (full-mode handler)
+///
+/// When a thread is interrupted by a full-mode interrupt handler, this is the
+/// layout of the stack area pointed to by either __ssx_saved_sp_noncritical
+/// or __ssx_saved_sp_critical.
+
+typedef struct {
+
+    /* (Volatile - fast) context (cf. _ssx_vol_fast_ctx_push) */
+    uint32_t r1_vf;
+    uint32_t link_vf;
+    uint32_t r0;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t r12;
+    uint32_t xer;
+    uint32_t ctr;
+    uint32_t srr0;
+    uint32_t srr1;
+    uint32_t srr2;
+    uint32_t srr3;
+    /* Fast context (cf. _ssx_fast_ctx_push) */
+    uint32_t r1;
+    uint32_t link_fast;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t cr;
+    uint32_t lr;
+    uint32_t usprg0;    /* Saved kernel context (SSX_FAST_CTX_KERNEL_CTX) */
+
+} SsxThreadContextFullIrq;
+
+
+/// SSX thread context of an interrupted thread (fast-mode handler)
+///
+/// When a thread is interrupted by a fast-mode interrupt handler, this is the
+/// layout of the stack area pointed to by R1 - unless the fast-mode interrupt
+/// handler extends the stack.
+
+typedef struct {
+
+    /* Fast context only (cf. _ssx_fast_ctx_push) */
+    uint32_t r1;
+    uint32_t link_fast;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t cr;
+    uint32_t lr;
+    uint32_t usprg0;    /* Saved kernel context (SSX_FAST_CTX_KERNEL_CTX) */
+
+} SsxThreadContextFastIrq;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PPC405_CONTEXT_H__ */
+
+
diff --git a/src/ssx/ppc405/ppc405_core.c b/src/ssx/ppc405/ppc405_core.c
new file mode 100755
index 0000000..5df0967
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_core.c
@@ -0,0 +1,183 @@
+// $Id: ppc405_core.c,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_core.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_core.c
+/// \brief The final bits of SSX runtime code required to complete the PPC405
+/// port.
+///
+/// The entry points in this file are considered 'core' routines that will
+/// always be present during runtime in any SSX application.
+
+#define __PPC405_CORE_C__
+
+#include "ssx.h"
+
+/// Get the 64-bit timebase following the PowerPC protocol
+///
+/// The upper timebase word is sampled before and after the lower word; if
+/// it changed, the lower word rolled over mid-read and the whole sample is
+/// retried.  Note that the only way to guarantee that the value returned
+/// is the value \e right \e now is to call this API from a critical
+/// section.
+
+SsxTimebase
+ssx_timebase_get(void)
+{
+    Uint64 tb;
+    uint32_t upper;
+
+    for (;;) {
+        upper = mftbu();
+        tb.word[1] = mftb();
+        if (mftbu() == upper) {
+            break;
+        }
+    }
+    tb.word[0] = upper;
+
+    return tb.value;
+}
+
+
+/// Set the 64-bit timebase in an SSX_CRITICAL critical section
+///
+/// It is assumed that the caller knows what they are doing; e.g., is aware
+/// of what may happen when time warps as a result of this call.
+
+void
+ssx_timebase_set(SsxTimebase timebase)
+{
+    SsxMachineContext machine_ctx;
+    Uint64 new_tb;
+
+    new_tb.value = timebase;
+
+    ssx_critical_section_enter(SSX_CRITICAL, &machine_ctx);
+
+    /* Zero TBL first so TBU can not carry while it is being written. */
+    mttbl(0);
+    mttbu(new_tb.word[0]);
+    mttbl(new_tb.word[1]);
+
+    ssx_critical_section_exit(&machine_ctx);
+}
+
+
+/// Enable interrupt preemption
+///
+/// This API can only be called from an interrupt context. Threads will
+/// always be preempted by interrupts unless they explicitly disable
+/// interrupts with the \c ssx_interrupt_disable() API. It is legal to call
+/// this API redundantly.
+///
+/// Be careful when enabling interrupt handler preemption that the interrupt
+/// being handled does not/can not trigger again, as this could rapidly lead
+/// to stack overflows.
+///
+/// Return values other then SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_ILLEGAL_CONTEXT The API call was not made from an interrupt
+/// context.
+
+int
+ssx_interrupt_preemption_enable()
+{
+    if (SSX_ERROR_CHECK_API) {
+        SSX_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
+    }
+
+    /* A critical handler reenables MSR[CE]; a noncritical handler
+       reenables MSR[EE] via wrteei. */
+    if (!__ssx_kernel_context_noncritical_interrupt()) {
+        or_msr(MSR_CE);
+    } else {
+        wrteei(1);
+    }
+
+    return SSX_OK;
+}
+
+
+/// Disable interrupt preemption
+///
+/// This API can only be called from an interrupt context. Threads will
+/// always be preempted by interrupts unless they explicitly disable
+/// interrupts with the \c ssx_interrupt_disable() API. It is legal to call
+/// this API redundantly.
+///
+/// Return values other then SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_ILLEGAL_CONTEXT The API call was not made from an interrupt
+/// context.
+
+int
+ssx_interrupt_preemption_disable()
+{
+    if (SSX_ERROR_CHECK_API) {
+        SSX_ERROR_UNLESS_ANY_INTERRUPT_CONTEXT();
+    }
+
+    /* A critical handler masks MSR[CE]; a noncritical handler masks
+       MSR[EE] via wrteei. */
+    if (!__ssx_kernel_context_noncritical_interrupt()) {
+        andc_msr(MSR_CE);
+    } else {
+        wrteei(0);
+    }
+
+    return SSX_OK;
+}
+
+
+#if SSX_TIMER_SUPPORT
+
+// The tickless kernel timer mechanism for PPC405
+//
+// This routine must be called from an SSX_NONCRITICAL critical section.
+//
+// Tickless timeouts are provided by programming the PIT timer based on when
+// the next timeout will occur. If the timeout is for the end of time there's
+// nothing to do - SSX does not use auto-reload mode so no more PIT interrupts
+// will be arriving. Otherwise, if the timeout is longer than the 32-bit PIT
+// timer can handle, we simply schedule the timeout for 2**32 - 1 and
+// __ssx_timer_handler() will keep rescheduling it until it finally occurs.
+// If the \a timeout is in the past, we schedule the PIT interrupt for 1 tick
+// in the future in accordance with the SSX specification.
+
+void
+__ssx_schedule_hardware_timeout(SsxTimebase timeout)
+{
+    SsxTimebase now;
+    SsxTimebase delta;
+    uint32_t pit;
+
+    /* The end of time requires no PIT programming at all - SSX does not
+       use auto-reload mode, so no further PIT interrupts will arrive. */
+    if (timeout == SSX_TIMEBASE_MAX) {
+        return;
+    }
+
+    now = ssx_timebase_get();
+
+    if (timeout <= now) {
+        /* Already in the past: fire 1 tick from now per the SSX spec. */
+        pit = 1;
+    } else {
+        /* Clamp to the 32-bit PIT; __ssx_timer_handler() reschedules
+           until the full timeout finally elapses. */
+        delta = timeout - now;
+        pit = (delta > 0xffffffff) ? 0xffffffff : (uint32_t)delta;
+    }
+
+    mtspr(SPRN_PIT, pit);
+}
+
+
+// Cancel the PPC405 tickless kernel timeout
+//
+// This routine must be called from an SSX_NONCRITICAL critical section. SSX
+// does not use auto-reload mode of the PIT, so simply writing the PIT with 0
+// effectively cancels the timer.
+
+void
+__ssx_cancel_hardware_timeout()
+{
+    mtspr(SPRN_PIT, 0);  /* One-shot mode: writing 0 stops the PIT. */
+}
+
+
+#endif /* SSX_TIMER_SUPPORT */
+
+#undef __PPC405_CORE_C__
diff --git a/src/ssx/ppc405/ppc405_dcr.h b/src/ssx/ppc405/ppc405_dcr.h
new file mode 100755
index 0000000..1f389bf
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_dcr.h
@@ -0,0 +1,55 @@
+#ifndef __PPC405_DCR_H__
+#define __PPC405_DCR_H__
+
+// $Id: ppc405_dcr.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_dcr.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_dcr.h
+/// \brief Everything related to PPC405-specific DCRs
+///
+/// DCRs are chip-specific. This file only defines DCR access methods; DCR
+/// numbers will be defined by chip-specific headers.
+
+/// Move From DCR
+///
+/// Returns the 32-bit value of device control register \a dcrn.  Note
+/// that \a dcrn must be a compile-time constant, since the DCR number is
+/// encoded as an immediate in the mfdcr instruction.
+
+#define mfdcr(dcrn) \
+    ({uint32_t __value; \
+    asm volatile ("mfdcr %0, %1" : "=r" (__value) : "i" (dcrn)); \
+    __value;})
+
+
+/// Move to DCR
+///
+/// Writes \a value to device control register \a dcrn.  Note that
+/// \a dcrn must be a compile-time constant, since the DCR number is
+/// encoded as an immediate in the mtdcr instruction.
+
+#define mtdcr(dcrn, value) \
+    ({uint32_t __value = (value); \
+    asm volatile ("mtdcr %0, %1" : : "i" (dcrn), "r" (__value)); \
+    })
+
+
+/// Read-Modify-Write a DCR with OR (Set DCR bits)
+///
+/// Note that \a dcrn must be a compile-time constant. This operation is only
+/// guaranteed atomic in a critical section, as it expands to separate
+/// mfdcr/mtdcr instructions.
+
+#define or_dcr(dcrn, x) \
+    mtdcr(dcrn, mfdcr(dcrn) | (x))
+
+
+/// Read-Modify-Write a DCR with AND complement (Clear DCR bits)
+///
+/// Note that \a dcrn must be a compile-time constant. This operation is only
+/// guaranteed atomic in a critical section, as it expands to separate
+/// mfdcr/mtdcr instructions.
+
+#define andc_dcr(dcrn, x) \
+    mtdcr(dcrn, mfdcr(dcrn) & ~(x))
+
+#endif /* __PPC405_DCR_H__ */
diff --git a/src/ssx/ppc405/ppc405_exceptions.S b/src/ssx/ppc405/ppc405_exceptions.S
new file mode 100755
index 0000000..c8be2e3
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_exceptions.S
@@ -0,0 +1,832 @@
+// $Id: ppc405_exceptions.S,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_exceptions.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_exceptions.S
+/// \brief PPC405 exception vector area.
+///
+/// The PowerPC exception vector area has many small and large 'holes' in the
+/// SSX implementation. These are due to numerous unhandled exceptions and
+/// unimplemented exceptions in the exception vector area that comprises 8KB
+/// in the 405. SSX interrupt handling and other code is 'packed' into these
+/// holes to reduce the effective code footprint of SSX. The packing is done
+/// (hopefully) on a reasonable basis - we haven't tried to squeeze every last
+/// byte by chopping up routines willy-nilly and stitching them together with
+/// random branches - but some fragmentation has occurred in the interrupt
+/// handling code.
+///
+/// To facilitate the packing, the exception vector area is divided into 5 ELF
+/// sections (addresses are offsets into the exception area)
+///
+/// .vectors_0000 - Empty section for adding image header
+///
+/// .vectors_0100 - From 0x0100 to 0x0bff. The beginning of the table through
+/// the large space prior to the system call vector.
+///
+/// .vectors_0c00 - From 0x0c00 to 0x0eff. This is a moderately large area
+/// after the system call vector.
+///
+/// .vectors_0f00 - From 0x0f00 to 0x1fff. From the APU Unavailable vector
+/// through the major 3.5K hole above the Debug vector.
+///
+/// .vectors_2000 - From 0x2000 to 0x2003 - branch to the Debug handler.
+///
+/// The exception vector area must be aligned on a 64KB boundary.
+///
+/// Note that PgP mainstore boot and interrupt controller handling is
+/// currently hard-coded into this file - but it can easily be generalized if
+/// a port to another environment is required, assuming the new environment
+/// has something similar to a PgP or 405 ASIC interrupt controller.
+///
+/// \cond
+
+ .nolist
+#include "ssx.h"
+ .list
+
+### ****************************************************************************
+### .vectors_0000 - Empty section ( Image header will be placed in this section
+### from the linker command file )
+### ****************************************************************************
+
+ .section .vectors_0000, "a", @progbits
+ .global __vectors
+ .global __vectors_0000
+__vectors:
+__vectors_0000:
+
+### ****************************************************************************
+### .vectors_0100
+### ****************************************************************************
+ .section .vectors_0100, "ax", @progbits
+
+ .global __vectors_0100
+
+__vectors_0100:
+
+ ############################################################
+ # 0x0100 : Critical Interrupt
+ ############################################################
+
+__critical_interrupt:
+
+        ## The critical interrupt handler entry point is re-entrant - A handler
+        ## may allow preemption, which could cause another entry here.
+
+        ## Entry invariants:
+        ## 1. Critical interrupts are disabled;
+        ## 2. The SP points to a thread stack, the non-critical stack or
+        ## critical stack
+
+        ## Since fast-mode handlers can not use SSX services or alter the
+        ## machine context, the exit of a fast mode handler is a simple RF(C)I.
+
+        ## Begin by pushing the fast context on the stack.
+
+        _ssx_fast_ctx_push SSX_CRITICAL
+
+        ## Load critical status 0 and the handler array base address. Check
+        ## for interrupts pending in status register 0 while the IRQ is
+        ## computed and R5 is loaded with the critical flag.
+
+        _lwzi %r3, %r3, OCB_OCISR0
+        _liw %r6, __ppc405_irq_handlers
+        cmpwi %r3, 0
+        cntlzw %r4, %r3                 # IRQ number = index of highest set bit
+        li %r5, SSX_CRITICAL
+        bne+ critical_irq_found
+
+        ## No IRQ pending in interrupt set 0. Try set 1.
+
+        _lwzi %r3, %r3, OCB_OCISR1
+        cmpwi %r3, 0
+        cntlzw %r4, %r3
+        addi %r4, %r4, 32               # IRQs 32..63 are in status register 1
+        beq- critical_phantom
+
+        ## An active IRQ was found. At entry here R6 has the handler table
+        ## base address, R4 has the IRQ number, and R5 has the critical
+        ## flag. The IRQ is converted into a pointer to an 8-byte handler
+        ## structure, and the handler is dispatched. The call is made with the
+        ## parameters:
+
+        ## R3 = private
+        ## R4 = irq
+        ## R5 = SSX_CRITICAL
+
+critical_irq_found:
+
+        _save_update_kernel_context SSX_CRITICAL, %r4, %r7
+        slwi %r3, %r4, 3                # Table entries are 8 bytes each
+        lwzux %r7, %r6, %r3             # R7 = handler; R6 -> table entry
+        lwz %r3, 4(%r6)                 # R3 = private data word of the entry
+        mtlr %r7
+        blrl
+
+        ## Pop the stack/RFCI when (if) it returns here.
+
+fast_exit_critical:
+
+        _ssx_fast_ctx_pop_exit SSX_CRITICAL
+
+        ## This is a phantom interrupt - we got interrupted but no status bits
+        ## are set. The interrupt is marked as #64. The register used for the
+        ## handler table address (R6) is set to the special structure for the
+        ## phantom interrupt, with its address adjusted to make it appear to
+        ## be the 64th entry in the table.
+
+critical_phantom:
+
+        _liw %r6, __ppc405_phantom_irq
+        subi %r6, %r6, (64 * 8)
+        b critical_irq_found
+
+
+ ############################################################
+ # 0x0200 : Machine Check, Data or Instruction
+ ############################################################
+
+        .org __vectors_0100 + 0x0100
+__machine_check:
+
+        PPC405_MACHINE_CHECK_HANDLER
+
+        .org __machine_check + 0x20
+
+        .global __ssx_irq_fast2full
+__ssx_irq_fast2full:
+
+        ## Convert a fast-mode to a full-mode interrupt by saving the
+        ## (volatile - fast) context, and switching to the appropriate system
+        ## stack.
+
+        ## Entry invariants:
+        ## 1. The SP/stack must be exactly as it was when the fast-mode
+        ## handler was entered.
+        ## 2. No changes have been made to the MSR - the interrupt level must
+        ## remain disabled.
+        ## 3. The handler owns the fast context and has not modified the other
+        ## register context. This routine can only use the (volatile -
+        ## fast) register context.
+
+        ## 41 (linear) instructions plus alignment
+
+        ## Start by pushing the (volatile - fast) context. Technically we also
+        ## need to save the CR as our contract with the handler is not to
+        ## disturb any of its register state.
+
+        _ssx_vol_fast_ctx_push SSX_IRQ_CONTEXT
+        mfcr %r12
+
+        ## USPRG0 tells whether this is a critical or non-critical interrupt.
+        ## The high-order 8 bits of USPRG0 counts critical interrupt nesting,
+        ## and the SSX preemption rules guarantee that if the count is > 0 then
+        ## we are in a critical handler.
+
+        mfusprg0 %r8
+        extrwi. %r9, %r8, 8, 0          # R9 = critical nest count (bits 0:7)
+        beq fast2full_noncritical
+
+        ## If the critical interrupt count is > 1, we are already in a
+        ## nested critical interrupt, so we're already on the critical stack
+        ## and there's nothing left to do.
+
+        cmpwi %r9, 1
+        bne 1f
+
+        ## Otherwise, save the current stack pointer and switch to the critical
+        ## stack.
+
+        _stwsd %r1, __ssx_saved_sp_critical
+        _lwzsd %r1, __ssx_critical_stack
+
+        ## Restore the CR and return to the now full-mode handler.
+
+1:
+        mtcr %r12
+        blr
+
+        ## Non-critical interrupts are handled analogously to the above,
+        ## except that bits 8:15 of USPRG0 (held in R8) are the non-critical
+        ## count. At entry here the (volatile - fast) context has been pushed,
+        ## R8 has USPRG0 and R12 contains the saved CR.
+
+        ## Note that it would violate a kernel/API invariant if this routine
+        ## were entered from outside an interrupt context.
+
+        .cache_align
+fast2full_noncritical:
+
+        extrwi %r9, %r8, 8, 8           # R9 = noncritical count (bits 8:15)
+        cmpwi %r9, 1
+        bne 1f
+
+        _stwsd %r1, __ssx_saved_sp_noncritical
+        _lwzsd %r1, __ssx_noncritical_stack
+
+1:
+
+        .if (SSX_ERROR_CHECK_KERNEL | SSX_ERROR_CHECK_API)
+        cmpwi %r9, 0
+        bne 2f
+        _ssx_panic PPC405_IRQ_FAST2FULL_INVARIANT
+2:
+        .endif
+
+        mtcr %r12
+        blr
+
+
+ ############################################################
+ # 0x0300 : Data Storage Interrupt
+ ############################################################
+
+        .org __vectors_0100 + 0x0200
+__data_storage:
+
+        PPC405_DATA_STORAGE_HANDLER
+
+        .org __data_storage + 0x20
+
+        .global __ssx_irq_full_mode_exit
+__ssx_irq_full_mode_exit:
+
+        ## Exit a full-mode handler.
+
+        ## Entry invariants:
+        ## 1. The SP/stack must be in exactly the same state it was left in at
+        ## the exit of __ssx_irq_fast2full.
+        ## 2. It is assumed that the preemption rules of SSX have been followed
+        ## - in particular that critical handlers have not enabled
+        ## non-critical interrupts.
+
+        ## We can freely modify the volatile context here - the handler is done
+        ## and we will restore the interrupted volatile context.
+
+        ## 22 linear instructions
+
+        ## If the critical count is non-zero, then the SSX preemption rules
+        ## guarantee that we are exiting from a critical interrupt
+        ## handler. This test is safe to make even if critical interrupts are
+        ## enabled, because the variable is set exactly once in a critical
+        ## section.
+
+        mfusprg0 %r3
+        extrwi. %r4, %r3, 8, 0          # R4 = critical nest count (bits 0:7)
+        beq full_exit_noncritical
+
+        ## The context restore must be done from a critical section, in case
+        ## the handler enabled preemption.
+
+        _ssx_critical_section_enter SSX_CRITICAL, %r5, %r6
+
+        ## If the critical count (R4) is > 1 then this is a nested interrupt
+        ## and we can simply pop the context and RFCI.
+
+        cmpwi %r4, 1
+        bne full_exit_critical
+
+        ## Otherwise, restore the saved stack pointer before popping and RFCI.
+
+        _lwzsd %r1, __ssx_saved_sp_critical
+
+full_exit_critical:
+        _ssx_vol_fast_ctx_pop SSX_IRQ_CONTEXT, SSX_CRITICAL
+        b fast_exit_critical
+
+
+ ############################################################
+ # 0x0400 : Instruction Storage Interrupt
+ ############################################################
+
+        .org __vectors_0100 + 0x0300
+__instruction_storage:
+
+        PPC405_INSTRUCTION_STORAGE_HANDLER
+
+        .org __instruction_storage + 0x20
+
+        ## The idle thread has no permanent register context. The idle thread
+        ## entry point is re-entered whenever the idle thread is scheduled.
+
+        .global __ssx_idle_thread
+        .global __ssx_idle_thread_from_bootloader
+
+__ssx_idle_thread:
+
+        ## The idle thread 'uses' the non-critical stack. Any register context
+        ## pushed here is redundant and is wiped out/ignored every time the
+        ## idle thread is re-scheduled.
+
+        ## The idle thread simply establishes a default machine context and
+        ## enters the wait-enable state. The idle thread is always entered
+        ## with non-critical interrupts disabled.
+        ##
+        ## The kernel context is initialized to indicate that the idle thread
+        ## is running - the idle thread priority is SSX_THREADS, and the
+        ## 'thread-mode' bit is asserted as well.
+        ##
+        ## This loop can also be called from the SSX bootloader if main()
+        ## returns - in which case we don't muck with the USPRG0 or the stack
+        ## pointer.
+
+        li %r3, (SSX_THREADS | PPC405_THREAD_MODE)
+        mtusprg0 %r3
+        _lwzsd %r1, __ssx_noncritical_stack
+
+__ssx_idle_thread_from_bootloader:
+
+        li %r3, SSX_THREADS
+        SSX_TRACE_THREAD_SWITCH %r3, %r4
+        _lwzsd %r3, __ssx_thread_machine_context_default
+        _oriwa %r3, %r3, MSR_WE         # Wait-enable: sleep until interrupt
+        mtmsr %r3
+        b .
+
+        ## ssx_halt() is implemented on the PPC405 by disabling all
+        ## interrupts, forcing external debug mode, and executing a trap. A
+        ## 0x0 word appears after the trap instruction similar to the default
+        ## SSX_PANIC macro. The caller may also call ssx_halt() with
+        ## parameters which will appear in R3, R4, etc. In the Simics
+        ## environment we use the Simics 'trap' since Simics does not handle
+        ## the PPC405 TRAP instruction correctly.
+
+        .global ssx_halt
+ssx_halt:
+        li %r31, 0
+        mtmsr %r31                      # MSR = 0: all interrupts disabled
+        isync
+        _liwa %r31, (DBCR0_EDM | DBCR0_TDE)
+        mtdbcr0 %r31
+        isync
+#if SIMICS_ENVIRONMENT
+        rlwimi 1, 1, 0, 0, 0            # Simics magic breakpoint instruction
+#else
+        trap
+#endif
+        .long 0
+
+ ############################################################
+ # 0x0500 : External Interrupt
+ ############################################################
+
+        .org __vectors_0100 + 0x0400
+__external_interrupt:
+
+        ## The non-critical interrupt handler entry point is re-entrant - A
+        ## handler may allow preemption, which could cause another entry here.
+
+        ## Entry invariants:
+        ## 1. Non-critical interrupts are disabled;
+        ## 2. The SP points to a thread stack or the non-critical stack.
+
+        ## Since fast-mode handlers can not use SSX services or alter the
+        ## machine context, the exit of a fast mode handler is a simple RF(C)I.
+
+        ## Begin by pushing the fast context on the current stack.
+
+        _ssx_fast_ctx_push SSX_NONCRITICAL
+
+        ## Load noncritical status 0 and the handler array base address. Check
+        ## for interrupts pending in status register 0 while the IRQ is
+        ## computed and R5 is loaded with the noncritical flag.
+
+        _lwzi %r3, %r3, OCB_ONISR0
+        _liw %r6, __ppc405_irq_handlers
+        cmpwi %r3, 0
+        cntlzw %r4, %r3                 # IRQ number = index of highest set bit
+        li %r5, SSX_NONCRITICAL
+        bne+ noncritical_irq_found
+
+        ## No IRQ pending in interrupt set 0. Try set 1.
+
+        _lwzi %r3, %r3, OCB_ONISR1
+        cmpwi %r3, 0
+        cntlzw %r4, %r3
+        addi %r4, %r4, 32               # IRQs 32..63 are in status register 1
+        beq- noncritical_phantom
+
+        ## An active IRQ was found. At entry here R6 has the handler table
+        ## base address, R4 has the IRQ number, and R5 has the noncritical
+        ## flag. The IRQ is converted into a pointer to an 8-byte handler
+        ## structure, and the handler is dispatched. The call is made with the
+        ## parameters:
+
+        ## R3 = private
+        ## R4 = irq
+        ## R5 = SSX_NONCRITICAL
+
+noncritical_irq_found:
+
+        _save_update_kernel_context SSX_NONCRITICAL, %r4, %r7
+        slwi %r3, %r4, 3                # Table entries are 8 bytes each
+        lwzux %r7, %r6, %r3             # R7 = handler; R6 -> table entry
+        lwz %r3, 4(%r6)                 # R3 = private data word of the entry
+        mtlr %r7
+        blrl
+
+        ## Pop the stack/RFI when (if) it returns here.
+
+fast_exit_noncritical:
+
+        _ssx_fast_ctx_pop_exit SSX_NONCRITICAL
+
+        ## This is a phantom interrupt - we got interrupted but no status bits
+        ## are set. The interrupt is marked as #64. The register used for the
+        ## handler table address (R6) is set to the special structure for the
+        ## phantom interrupt, with its address adjusted to make it appear to
+        ## be the 64th entry in the table.
+
+noncritical_phantom:
+
+        _liw %r6, __ppc405_phantom_irq
+        subi %r6, %r6, (64 * 8)
+        b noncritical_irq_found
+
+ ############################################################
+ # 0x0600 : Alignment Exception
+ ############################################################
+
+        .org __vectors_0100 + 0x0500
+__alignment_exception:
+
+        PPC405_ALIGNMENT_HANDLER
+
+        .org __alignment_exception + 0x20
+pit_handler:
+
+        ## The portable timer handler of SSX is a full-mode handler with the
+        ## prototype: void (*ssx_timer_handler)(void).
+        ##
+        ## To support the portable specification, the kernel clears the
+        ## interrupt by writing the PIS back into the TSR before calling the
+        ## handler. SSX does not use the PIT in auto-reload mode - it is
+        ## tickless - so the interrupt will not fire again until reprogrammed
+        ## by the timer handler. The timer handler does not take any arguments.
+
+        ## 21 instructions
+
+        _ssx_fast_ctx_push SSX_NONCRITICAL
+        li %r3, PPC405_IRQ_PIT
+        _save_update_kernel_context SSX_NONCRITICAL, %r3, %r4
+
+        _liwa %r3, TSR_PIS
+        mttsr %r3                       # Write-1-to-clear the PIT status bit
+        isync
+
+        _ssx_irq_fast2full __ssx_timer_handler
+
+
+ ############################################################
+ # 0x0700 : Program Interrupt
+ ############################################################
+
+        .org __vectors_0100 + 0x0600
+__program_interrupt:
+
+        PPC405_PROGRAM_HANDLER
+
+        .org __program_interrupt + 0x20
+
+        ## Exiting a full-mode non-critical handler is more complex than the
+        ## critical case, because the handler may have made a new
+        ## highest-priority thread runnable and we may need to go through a
+        ## delayed scheduling step.
+
+        ## Note that the idle thread is treated as a special case. The idle
+        ## thread has no permanent register context. To avoid having to
+        ## allocate a stack area for the idle thread, the idle thread
+        ## 'uses' the non-critical stack. When the idle thread is interrupted
+        ## the (redundant) context is pushed, but is then effectively lost.
+        ## Whenever we restore the idle thread we simply reenter the idle
+        ## thread entry point.
+
+        ## At entry:
+        ## 1. R3 holds the value of USPRG0 (__SsxKernelContext)
+
+        ## 33 linear instructions.
+
+full_exit_noncritical:
+
+        ## Enter a critical section for the return from interrupt, in the event
+        ## that the handler enabled preemption.
+
+        _ssx_critical_section_enter SSX_NONCRITICAL, %r4, %r5
+
+        ## If the non-critical count is > 1 then this is a nested interrupt
+        ## and we can simply pop the context and RFI. Note that it would
+        ## violate a kernel/API invariant if this routine were entered from
+        ## outside an interrupt context (interrupt level == 0).
+
+        extrwi. %r4, %r3, 8, 8          # R4 = noncritical count (bits 8:15)
+
+        .if (SSX_ERROR_CHECK_KERNEL | SSX_ERROR_CHECK_API)
+        bne 1f
+        _ssx_panic PPC405_IRQ_FULL_EXIT_INVARIANT
+1:
+        .endif
+
+        cmpwi %r4, 1
+        bne exit_noncritical_without_switch
+
+        ## Otherwise, restore the saved stack pointer and continue.
+
+        _lwzsd %r1, __ssx_saved_sp_noncritical
+
+        ## If we are not in thread mode (i.e., we took an interrupt in an
+        ## interrupt-only configuration of SSX or after ssx_initialize() but
+        ## before ssx_start_threads) simply pop the context and RFI - in this
+        ## case we'll most likely be returning to main() or the non-thread-mode
+        ## idle thread.
+
+        andi. %r4, %r3, PPC405_THREAD_MODE
+        beq exit_noncritical_without_switch
+
+        ## Now, check for a delayed context switch. If none is pending, we can
+        ## exit (after a check for the idle thread special case).
+
+        _lwzsd %r3, __ssx_delayed_switch
+        cmpwi %r3, 0
+        bne noncritical_switch
+
+        _lwzsd %r3, __ssx_current_thread
+        cmpwi %r3, 0                    # NULL current thread == idle thread
+        beq __ssx_idle_thread
+
+exit_noncritical_without_switch:
+        _ssx_vol_fast_ctx_pop SSX_IRQ_CONTEXT, SSX_NONCRITICAL
+        b fast_exit_noncritical
+
+        ## The non-critical interrupt activated a delayed context switch. The
+        ## C-level code has taken care of the scheduling decisions - we simply
+        ## need to implement them here.
+
+noncritical_switch:
+
+        ## Clear the delayed switch flag and go to the context switch code to
+        ## finish the switch.
+
+        li %r3, 0
+        _stwsd %r3, __ssx_delayed_switch
+
+        b thread_save_non_volatile_and_switch
+
+
+ ############################################################
+ # 0x0800 : FPU Unavailable
+ ############################################################
+
+        .org __vectors_0100 + 0x0700
+__fpu_unavailable:
+
+        PPC405_FPU_UNAVAILABLE_HANDLER
+
+        .org __fpu_unavailable + 0x20
+
+
+        ## Exit traces are moved here because the code area (0x100 bytes)
+        ## reserved for individual interrupts is overflowing when tracing is
+        ## enabled. This is kind of a hack: We know that this trace only
+        ## occurs when we're about to exit the fast context, at a place
+        ## where we can use any of the fast registers.
+
+__ssx_trace_critical_irq_exit:
+        SSX_TRACE_CRITICAL_IRQ_EXIT %r3, %r4
+        blr
+
+__ssx_trace_noncritical_irq_exit:
+        SSX_TRACE_NONCRITICAL_IRQ_EXIT %r3, %r4
+        blr
+
+ ## >>>>>>>>>> Pack .vectors_0100 here. Room for ~900 bytes. <<<<<<<<<<
+
+### ****************************************************************************
+### .vectors_0c00
+### ****************************************************************************
+
+ .section .vectors_0c00, "ax", @progbits
+ .global __vectors_0c00
+__vectors_0c00:
+
+ ############################################################
+ # 0x0c00 : System Call
+ ############################################################
+
+ .org __vectors_0c00 + 0x0
+ .global __ssx_next_thread_resume
+
+__system_call:
+
+ ## The system call exception is used by SSX as a handy way to start a
+ ## context switch, as the continuation address and MSR of the thread to
+ ## be swapped out are saved in SRR0 and SRR1.
+
+ ## Non-critical interrupts are disabled at entry.
+
+ ## Note that the system call exception begins a large free area
+ ## so there is plenty of room for the context switch code.
+
+ ## Begin by saving the volatile context of the current thread.
+
+ _ssx_fast_ctx_push SSX_NONCRITICAL
+ _ssx_vol_fast_ctx_push SSX_THREAD_CONTEXT
+
+thread_save_non_volatile_and_switch:
+
+ ## Finish the thread context save by pushing the non-volatile context
+ ## and saving the resulting stack pointer in the thread structure. If
+ ## the current thread is the idle thread this step is bypassed.
+
+ ## This symbol is also used as an entry point by the non-critical
+ ## interrupt handler - non-critical interrupts are disabled here.
+
+ _lwzsd %r3, __ssx_current_thread
+ cmpwi %r3, 0
+ beq __ssx_next_thread_resume
+
+ _ssx_non_vol_ctx_push
+ stw %r1, SSX_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
+
+ ## The next thread becomes the current thread, and we switch to its
+ ## stack - unless the new thread is the idle thread, in which case it
+ ## (the idle thread) is simply resumed.
+
+__ssx_next_thread_resume:
+
+ _lwzsd %r3, __ssx_next_thread
+ _stwsd %r3, __ssx_current_thread
+
+ cmpwi %r3, 0
+ beq __ssx_idle_thread
+
+ lwz %r1, SSX_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
+
+ ## Restore the thread context and resume the new thread. The kernel
+ ## context in thread mode is simply the thread priority OR'ed with the
+ ## thread-mode flag. All other fields are cleared.
+
+ _ssx_non_vol_ctx_pop
+ _ssx_vol_fast_ctx_pop SSX_THREAD_CONTEXT, SSX_NONCRITICAL
+
+ _lbzsd %r3, __ssx_next_priority
+ SSX_TRACE_THREAD_SWITCH %r3, %r4
+ ori %r3, %r3, PPC405_THREAD_MODE
+ mtusprg0 %r3
+
+ _ssx_fast_ctx_pop
+ rfi
+
+ ## >>>>>>>> Pack .vectors_0c00 here - room for ~500 bytes <<<<<<<
+
+### ****************************************************************************
+### .vectors_0f00
+### ****************************************************************************
+
+ .section .vectors_0f00, "ax", @progbits
+ .global __vectors_0f00
+__vectors_0f00:
+
+ ############################################################
+ # 0x0f20 : APU Unavailable
+ ############################################################
+
+ .org __vectors_0f00 + 0x20 # 0x0f20
+__apu_unavailable:
+
+ PPC405_APU_UNAVAILABLE_HANDLER
+
+ .org __vectors_0f00 + 0x40 # 0x0f40
+
+fit_handler:
+
+ ## The FIT handler is user defined, and is a fast-mode handler. By
+ ## convention the kernel clears the interrupt by writing the FIS back
+ ## into the TSR.
+
+ _ssx_fast_ctx_push SSX_NONCRITICAL
+
+ _lwzsd %r3, __ppc405_fit_arg
+ li %r4, PPC405_IRQ_FIT
+ li %r5, SSX_NONCRITICAL
+
+ _save_update_kernel_context SSX_NONCRITICAL, %r4, %r6
+
+ _liwa %r6, TSR_FIS
+ mttsr %r6
+ isync
+
+ _lwzsd %r6, __ppc405_fit_routine
+ mtlr %r6
+ blrl
+
+ b fast_exit_noncritical
+
+
+ ############################################################
+ # 0x10x0 : PIT, FIT and Watchdog Interrupts
+ ############################################################
+
+ .org __vectors_0f00 + 0x100 # 0x1000
+__pit_interrupt:
+
+ b pit_handler
+
+ .org __vectors_0f00 + 0x110 # 0x1010
+__fit_interrupt:
+
+ b fit_handler
+
+ .org __vectors_0f00 + 0x120 # 0x1020
+__watchdog_interrupt:
+
+ ## Watchdog setup is described in the SSX Specification.
+ ## The kernel clears TSR[WIS] prior to calling the handler.
+ ## The watchdog handler is a critical, fast-mode handler.
+
+ _ssx_fast_ctx_push SSX_CRITICAL
+
+ _lwzsd %r3, __ppc405_watchdog_arg
+ li %r4, PPC405_IRQ_WATCHDOG
+ li %r5, SSX_CRITICAL
+
+ _save_update_kernel_context SSX_CRITICAL, %r4, %r6
+
+ _liwa %r6, TSR_WIS
+ mttsr %r6
+ isync
+
+ _lwzsd %r6, __ppc405_watchdog_routine
+ mtlr %r6
+ blrl
+
+ b fast_exit_critical
+
+
+ ############################################################
+ # 0x1100 : Data TLB Miss
+ ############################################################
+
+ .org __vectors_0f00 + 0x200 # 0x1100
+__data_tlb_miss:
+
+ PPC405_DATA_TLB_MISS_HANDLER
+
+ .org __data_tlb_miss + 0x20
+debug_handler:
+
+ ## SSX does nothing upon reception of the debug interrupt other
+ ## than calling the handler (if non-0). The debug handler is a
+ ## fast-mode handler.
+
+ _ssx_fast_ctx_push SSX_CRITICAL
+
+ _lwzsd %r3, __ppc405_debug_arg
+ li %r4, PPC405_IRQ_DEBUG
+ li %r5, SSX_CRITICAL
+
+ _save_update_kernel_context SSX_CRITICAL, %r4, %r6
+
+ _lwzsd %r6, __ppc405_debug_routine
+ cmpwi %r6, 0
+ mtlr %r6
+ beq debug_exit
+ blrl
+
+debug_exit:
+ b fast_exit_critical
+
+
+ ############################################################
+ # 0x1200 : Instruction TLB Miss
+ ############################################################
+
+ .org __vectors_0f00 + 0x300 # 0x1200
+__instruction_tlb_miss:
+
+ PPC405_INSTRUCTION_TLB_MISS_HANDLER
+
+ .org __instruction_tlb_miss + 0x20
+
+ ## >>>>>> Pack .vectors_0f00 A huge hole here - ~3.5KB <<<<<<
+
+### ****************************************************************************
+### .vectors_2000
+### ****************************************************************************
+
+ .section .vectors_2000, "ax", @progbits
+
+ .global __vectors_2000
+__vectors_2000:
+
+ ############################################################
+ # 0x2000 : Debug Interrupt
+ ############################################################
+
+__debug_interrupt:
+ b debug_handler
+
+/// \endcond
diff --git a/src/ssx/ppc405/ppc405_init.c b/src/ssx/ppc405/ppc405_init.c
new file mode 100755
index 0000000..e818737
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_init.c
@@ -0,0 +1,80 @@
+// $Id: ppc405_init.c,v 1.1.1.1 2013/12/11 21:03:26 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_init.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_init.c
+/// \brief PPC405 initialization routines
+///
+/// The entry points in this file are routines that are typically used during
+/// initialization, and their code space could be deallocated and recovered if
+/// no longer needed by the application after initialization.
+
+#include "ssx.h"
+
+// Note that __ppc405_system_setup() is called from the SSX bootloader early
+// in the initialization, at a point before the application has enabled
+// critical or external interrupts.
+
+void
+__ppc405_system_setup()
+{
+ SsxIrqId irq;
+
+ // Initialize the interrupt vectors.
+
+ for (irq = 0; irq < PPC405_IRQS; irq++) {
+ __ppc405_irq_handlers[irq].handler = __ppc405_default_irq_handler;
+ __ppc405_irq_handlers[irq].arg = 0;
+ }
+
+ __ppc405_phantom_irq.handler = __ppc405_phantom_irq_handler;
+ __ppc405_phantom_irq.arg = 0;
+
+ // Initialize special interrupt handlers
+
+ __ppc405_fit_routine = __ppc405_default_irq_handler;
+ __ppc405_fit_arg = 0;
+
+ __ppc405_watchdog_routine = __ppc405_default_irq_handler;
+ __ppc405_watchdog_arg = 0;
+
+ __ppc405_debug_routine = __ppc405_default_irq_handler;
+ __ppc405_debug_arg = 0;
+
+ // Enable the PIT interrupt, but not auto-reload mode. Clear the status
+ // of all timers for good measure.
+
+ andc_spr(SPRN_TCR, TCR_ARE);
+ or_spr(SPRN_TCR, TCR_PIE);
+
+ or_spr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS);
+
+ // Call system-specific setup
+
+#ifdef CHIP_PGP
+ void __pgp_setup();
+ __pgp_setup();
+#endif
+
+}
+
+
+// Set the timebase using the PowerPC protocol.
+
+void
+__ssx_timebase_set(SsxTimebase t)
+{
+ Uint64 tb;
+
+ tb.value = t;
+
+ mttbl(0);
+ mttbu(tb.word[0]);
+ mttbl(tb.word[1]);
+}
+
+
diff --git a/src/ssx/ppc405/ppc405_irq.h b/src/ssx/ppc405/ppc405_irq.h
new file mode 100755
index 0000000..d85e9ce
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_irq.h
@@ -0,0 +1,342 @@
+#ifndef __PPC405_IRQ_H__
+#define __PPC405_IRQ_H__
+
+// $Id: ppc405_irq.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_irq.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_irq.h
+/// \brief PPC405 interrupt handling for SSX
+///
+/// Interrupt handling protocols and interrupt controller programming are
+/// inherently non-portable, however SSX defines APIs that may be useful among
+/// different machines.
+///
+/// The interrupt controllers in PPC405 ASICS and PGP allow interrupts to be
+/// programmed as critical or non-critical, with programmable polarity and
+/// edge or level sensitivity.
+
+
+// Define pseudo-IRQ numbers for PPC405 built-in interrupts. These numbers
+// will appear in bits 16:23 of USPRG0 (__SsxKernelContext) when the handlers
+// are active, and are also passed as the second argument of the handlers when
+// they are invoked.
+
+#define PPC405_IRQ_PIT 0x80
+#define PPC405_IRQ_FIT 0x81
+#define PPC405_IRQ_WATCHDOG 0x82
+#define PPC405_IRQ_DEBUG 0x83
+
+// These are suggested values to use for the IRQ in the __SsxKernelContext if
+// the application defines handlers for any of the 'unhandled exceptions'.
+
+#define PPC405_EXC_MACHINE_CHECK 0x90
+#define PPC405_EXC_DATA_STORAGE 0x91
+#define PPC405_EXC_INSTRUCTION_STORAGE 0x92
+#define PPC405_EXC_ALIGNMENT 0x93
+#define PPC405_EXC_PROGRAM 0x94
+#define PPC405_EXC_FPU_UNAVAILABLE 0x95
+#define PPC405_EXC_APU_UNAVAILABLE 0x96
+#define PPC405_EXC_DATA_TLB_MISS 0x97
+#define PPC405_EXC_INSTRUCTION_TLB_MISS 0x98
+
+
+// Unhandled exceptions default to a kernel panic, but the application can
+// override these definitions. Note that the exception area only allocates 32
+// bytes (8 instructions) to an unhandled exception, so any redefinition
+// would most likely be a branch to an application-defined handler.
+
+#ifndef PPC405_MACHINE_CHECK_HANDLER
+#define PPC405_MACHINE_CHECK_HANDLER SSX_PANIC(0x0200)
+#endif
+
+#ifndef PPC405_DATA_STORAGE_HANDLER
+#define PPC405_DATA_STORAGE_HANDLER SSX_PANIC(0x0300)
+#endif
+
+#ifndef PPC405_INSTRUCTION_STORAGE_HANDLER
+#define PPC405_INSTRUCTION_STORAGE_HANDLER SSX_PANIC(0x0400)
+#endif
+
+#ifndef PPC405_ALIGNMENT_HANDLER
+#define PPC405_ALIGNMENT_HANDLER SSX_PANIC(0x0600)
+#endif
+
+#ifndef PPC405_PROGRAM_HANDLER
+#define PPC405_PROGRAM_HANDLER SSX_PANIC(0x0700)
+#endif
+
+#ifndef PPC405_FPU_UNAVAILABLE_HANDLER
+#define PPC405_FPU_UNAVAILABLE_HANDLER SSX_PANIC(0x0800)
+#endif
+
+#ifndef PPC405_APU_UNAVAILABLE_HANDLER
+#define PPC405_APU_UNAVAILABLE_HANDLER SSX_PANIC(0x0f20)
+#endif
+
+#ifndef PPC405_DATA_TLB_MISS_HANDLER
+#define PPC405_DATA_TLB_MISS_HANDLER SSX_PANIC(0x1100)
+#endif
+
+#ifndef PPC405_INSTRUCTION_TLB_MISS_HANDLER
+#define PPC405_INSTRUCTION_TLB_MISS_HANDLER SSX_PANIC(0x1200)
+#endif
+
+
+////////////////////////////////////////////////////////////////////////////
+// SSX API
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef __ASSEMBLER__
+
+/// An IRQ handler takes 3 arguments:
+/// \arg \c arg - Private handler data installed by \c ssx_irq_setup() or
+/// \c ssx_irq_handler_set().
+/// \arg \c irq - The IRQ id; to enable a generic handler to manipulate
+///               its own interrupt status.
+/// \arg \c priority - One of the values \c SSX_CRITICAL or \c
+/// SSX_NONCRITICAL; to enable a generic handler to choose
+/// a behavior appropriate for the interrupt priority.
+
+typedef void (*SsxIrqHandler)(void* arg,
+ SsxIrqId irq,
+ int priority);
+
+/// Declare a subroutine as an IRQ handler
+
+#define SSX_IRQ_HANDLER(f) void f(void *arg, SsxIrqId irq, int priority)
+
+int ssx_irq_setup(SsxIrqId irq,
+ int polarity,
+ int trigger);
+
+int ssx_irq_handler_set(SsxIrqId irq,
+ SsxIrqHandler handler,
+ void* arg,
+ int priority);
+
+void ssx_irq_enable(SsxIrqId irq);
+void ssx_irq_disable(SsxIrqId irq);
+void ssx_irq_statusclear(SsxIrqId irq);
+
+SSX_IRQ_HANDLER(__ppc405_default_irq_handler);
+SSX_IRQ_HANDLER(__ppc405_phantom_irq_handler);
+
+
+int
+ppc405_fit_setup(int tcr_fp, SsxIrqHandler handler, void* arg);
+
+
+/// The address of the optional FIT interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+SsxIrqHandler __ppc405_fit_routine;
+
+
+/// The private data of the optional FIT interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+void* __ppc405_fit_arg;
+
+
+int
+ppc405_watchdog_setup(int tcr_wp, int tcr_wrc,
+ SsxIrqHandler handler, void* arg);
+
+
+/// The address of the optional Watchdog interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+SsxIrqHandler __ppc405_watchdog_routine;
+
+
+/// The private data of the optional Watchdog interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+void* __ppc405_watchdog_arg;
+
+
+int
+ppc405_debug_setup(SsxIrqHandler handler, void* arg);
+
+
+/// The address of the optional Debug interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+SsxIrqHandler __ppc405_debug_routine;
+
+
+/// The private data of the optional Watchdog interrupt handler
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+volatile
+void* __ppc405_debug_arg;
+
+
+// Note: Why SSX_IRQ_FAST2FULL (below) is implemented so strangely.
+//
+// I am adamant that I want to have a macro in the 'C' environment to create
+// these bridge functions. However the limitations of the C preprocessor and
+// the intelligence of the GCC 'asm' facility consipre against a
+// straightforward solution. The only way that I was able to find to get
+// naked assembly code into the output stream is to use 'asm' with simple
+// strings - I couldn't make it work with any kind of argument, as 'asm' would
+// reinterpret the arguments and resulting assembler code in various ways.
+//
+// There is another alternative that I tried whereby I created a subroutine
+// call and then filled in the subroutine body with 'asm' code. However, the
+// subroutine wrapper that GCC creates only works for PowerPC fast-mode
+// handlers if GCC is invoked with optimization, which ensures that the
+// wrapper doesn't touch the stack pointer or other registers. True, we'll
+// always use optimization, but I did not want to have to make this
+// requirement for using this macro.
+
+/// This macro creates a 'bridge' handler that converts the initial fast-mode
+/// IRQ dispatch into a call of a full-mode IRQ handler. The full-mode
+/// handler is defined by the user (presumably as a \c C subroutine) and has
+/// the same prototype (type SsxIrqHandler) as the fast handler.
+///
+/// \param fast_handler This will be the global function name of the fast
+/// IRQ handler created by this macro. This is the symbol
+/// that should be passed in as the \a handler argument
+/// of \c ssx_irq_setup() and \c ssx_irq_handler_set().
+///
+/// \param full_handler This is the name of the user-defined full-mode
+/// handler which is invoked through this bridge.
+///
+/// \e BUG \e ALERT : Beware of passing the \c full_handler to IRQ setup
+/// APIs. This won't be caught by the compiler (because the \c full_handler
+/// has the correct prototype) and will lead to nasty bugs. Always pass in
+/// the \c fast_handler symbol to IRQ setup APIS.
+///
+/// The code stream injected into the GCC assembler output in response to
+///
+/// SSX_IRQ_FAST2FULL(fast_handler, full_handler)
+///
+/// is (comments added for clarification) :
+///
+/// \code
+/// .text
+/// .global fast_handler
+/// .align 5 # Hard-coded PPC405 cache-line alignment
+/// fast_handler = . # Can't macro expand LABEL: - this is equivalent
+/// bl __ssx_irq_fast2full # The fast-mode to full-mode conversion sequence
+/// bl full_handler
+/// b __ssx_irq_full_mode_exit
+/// \endcode
+///
+/// The macro also declares the prototype of the fast handler:
+///
+/// \code
+/// SSX_IRQ_HANDLER(fast_handler);
+/// \endcode
+///
+
+#define SSX_IRQ_FAST2FULL(fast_handler, full_handler) \
+ SSX_IRQ_HANDLER(fast_handler); \
+ __SSX_IRQ_FAST2FULL(.global fast_handler, fast_handler = ., bl full_handler)
+
+#define __SSX_IRQ_FAST2FULL(global, label, call) \
+asm(".text"); \
+asm(#global); \
+asm(".align 5"); \
+asm(#label); \
+asm("bl __ssx_irq_fast2full"); \
+asm(#call); \
+asm("b __ssx_irq_full_mode_exit");
+
+#endif /* __ASSEMBLER__ */
+
+// It's hard to be portable and get all of the definitions and headers in the
+// correct order. We need to bring in the system IRQ header here.
+
+#ifdef CHIP_PGP
+#include "pgp_irq.h"
+#endif
+
+/// \page ppc405_irq_macros_page PPC405 SSX IRQ Assembler Macros
+///
+///
+/// \section fast2full_asm Fast-Mode to Full-Mode Handler Conversion
+///
+/// This macro produces the calling sequence required to convert a
+/// fast-mode interrupt handler to a full-mode interrupt handler. The
+/// full-mode handler is implemented by another subroutine. The
+/// requirements for invoking this macro are:
+///
+/// \li The stack pointer and stack must be exactly as they were when the
+/// fast-mode handler was entered.
+///
+/// \li No changes have been made to the MSR - the interrupt level must
+/// remain disabled.
+///
+/// \li The handler owns the fast context and has not modified the other
+/// register context. The conversion process will not modify any
+/// register in the fast context (other than the LR used for
+/// subroutine linkage).
+///
+/// The final condition above means that the \a full_handler will
+/// begin with the fast-mode context exactly as it was (save for LR)
+/// at conversion, including the contents of GPR3-7 (the first 5
+/// PowerPC ABI parameter passing registers) and the entire CR.
+///
+/// Forms:
+///
+/// \c _ssx_irq_fast2full \a full_handler
+/// \cond
+
+#ifdef __ASSEMBLER__
+
+ .macro _ssx_irq_fast2full full_handler
+ bl __ssx_irq_fast2full
+ bl \full_handler
+ b __ssx_irq_full_mode_exit
+ .endm
+
+#endif /* __ASSEMBLER__ */
+
+/// \endcond
+
+#ifndef __ASSEMBLER__
+
+
+/// This structure holds the interrupt handler routine addresses and private
+/// data. Assembler code assumes the given structure layout, so any changes
+/// to this structure will need to be reflected down into the interrupt
+/// dispatch assembler code.
+
+typedef struct {
+ SsxIrqHandler handler;
+ void *arg;
+} Ppc405IrqHandler;
+
+
+/// Interrupt handlers for real (implemented interrupts)
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+Ppc405IrqHandler __ppc405_irq_handlers[PPC405_IRQS];
+
+
+/// The 'phantom interrupt' handler
+///
+/// A 'phantom' interrupt occurs when the interrupt handling code in the
+/// kernel is entered, but no interrupt is found pending in the controller.
+/// This is considered a serious bug, as it indicates a short window
+/// condition where a level-sensitive interrupt has been asserted and then
+/// quickly deasserted before it can be handled.
+
+UNLESS__PPC405_IRQ_CORE_C__(extern)
+Ppc405IrqHandler __ppc405_phantom_irq;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PPC405_IRQ_H__ */
diff --git a/src/ssx/ppc405/ppc405_irq_core.c b/src/ssx/ppc405/ppc405_irq_core.c
new file mode 100755
index 0000000..3cd7469
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_irq_core.c
@@ -0,0 +1,49 @@
+// $Id: ppc405_irq_core.c,v 1.1.1.1 2013/12/11 21:03:26 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_irq_core.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_irq_core.c
+/// \brief Core IRQ routines required of any PPC405 configuration of SSX
+///
+/// This file is mostly only a placeholder - where 'extern inline' API
+/// functions and 'extern' variables are realized. A couple of default
+/// handlers are also installed here. The entry points in this file are
+/// considered 'core' routines that will always be present at runtime in any
+/// SSX application.
+
+#define __PPC405_IRQ_CORE_C__
+
+#include "ssx.h"
+
+/// This function is installed by default for interrupts not explicitly set up
+/// by the application. These interrupts should never fire.
+
+void
+__ppc405_default_irq_handler(void *arg,
+ SsxIrqId irq,
+ int critical)
+{
+ SSX_PANIC(PPC405_DEFAULT_IRQ_HANDLER);
+}
+
+
+/// This function is installed by default to handle the case that the
+/// interrupt dispatch code is entered in response to an external critical or
+/// non-critical interrupt, but no interrupt is found pending in the interrupt
+/// controller. This should never happen, as it would indicate that a
+/// 'glitch' occurred on the external noncritical or critical interrupt input
+/// to the PPC405 core.
+
+void __ppc405_phantom_irq_handler(void *arg,
+ SsxIrqId irq,
+ int critical)
+{
+ SSX_PANIC(PPC405_PHANTOM_INTERRUPT);
+}
+
+
+#undef __PPC405_IRQ_CORE_C__
diff --git a/src/ssx/ppc405/ppc405_irq_init.c b/src/ssx/ppc405/ppc405_irq_init.c
new file mode 100755
index 0000000..da33947
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_irq_init.c
@@ -0,0 +1,169 @@
+// $Id: ppc405_irq_init.c,v 1.2 2014/02/03 01:30:42 daviddu Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_irq_init.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_irq_init.c
+/// \brief PPC405 IRQ initialization routines
+///
+/// The entry points in this file are routines that are typically used during
+/// initialization, and their code space could be deallocated and recovered if
+/// no longer needed by the application after initialization.
+
+#include "ssx.h"
+
+/// Set up a PPC405 Fixed Interval Timer (FIT) handler
+///
+/// See the SSX specification for full details on setting up a FIT handler.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_PPC405_FIT The \a tcr_fp argument was
+/// invalid when called with a non-null (non-0) \a handler.
+
+// Since the SSX_CRITICAL Watchdog interrupt is also controlled by the TCR, we
+// need to enter an SSX_CRITICAL critical section to manipulate the TCR.
+
+int
+ppc405_fit_setup(int tcr_fp, SsxIrqHandler handler, void* arg)
+{
+ SsxMachineContext ctx;
+ Ppc405TCR tcr;
+
+ if (SSX_ERROR_CHECK_API && handler) {
+ SSX_ERROR_IF((tcr_fp < 0) ||
+ (tcr_fp > 3),
+ SSX_INVALID_ARGUMENT_PPC405_FIT);
+ }
+
+ ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+
+ tcr.value = mfspr(SPRN_TCR);
+
+ if (handler) {
+
+ tcr.fields.fp = tcr_fp;
+ tcr.fields.fie = 1;
+
+ __ppc405_fit_routine = handler;
+ __ppc405_fit_arg = arg;
+
+ } else {
+
+ tcr.fields.fie = 0;
+ }
+
+ mtspr(SPRN_TCR, tcr.value);
+ isync();
+
+ ssx_critical_section_exit(&ctx);
+
+ return SSX_OK;
+}
+
+
+/// Set up a PPC405 Watchdog interrupt handler
+///
+/// See the SSX specification for full details on setting up a watchdog
+/// handler.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_PPC405_WATCHDOG One or more of the \a tcr_wp
+/// or \a tcr_wrc arguments were invalid.
+
+int
+ppc405_watchdog_setup(int tcr_wp, int tcr_wrc,
+ SsxIrqHandler handler, void* arg)
+{
+ SsxMachineContext ctx;
+ Ppc405TCR tcr;
+
+ if (SSX_ERROR_CHECK_API) {
+ SSX_ERROR_IF((tcr_wp < 0) ||
+ (tcr_wp > 3) ||
+ (tcr_wrc < 0) ||
+ (tcr_wrc > 3),
+ SSX_INVALID_ARGUMENT_PPC405_WATCHDOG);
+ }
+
+ ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+
+ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS);
+
+ tcr.value = mfspr(SPRN_TCR);
+
+ tcr.fields.wp = tcr_wp;
+ tcr.fields.wrc = tcr_wrc;
+
+ if (handler == 0) {
+
+        // Reinstall the default handler and clear the interrupt enable. Then
+        // clear any pending interrupt status by writing back
+        // WIS.
+
+ __ppc405_watchdog_routine = __ppc405_default_irq_handler;
+ __ppc405_watchdog_arg = 0;
+
+ tcr.fields.wie = 0;
+ mtspr(SPRN_TCR, tcr.value);
+ isync();
+
+ mtspr(SPRN_TSR, TSR_WIS);
+ isync();
+
+ } else {
+
+        // Install the new handler and enable the watchdog interrupt.
+
+ __ppc405_watchdog_routine = handler;
+ __ppc405_watchdog_arg = arg;
+
+ tcr.fields.wie = 1;
+ mtspr(SPRN_TCR, tcr.value);
+ isync();
+ }
+
+ ssx_critical_section_exit(&ctx);
+
+ return SSX_OK;
+}
+
+
+/// Set up a PPC405 Debug interrupt handler
+///
+/// See the SSX specification for full details on setting up a debug handler.
+///
+/// Return values other than SSX_OK (0) are errors; see \ref ssx_errors
+///
+/// \retval 0 Successful completion
+///
+/// \retval -SSX_INVALID_ARGUMENT_PPC405_DEBUG The \a handler argument
+/// is null (0).
+
+// The debug handler is installed in an SSX_CRITICAL critical section with all
+// debug interrupts disabled as well.
+
+int
+ppc405_debug_setup(SsxIrqHandler handler, void* arg)
+{
+ SsxMachineContext ctx;
+
+ ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+ andc_msr(MSR_DE | MSR_DWE);
+
+ __ppc405_debug_routine = handler;
+ __ppc405_debug_arg = arg;
+ isync();
+
+ ssx_critical_section_exit(&ctx);
+
+ return SSX_OK;
+}
diff --git a/src/ssx/ppc405/ppc405_lib_core.c b/src/ssx/ppc405/ppc405_lib_core.c
new file mode 100755
index 0000000..3086efb
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_lib_core.c
@@ -0,0 +1,42 @@
+// $Id: ppc405_lib_core.c,v 1.2 2014/06/26 13:00:11 cmolsen Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_lib_core.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_lib_core.c
+/// \brief PPC405-specific library procedures
+///
+/// The routines in this file are not part of SSX per se, but are included
+/// with SSX because they may be considered OS services.
+///
+/// These are core routines that will be present in all PPC405 SSX applications.
+
+#include "ssx.h"
+
+/// Cause a PPC405 core reset by an action on DBCR0
+
+void
+ppc405_core_reset()
+{
+ or_spr(SPRN_DBCR0, DBCR0_RST_CORE);
+}
+
+/// Cause a PPC405 chip reset by an action on DBCR0
+
+void
+ppc405_chip_reset()
+{
+ or_spr(SPRN_DBCR0, DBCR0_RST_CHIP);
+}
+
+/// Cause a PPC405 system reset by an action on DBCR0
+
+void
+ppc405_system_reset()
+{
+ or_spr(SPRN_DBCR0, DBCR0_RST_SYSTEM);
+}
+
diff --git a/src/ssx/ppc405/ppc405_mmu.c b/src/ssx/ppc405/ppc405_mmu.c
new file mode 100755
index 0000000..1affc67
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_mmu.c
@@ -0,0 +1,474 @@
+// $Id: ppc405_mmu.c,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_mmu.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_mmu.c
+/// \brief Functions related to the PPC405 MMU and its use in SSX.
+///
+/// SSX currently only supports using the PPC405 MMU for instruction and data
+/// area translation and protection for the global SSX application and kernel.
+/// No support for demand paging, user vs. kernel protection or
+/// thread-specific protection is provided.
+///
+/// Instead, the ppc405_mmu_map() API is provided. This API sets up TLB
+/// entries that provide address translation and protection for a region of
+/// memory. TLB entries are "locked", that is, once a TLB entry is defined it
+/// is permanently defined. SSX makes use of the variable page sizes provided
+/// by the PPC405 to protect regions using the minimum number of TLB entries.
+/// The minimum page size is 1K, and all regions to be protected must be 1K
+/// aligned and have sizes that are multiples of 1K.
+///
+/// The ppc405_mmu_map() API optionally returns a Ppc405MmuMap descriptor.
+/// This descriptor can be later used as the argument to ppc405_mmu_unmap() to
+/// unmap the region.
+///
+/// The overall SSX configuration option is PPC405_MMU_SUPPORT, with
+/// suboptions PPC405_IR_SUPPORT and PPC405_DR_SUPPORT. The IR (instruction
+/// relocate) and DR (data relocate) bits of the MSR must be set to enable
+/// instruction and data translation/protection respectively. In SSX this is
+/// handled by the definition of a macro PPC405_RELOCATION_MODE that contains
+/// the IR and/or DR bits as configured. This macro is OR'ed with the default
+/// PPC405 SSX_THREAD_MACHINE_CONTEXT_DEFAULT. If the application defines its
+/// own SSX_THREAD_MACHINE_CONTEXT_DEFAULT then the application will have to
+/// take care of ensuring that the correct IR/DR settings go into the default.
+///
+/// During interrupts and context switches the relocation mode is
+/// re-established before any loads or stores take place which provides
+/// complete protection for interrupt handlers. Note the
+/// assumption/requirement that all kernel, interrupt and thread code will be
+/// run under the PPC405_RELOCATION_MODE.
+
+#include "ssx.h"
+#include "ppc405_mmu.h"
+
+// A map of free TLB entries.
+//
+// It's handy that the PPC405 TLB has 64 entries. Thus we can use a 64-bit
+// bit mask to represent free entries. The next free entry is quickly found
+// using cntlz64().
+uint64_t __ppc405_tlb_free = 0;
+
+/// Reset the PPC405 simple MMU translation/protection scheme
+///
+/// This API invalidates the TLB, clears the zone protection register, and
+/// otherwise resets the SSX simple translation/protcetion scheme for the
+/// PPC405. The application must not be running in a translated mode when
+/// this API is invoked.
+///
+/// \retval 0 Success
+///
+/// \retval -PPC405_MMU_ILLEGAL_CONTEXT The API was called with translation
+/// enabled.
+
+int
+ppc405_mmu_reset()
+{
+    // Translation must be off (MSR[IR] and MSR[DR] clear): we are about to
+    // invalidate the TLB that would otherwise translate our own accesses.
+    if (SSX_ERROR_CHECK_API) {
+        SSX_ERROR_IF(mfmsr() & (MSR_IR | MSR_DR),
+                     PPC405_MMU_ILLEGAL_CONTEXT);
+    }
+
+    tlbia();                    // Invalidate all TLB entries
+    mtspr(SPRN_PID, 0);         // SSX always runs with PID 0
+    mtspr(SPRN_ZPR, 0);         // Zone protection is not used by SSX
+    __ppc405_tlb_free = 0xffffffffffffffffull;   // Mark all 64 entries free
+    isync();
+
+    return 0;
+}
+
+
+/// Define a memory region for MMU protection purposes
+///
+/// \param effective_address The effective (virtual) base address of the
+/// region. This address must be at least 1KB aligned.
+///
+/// \param real_address The real base address of the region. This address
+/// must be at least 1KB aligned.
+///
+/// \param size The size of the region in bytes. The size must be a multiple
+/// of 1KB. A \a size of 0 is legal and creates no entries.
+///
+/// \param tlbhi_flags An OR combination of PPC405_TLBHI_* bits (excluding the
+/// V-bit). This parameter is unlikely to be non-0 as it contains only the
+/// little-endian (E) and U0 flags.
+///
+/// - TLBHI_E : Little Endian
+/// - TLBHI_U0 : U0 mode access
+///
+/// \param tlblo_flags An OR combination of PPC405_TLBLO_* bits. This
+/// parameter defines WR and EX protection, as well as the 'WIMG' bits.
+///
+/// - TLBLO_EX : Executable code
+/// - TLBLO_WR : Writable data
+/// - TLBLO_W : Write-through mode
+/// - TLBLO_I : Cache-inhibited
+/// - TLBLO_M : Memory Coherent (Implemented but Ignored)
+/// - TLBLO_G : Guarded
+///
+/// \param map If non-0, then a Ppc405TlbMap for the region is returned.
+/// This map can be later used as an argument to ppc405_mmu_unmap() to unmap
+/// the region.
+///
+/// This API creates fixed TLB entries that provide virtual-to-real address
+/// translation and protection using a minimum number of TLB entries. The
+/// number of TLB entries is fixed, so there is no guarantee in general that
+/// any particular memory map is feasible. In general it is helpful to make
+/// sure that the effective and real memory ranges have similar alignment,
+/// otherwise the algorithm will be forced to use small page sizes.
+///
+/// The caller is responsible for cache-correctness of this API. If necessary
+/// the caller should flush or invalidate memory areas whose protection
+/// attributes have changed prior to and/or after invoking this API.
+///
+/// Note that the simple translation/protection protocol supported by this SSX
+/// API does not support the "zone selection" field of the PPC405 TLB
+/// entry. In SSX the PID is always 0.
+///
+/// If SSX_ERROR_CHECK_API is configured, the API checks each new TLB entry to
+/// ensure that it does not duplicate an existing entry. The check only
+/// covers duplicated effective addresses (which are not supported by the
+/// hardware), not the real addresses.
+///
+/// \retval 0 Success
+///
+/// \retval -PPC405_MMU_INVALID_ARGUMENT Can be signalled by numerous errors
+/// including improperly aligned memory regions, region size not a multiple of
+/// 1KB, and illegal flags.
+///
+/// \retval -PPC405_TOO_MANY_TLB_ENTRIES There are not enough TLB entries left
+/// to completely map the region. The state of the TLB at this point may be
+/// inconsistent.
+///
+/// \retval -PPC405_DUPLICATE_TLB_ENTRY The requested mapping would duplicate
+/// all or part of a currently existing TLB entry. Duplicate entries are not
+/// supported in the 405 core and will cause a TLB miss if an address in a
+/// duplicated range is accessed.
+
+int
+ppc405_mmu_map(SsxAddress effective_address,
+               SsxAddress real_address,
+               size_t size, int tlbhi_flags, int tlblo_flags,
+               Ppc405MmuMap *map)
+{
+    size_t log_page_size;
+    size_t page_size = 0;
+    Ppc405Tlbhi tlbhi;
+    Ppc405Tlblo tlblo;
+    Ppc405MmuMap local_map = 0;   // Bit mask of TLB entries allocated here
+    int tlb_entry;
+    uint64_t bit;
+    SsxMachineContext ctx;
+
+    if (SSX_ERROR_CHECK_API) {
+        uint64_t allocated;
+        SsxAddress this_effective_address;
+        int entry, overlap;
+
+        // Check alignment, wrapping and legal flags.
+
+        SSX_ERROR_IF((effective_address % PPC405_PAGE_SIZE_MIN) ||
+                     (real_address % PPC405_PAGE_SIZE_MIN) ||
+                     (size % PPC405_PAGE_SIZE_MIN) ||
+                     ((effective_address + size - 1) < effective_address) ||
+                     (tlbhi_flags & ~TLBHI_LEGAL_FLAGS) ||
+                     (tlblo_flags & ~TLBLO_LEGAL_FLAGS),
+                     PPC405_MMU_INVALID_ARGUMENT);
+
+        // The check for duplicate entries needs to be done iteratively since
+        // we don't use a fixed page size. Since this API will probably only
+        // be called during initialization or from thread contexts, and since
+        // the TLB size is small, this overhead is not considered too onerous.
+        // For complete correctness this check would need to be done in its
+        // entirety in an SSX_CRITICAL critical section. In order to reduce
+        // SSX_CRITICAL interrupt latency we simply check against the TLB
+        // entries that were allocated at the time of the API call. This code
+        // may not protect against multiple threads simultaneously creating
+        // mappings that duplicate each other (a super-bug), but it should
+        // protect against bugs in a single thread's updating of the TLB.
+
+        if (size != 0) {
+
+            // See if the requested effective address is already mapped in the
+            // TLB
+
+            overlap = tlbsx(effective_address, &entry);
+
+            // Iteratively check the other overlap condition, which is the
+            // case that the effective address of any TLB entry is in the
+            // range of the new request.
+
+            allocated = ~__ppc405_tlb_free;   // 1-bits here are in-use entries
+            while (!overlap && (allocated != 0)) {
+
+                // Take the next allocated entry and clear it from the
+                // iteration work list.
+
+                entry = cntlz64(allocated);
+                allocated &= ~((uint64_t)1 << (63 - entry));
+
+                tlbhi.value = tlbrehi(entry);
+                if (tlbhi.fields.v) {
+
+                    this_effective_address =
+                        tlbhi.fields.epn << PPC405_LOG_PAGE_SIZE_MIN;
+
+
+                    // See if the first byte of this entry is inside the
+                    // requested effective address range. NB: use actual
+                    // address ranges (addr + size - 1) to compute overlap to
+                    // avoid overflow.
+
+                    overlap |=
+                        (this_effective_address >=
+                         effective_address) &&
+                        (this_effective_address <=
+                         (effective_address + size - 1));
+                }
+            }
+            SSX_ERROR_IF(overlap, PPC405_DUPLICATE_TLB_ENTRY);
+        }
+    }
+
+    // NB: PPC405 page sizes go from 1K to 16M by factors of 4. This is a
+    // 'greedy' algorithm that packs the region into the fewest number of
+    // pages, by using the largest possible (aligned) page size for the
+    // remaining memory area.
+
+    while (size != 0) {
+
+        // Allocate the next free TLB index from the shared free mask; only
+        // the mask manipulation needs to be inside the critical section.
+
+        ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+
+        if (SSX_ERROR_CHECK_API) {
+            SSX_ERROR_IF_CRITICAL(__ppc405_tlb_free == 0,
+                                  PPC405_TOO_MANY_TLB_ENTRIES,
+                                  &ctx);
+        }
+
+        tlb_entry = cntlz64(__ppc405_tlb_free);
+        bit = 0x8000000000000000ull >> tlb_entry;
+        __ppc405_tlb_free &= ~bit;
+        local_map |= bit;
+
+        ssx_critical_section_exit(&ctx);
+
+        // Select the largest page size (16M down to 1K, stepping by factors
+        // of 4, hence 'log_page_size -= 2') that both fits the remaining
+        // size and matches the alignment of the effective and real addresses.
+        // The loop terminates because 1K pages always fit (size and both
+        // addresses are 1K-aligned per the checks above).
+
+        log_page_size = PPC405_LOG_PAGE_SIZE_MAX;
+        do {
+            page_size = POW2_32(log_page_size);
+            if ((size >= page_size) &&
+                ((effective_address & (page_size - 1)) == 0) &&
+                ((real_address & (page_size - 1)) == 0)) {
+                break;
+            } else {
+                log_page_size -= 2;
+            }
+        } while (1);
+
+        size -= page_size;
+
+        // Create and install the TLB entries. The installation is done in a
+        // critical section to avoid any chance of another entity seeing an
+        // inconsistent TLB.
+
+        tlbhi.value = tlbhi_flags;
+        tlbhi.fields.epn = effective_address >> PPC405_LOG_PAGE_SIZE_MIN;
+        tlbhi.fields.size = (log_page_size - PPC405_LOG_PAGE_SIZE_MIN) / 2;
+        tlbhi.fields.v = 1;
+
+        tlblo.value = tlblo_flags;
+        tlblo.fields.rpn = real_address >> PPC405_LOG_PAGE_SIZE_MIN;
+
+        ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+
+        // TLBLO is written before TLBHI so the entry only becomes valid
+        // (the V bit lives in TLBHI) once both halves are consistent.
+
+        tlbwelo(tlb_entry, tlblo.value);
+        tlbwehi(tlb_entry, tlbhi.value);
+        isync();
+
+        ssx_critical_section_exit(&ctx);
+
+        effective_address += page_size;
+        real_address += page_size;
+    }
+
+    // Return the map of allocated entries if the caller asked for one.
+
+    if (map) {
+        *map = local_map;
+    }
+
+    return 0;
+}
+
+
+/// Un-define a memory region for MMU protection purposes
+///
+/// \param map A pointer to a Ppc405MmuMap object created by ppc405_mmu_map()
+/// when the memory region was mapped. This map is used to invalidate the TLB
+/// entries associated with the map, then the map itself is invalidated.
+///
+/// The caller is responsible for cache-correctness of this API. If necessary
+/// the caller should flush or invalidate memory areas whose protection
+/// attributes have changed prior to and/or after invoking this API.
+///
+/// \retval 0 Success
+///
+/// \retval -PPC405_MMU_INVALID_ARGUMENT The \a map pointer is null (0).
+
+int
+ppc405_mmu_unmap(Ppc405MmuMap *map)
+{
+    int tlb_entry;
+    uint64_t bit;
+    SsxMachineContext ctx;
+
+    if (SSX_ERROR_CHECK_API) {
+        SSX_ERROR_IF(map == 0, PPC405_MMU_INVALID_ARGUMENT);
+    }
+
+    // cntlz64() returns 64 once the map is empty (no bits remain set).
+    while ((tlb_entry = cntlz64(*map)) != 64) {
+
+        bit = 0x8000000000000000ull >> tlb_entry;
+        *map &= ~bit;             // Remove the entry from the caller's map
+        tlbwehi(tlb_entry, 0);    // Writing TLBHI = 0 clears V, invalidating
+        isync();
+
+        // Only the shared free-mask update needs the critical section; the
+        // caller's map is private to the caller.
+
+        ssx_critical_section_enter(SSX_CRITICAL, &ctx);
+
+        __ppc405_tlb_free |= bit;
+
+        ssx_critical_section_exit(&ctx);
+    }
+
+    return 0;
+}
+
+
+/// Produce a dump of the TLB
+///
+/// \param[in] i_stream The output stream for the dump
+///
+/// \param[in] i_map An optional pointer. If NULL (0) then a full report is
+/// printed. If non-null then only the entries recorded in the \a i_map are
+/// printed.
+
+void
+ppc405_mmu_report(FILE* i_stream, Ppc405MmuMap* i_map)
+{
+    size_t i;
+    Ppc405Tlbhi tlbhi;
+    Ppc405Tlblo tlblo;
+    uint32_t size, eff_lo, eff_hi, real_lo, real_hi;
+    // Index corresponds to the 3-bit TLBHI 'size' field (1K * 4^size)
+    const char *size_string[] = {
+        " 1K", " 4K", " 16K", " 64K", "256K", " 1M", " 4M", " 16M"
+    };
+    Ppc405MmuMap map;
+
+    fprintf(i_stream, "------------------------------------------------------------------------------\n");
+    if (i_map == 0) {
+        fprintf(i_stream, "-- PPC405 MMU : Full Report --\n");
+    } else {
+        fprintf(i_stream, "-- PPC405 MMU : Partial Report --\n");
+    }
+    fprintf(i_stream, "------------------------------------------------------------------------------\n");
+    fprintf(i_stream, "-- # Effective Real Size EX/WR WIMG Other --\n");
+    fprintf(i_stream, "------------------------------------------------------------------------------\n");
+
+    // Build a mask whose 0-bits select the entries to print: every
+    // allocated entry for a full report, otherwise only the caller's map.
+
+    if (i_map == 0) {
+        map = __ppc405_tlb_free;
+    } else {
+        map = ~*i_map;
+    }
+
+    for (i = 0; i < PPC405_TLB_ENTRIES; i++, map <<= 1) {
+
+        // A 1 in the current high bit means entry i is not selected.
+        if (map & 0x8000000000000000ull) {
+            continue;
+        }
+
+        tlbhi.value = tlbrehi(i);
+        tlblo.value = tlbrelo(i);
+
+        if (tlbhi.fields.v) {
+
+            // Page size in bytes is 1K * 4^size
+            size =
+                POW2_32(PPC405_LOG_PAGE_SIZE_MIN) << (2 * tlbhi.fields.size);
+
+            eff_lo = tlbhi.fields.epn << PPC405_LOG_PAGE_SIZE_MIN;
+            eff_hi = eff_lo + size - 1;
+
+            real_lo = tlblo.fields.rpn << PPC405_LOG_PAGE_SIZE_MIN;
+            real_hi = real_lo + size - 1;
+
+            fprintf(i_stream, "-- %2d : %08x:%08x -> %08x:%08x : %s : %s %s : %s%s%s%s : %s%s --\n",
+                    i,
+                    eff_lo, eff_hi,
+                    real_lo, real_hi,
+                    size_string[tlbhi.fields.size],
+                    tlblo.fields.ex ? "EX" : " ",
+                    tlblo.fields.wr ? "WR" : " ",
+                    tlblo.fields.w ? "W" : " ",
+                    tlblo.fields.i ? "I" : " ",
+                    tlblo.fields.m ? "M" : " ",
+                    tlblo.fields.g ? "G" : " ",
+                    tlbhi.fields.e ? "E" : " ",
+                    tlbhi.fields.u0 ? "U0" : " ");
+        } else {
+            fprintf(i_stream, "-- %2d : ENTRY NOT VALID\n", i);
+        }
+    }
+
+    fprintf(i_stream, "------------------------------------------------------------------------------\n");
+}
+
+
+/// Perform a memcpy() without address translation (protection)
+///
+/// It sometimes arises that "read-only" data needs to be initialized at
+/// run-time. This can be accomplished in general by temporarily disabling
+/// translation (protection) while the "read-only" data is altered. Another
+/// option is to use the memcpy_real() API to copy an image of the data from
+/// writable memory to memory marked read-only by the MMU.
+///
+/// The memcpy_real() function copies \a n bytes from memory area \a src to
+/// memory area \a dest, with translation disabled. The memory areas should
+/// not overlap. The memcpy_real() function returns a pointer to dest.
+///
+/// This is a general-purpose API that makes no assumption about the
+/// cacheability of the data, and can also be used to move code from data
+/// areas to text areas as the I-cache is always invalidated after the copy.
+/// The algorithm is as follows:
+///
+/// - Flush the \a dest data from the D-cache
+/// - Disable translation
+/// - memcpy() the \a src to the \a dest
+/// - Flush the \a dest data from the D-cache
+/// - Invalidate the I-cache
+/// - Re-enable translation (if it had been previously enabled)
+///
+/// \note Any synchronization access required for \a dest or \a src is the
+/// responsibility of the caller.
+void*
+memcpy_real(void* dest, const void* src, size_t n)
+{
+    uint32_t msr;
+
+    // Push any cached image of 'dest' out of the D-cache before accessing
+    // the area untranslated.
+    dcache_flush(dest, n);
+
+    msr = mfmsr();
+    mtmsr(msr & ~(MSR_IR | MSR_DR));   // Disable instr. & data translation
+    sync(); /* HW239446! */
+
+    memcpy(dest, src, n);
+
+    // Push the copied data to memory, then discard stale instructions in
+    // case 'dest' holds code.
+    dcache_flush(dest, n);
+
+    icache_invalidate_all();
+
+    mtmsr(msr);   // Restore the caller's original translation mode
+    sync(); /* HW239446! */
+
+    return dest;
+}
+
+
+
+
diff --git a/src/ssx/ppc405/ppc405_mmu.h b/src/ssx/ppc405/ppc405_mmu.h
new file mode 100755
index 0000000..cd7e249
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_mmu.h
@@ -0,0 +1,170 @@
+#ifndef __PPC405_MMU_H__
+#define __PPC405_MMU_H__
+
+// $Id: ppc405_mmu.h,v 1.1.1.1 2013/12/11 21:03:26 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_mmu.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_mmu.h
+/// \brief Definitions related to the PPC405 MMU and its use in SSX.
+
+#ifndef __ASSEMBLER__
+
+#include "ssx_io.h"
+#include <stdint.h>
+
+/// The PPC405 TLBHI (tag) structure
+///
+/// Note that in hardware this is a 36-bit register, as it includes the TID
+/// field. When writing, TID is set from the current PID, and when reading PID
+/// is set from the TID entry of the register.
+
+typedef union {
+    uint32_t value;
+    struct {
+        uint32_t epn : 22;   // Effective page number
+        uint32_t size : 3;   // Page size code (bytes = 1K * 4^size)
+        uint32_t v : 1;      // Valid
+        uint32_t e : 1;      // Little-endian (TLBHI_E)
+        uint32_t u0 : 1;     // U0 mode access (TLBHI_U0)
+    } fields;
+} Ppc405Tlbhi;
+
+
+/// The PPC405 TLBLO (Data) structure
+
+typedef union {
+    uint32_t value;
+    struct {
+        uint32_t rpn : 22;   // Real page number
+        uint32_t ex : 1;     // Executable code (TLBLO_EX)
+        uint32_t wr : 1;     // Writable data (TLBLO_WR)
+        uint32_t zsel : 4;   // Zone select - not used by SSX (PID always 0)
+        uint32_t w : 1;      // Write-through mode
+        uint32_t i : 1;      // Cache-inhibited
+        uint32_t m : 1;      // Memory coherent (implemented but ignored)
+        uint32_t g : 1;      // Guarded
+    } fields;
+} Ppc405Tlblo;
+
+#endif /* __ASSEMBLER__ */
+
+// TLBHI contains little-endian and U0 flags (probably never used)
+
+#define TLBHI_E 0x00000020
+#define TLBHI_U0 0x00000010
+
+#define TLBHI_LEGAL_FLAGS (TLBHI_E | TLBHI_U0)
+
+// TLBLO contains WIMG + EX/WR bits
+
+#define TLBLO_EX 0x00000200
+#define TLBLO_WR 0x00000100
+#define TLBLO_W 0x00000008
+#define TLBLO_I 0x00000004
+#define TLBLO_M 0x00000002
+#define TLBLO_G 0x00000001
+
+#define TLBLO_LEGAL_FLAGS \
+ (TLBLO_EX | TLBLO_WR | TLBLO_W | TLBLO_I | TLBLO_M | TLBLO_G)
+
+
+#define PPC405_TLB_ENTRIES 64
+
+#define PPC405_PAGE_SIZE_MIN 1024
+#define PPC405_PAGE_SIZE_MAX (16 * 1024 * 1024)
+
+#define PPC405_LOG_PAGE_SIZE_MIN 10
+#define PPC405_LOG_PAGE_SIZE_MAX 24
+
+#define PPC405_PAGE_SIZE_1K 0
+#define PPC405_PAGE_SIZE_4K 1
+#define PPC405_PAGE_SIZE_16K 2
+#define PPC405_PAGE_SIZE_64K 3
+#define PPC405_PAGE_SIZE_256K 4
+#define PPC405_PAGE_SIZE_1M 5
+#define PPC405_PAGE_SIZE_4M 6
+#define PPC405_PAGE_SIZE_16M 7
+
+// PPC405 MMU error and panic codes
+
+#define PPC405_MMU_ILLEGAL_CONTEXT 0x00668001
+#define PPC405_MMU_INVALID_ARGUMENT 0x00668002
+#define PPC405_TOO_MANY_TLB_ENTRIES 0x00668003
+#define PPC405_DUPLICATE_TLB_ENTRY 0x00668004
+
+
+#ifndef __ASSEMBLER__
+
+/// A descriptor of a memory region statically defined in the TLB
+///
+/// These maps are returned by ppc405_mmu_map(), and can be used later
+/// to unmap the region with ppc405_mmu_unmap(). They can also be used to
+/// control what gets printed by ppc405_mmu_report().
+typedef uint64_t Ppc405MmuMap;
+
+/// TLBIA
+#define tlbia() asm volatile ("tlbia" : : : "memory")
+
+/// TLBWEHI
+#define tlbwehi(entry, tlbhi) \
+asm volatile ("tlbwehi %0, %1" : : "r" (tlbhi), "r" (entry) : "memory")
+
+/// TLBWELO
+#define tlbwelo(entry, tlblo) \
+asm volatile ("tlbwelo %0, %1" : : "r" (tlblo), "r" (entry) : "memory")
+
+/// TLBREHI
+#define tlbrehi(entry) \
+ ({ \
+ uint32_t __tlbhi; \
+ asm volatile ("tlbrehi %0, %1" : "=r" (__tlbhi) : "r" (entry)); \
+ __tlbhi;})
+
+/// TLBRELO
+#define tlbrelo(entry) \
+ ({ \
+ uint32_t __tlblo; \
+ asm volatile ("tlbrelo %0, %1" : "=r" (__tlblo) : "r" (entry)); \
+ __tlblo;})
+
+/// TLBSX
+///
+/// Returns 1 if the address is mapped, else 0. If positive the integer
+/// pointed to by \a entry is updated with the TLB index of the matching
+/// entry, otherwise the return value is undefined.
+#define tlbsx(address, entry) \
+ ({ \
+ uint32_t __cr, __entry; \
+ asm volatile ("tlbsx. %0, 0, %2; mfcr %1" : \
+ "=r" (__entry), "=r" (__cr) : "r" (address)); \
+ *(entry) = __entry; \
+ ((__cr & CR_EQ(0)) != 0);})
+
+
+int
+ppc405_mmu_reset(void);
+
+int
+ppc405_mmu_map(SsxAddress effective_address,
+ SsxAddress real_address,
+ size_t size, int tlbhi_flags, int tlblo_flags,
+ Ppc405MmuMap *map);
+
+int
+ppc405_mmu_unmap(Ppc405MmuMap *map);
+
+void
+ppc405_mmu_start(void);
+
+void
+ppc405_mmu_report(FILE* stream, Ppc405MmuMap* map);
+
+#endif /* __ASSEMBLER__ */
+
+
+#endif /* __PPC405_MMU_H__ */
diff --git a/src/ssx/ppc405/ppc405_mmu_asm.S b/src/ssx/ppc405/ppc405_mmu_asm.S
new file mode 100755
index 0000000..1118779
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_mmu_asm.S
@@ -0,0 +1,73 @@
+// $Id: ppc405_mmu_asm.S,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_mmu_asm.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_mmu_asm.S
+/// \brief Assembler support for PPC405 MMU operations
+///
+/// Currently the only API in this file - ppc405_mmu_start() - is likely an
+/// initialization-only API that could be removed from the run-time image.
+
+ .nolist
+#include "ssx.h"
+ .list
+
+/// Start MMU mode on the PPC405
+///
+/// This API enables PPC405 address translation of the type supported by SSX -
+/// simple 1 to 1 effective - real translation for the purpose of
+/// protection. It is coded in assembler to ensure that this transition is
+/// done as cache-safely as generically possible. The API enters a critical
+/// section and flushes the data cache and invalidates the I-cache, then
+/// enables the translation modes specified by PPC405_RELOCATION_MODE.
+/// Following is a final invalidation of the I-cache.
+///
+/// \cond
+
+ .global_function ppc405_mmu_start
+ppc405_mmu_start:
+
+ ## Create and link a stack frame
+
+ stwu %r1, -16(%r1)
+ mflr %r0
+ stw %r0, 20(%r1)
+
+ ## Enter critical section, save original MSR in R31 (R31 itself is
+ ## preserved on the stack first)
+
+ stw %r31, 8(%r1)
+ _ssx_critical_section_enter SSX_CRITICAL, %r31, %r4
+
+ ## Flush D-cache, Invalidate I-Cache
+
+ bl dcache_flush_all
+ bl icache_invalidate_all
+
+ ## Enter translation mode on original MSR (removing critical section).
+ ## This is done by 'returning' (rfi) to the label below with SRR1 set to
+ ## the saved MSR OR-ed with the configured PPC405_RELOCATION_MODE bits.
+
+ _liw %r3, 1f
+ mtsrr0 %r3
+ _liwa %r3, PPC405_RELOCATION_MODE
+ or %r3, %r31, %r3
+ mtsrr1 %r3
+ rfi
+1:
+ ## Invalidate I-cache again
+
+ bl icache_invalidate_all
+
+ ## Restore R31, Unwind stack and return
+
+ lwz %r31, 8(%r1)
+ lwz %r0, 20(%r1)
+ mtlr %r0
+ addi %r1, %r1, 16
+ blr
+
+ .epilogue ppc405_mmu_start
+
+/// \endcond
diff --git a/src/ssx/ppc405/ppc405_msr.h b/src/ssx/ppc405/ppc405_msr.h
new file mode 100755
index 0000000..645a052
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_msr.h
@@ -0,0 +1,85 @@
+#ifndef __PPC405_MSR_H__
+#define __PPC405_MSR_H__
+
+// $Id: ppc405_msr.h,v 1.1.1.1 2013/12/11 21:03:26 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_msr.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_msr.h
+/// \brief Everything related to the PPC405 Machine State Register
+///
+/// All of the macros defined here that \e modify the MSR create a compiler
+/// memory barrier that will cause GCC to flush/invalidate all memory data
+/// held in registers before the macro. This is consistent with other systems,
+/// e.g., the PowerPC Linux kernel, and is the safest way to define these
+/// macros as it guarantees for example that kernel data structure updates
+/// have completed before exiting a critical section.
+
+#define MSR_AP 0x02000000 /* APU Available */
+#define MSR_APE 0x00080000 /* APU Exception Enable */
+#define MSR_WE 0x00040000 /* Wait State Enable */
+#define MSR_CE 0x00020000 /* Critical Interrupt Enable */
+#define MSR_EE 0x00008000 /* External Interrupt Enable */
+#define MSR_PR 0x00004000 /* Problem State */
+#define MSR_ME 0x00001000 /* Machine Check Exception Enable */
+#define MSR_FE0 0x00000800 /* Floating-Point Exception Mode 0 */
+#define MSR_DWE 0x00000400 /* Debug Wait Enable */
+#define MSR_DE 0x00000200 /* Debug Interrupt Enable */
+#define MSR_IR 0x00000020 /* Instruction Relocation */
+#define MSR_DR 0x00000010 /* Data Relocation */
+
+#define MSR_CE_BIT 14
+#define MSR_EE_BIT 16
+#define MSR_IR_BIT 26
+#define MSR_DR_BIT 27
+
+#ifndef __ASSEMBLER__
+
+/// Move From MSR
+
+#define mfmsr() \
+ ({uint32_t __msr; \
+ asm volatile ("mfmsr %0" : "=r" (__msr)); \
+ __msr;})
+
+
+/// Move to MSR
+
+#define mtmsr(value) \
+ asm volatile ("mtmsr %0; isync" : : "r" (value) : "memory")
+
+
+/// Read-Modify-Write the MSR with OR (Set MSR bits). This operation is only
+/// guaranteed atomic in a critical section.
+
+#define or_msr(x) \
+ mtmsr(mfmsr() | (x))
+
+
+/// Read-Modify-Write the MSR with AND complement (Clear MSR bits). This
+/// operation is only guaranteed atomic in a critical section.
+
+#define andc_msr(x) \
+ mtmsr(mfmsr() & ~(x))
+
+
+/// Write MSR[EE] with an immediate value (0/1)
+///
+/// Note that the immediate value \a i must be a compile-time constant.
+
+#define wrteei(i) \
+ asm volatile ("wrteei %0; isync" : : "i" (i) : "memory")
+
+
+/// Write MSR[EE] from the EE bit of another MSR
+
+#define wrtee(other_msr) \
+ asm volatile ("wrtee %0; isync" : : "r" (other_msr) : "memory")
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PPC405_MSR_H__ */
diff --git a/src/ssx/ppc405/ppc405_spr.h b/src/ssx/ppc405/ppc405_spr.h
new file mode 100755
index 0000000..ede91cb
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_spr.h
@@ -0,0 +1,319 @@
+#ifndef __PPC405_SPR_H__
+#define __PPC405_SPR_H__
+
+// $Id: ppc405_spr.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_spr.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ppc405_spr.h
+/// \brief Everything related to PPC405-specific SPRs
+
+/// \defgroup ppc405_sprs PowerPC 405 SPRs
+///
+/// These are the documented SPRs of the PPC405. Most of these SPRs are
+/// available in RISCWatch and eCmd using the defined names (minus SPRN_). In
+/// some cases RISCWatch/eCMD use different names, which appear in square
+/// brackets in the brief comments for each register. RISCWatch/eCMD also
+/// allow CR, MSR and IAR (Instruction Address Register) to be accessed as
+/// SPRs.
+///
+/// @{
+
+#define SPRN_CCR0 0x3b3 /// Core configuration register 0
+#define SPRN_CCR1 0x378 /// Core configuration register 1
+#define SPRN_CTR 0x009 /// Count register
+#define SPRN_DAC1 0x3f6 /// Data address compare 1
+#define SPRN_DAC2 0x3f7 /// Data address compare 2
+#define SPRN_DBCR0 0x3f2 /// Debug control register 0
+#define SPRN_DBCR1 0x3bd /// Debug control register 1
+#define SPRN_DBSR 0x3f0 /// Debug status register
+#define SPRN_DCCR 0x3fa /// Data cacheability (real mode)
+#define SPRN_DCWR 0x3ba /// Data cache writeback (real mode)
+#define SPRN_DEAR 0x3d5 /// Data exception address register
+#define SPRN_DVC1 0x3b6 /// Data value compare 1
+#define SPRN_DVC2 0x3b7 /// Data value compare 2
+#define SPRN_ESR 0x3d4 /// Exception syndrome register
+#define SPRN_EVPR 0x3d6 /// Exception vector prefix reg.
+#define SPRN_IAC1 0x3f4 /// Instruction address compare 1
+#define SPRN_IAC2 0x3f5 /// Instruction address compare 2
+#define SPRN_IAC3 0x3b4 /// Instruction address compare 3
+#define SPRN_IAC4 0x3b5 /// Instruction address compare 4
+#define SPRN_ICCR 0x3fb /// Instruction cacheability (real mode)
+#define SPRN_ICDBDR 0x3d3 /// Instruction cache debug data reg.
+#define SPRN_LR 0x008 /// Link register
+#define SPRN_MCSR 0x23c /// Machine check syndrome register
+#define SPRN_PID 0x3b1 /// Process ID
+#define SPRN_PIT 0x3db /// Programmable interrupt timer
+#define SPRN_PVR 0x11f /// Processor version register
+#define SPRN_SGR 0x3b9 /// Storage guarded (real mode)
+#define SPRN_SLER 0x3bb /// Storage little-endian (real mode)
+#define SPRN_SPRG0 0x110 /// SPR general register 0
+#define SPRN_SPRG1 0x111 /// SPR general register 1
+#define SPRN_SPRG2 0x112 /// SPR general register 2
+#define SPRN_SPRG3 0x113 /// SPR general register 3
+#define SPRN_SPRG4 0x114 /// SPR general register 4 [SPRG4_W]
+#define SPRN_SPRG5 0x115 /// SPR general register 5 [SPRG5_W]
+#define SPRN_SPRG6 0x116 /// SPR general register 6 [SPRG6_W]
+#define SPRN_SPRG7 0x117 /// SPR general register 7 [SPRG7_W]
+#define SPRN_SRR0 0x01a /// Save/restore register 0
+#define SPRN_SRR1 0x01b /// Save/restore register 1
+#define SPRN_SRR2 0x3de /// Save/restore register 2
+#define SPRN_SRR3 0x3df /// Save/restore register 3
+#define SPRN_SU0R 0x3bc /// Storage user 0 (real mode)
+#define SPRN_TBL 0x11c /// Time base lower [TBL_W]
+#define SPRN_TBU 0x11d /// Time base upper [TBU_W]
+#define SPRN_TCR 0x3da /// Timer control register
+#define SPRN_TSR 0x3d8 /// Timer status register
+#define SPRN_USPRG0 0x100 /// User read/write SPR general 0
+#define SPRN_XER 0x001 /// Fixed-point exception register
+#define SPRN_ZPR 0x3b0 /// Zone protection register
+#define SPRN_UR_SPRG4 0x104 /// User-readable SPRG4 [SPRG4_R]
+#define SPRN_UR_SPRG5 0x105 /// User-readable SPRG5 [SPRG5_R]
+#define SPRN_UR_SPRG6 0x106 /// User-readable SPRG6 [SPRG6_R]
+#define SPRN_UR_SPRG7 0x107 /// User-readable SPRG7 [SPRG7_R]
+#define SPRN_UR_TBL 0x10c /// User-readable TBL [TBL, TBL_R]
+#define SPRN_UR_TBU 0x10d /// User-readable TBU [TBU, TBU_R]
+
+/// @}
+
+/// \defgroup ppc405_undocumented_sprs PowerPC 405 Undocumented SPRs
+///
+/// These are undocumented SPRs related to RISCWatch and debugging. These
+/// registers are also available in RISCWatch/eCMD.
+///
+/// - DBDR is a scratch register used by RISCwatch when "RAM-ing" data in/out of
+/// the core. This register can be read and written.
+///
+/// - DBSRS and TSRS are "hidden" registers connected to DBSR and TSR
+/// respectively. These are write-only registers. When written, any 1 bits in
+/// the write data are OR-ed into the DBSR and TSR respectively, as a way to
+/// force status bits and cause interrupts.
+///
+/// @{
+
+#define SPRN_DBDR 0x3f3 /// Debug data register
+#define SPRN_DBSRS 0x3f1 /// Debug status register set
+#define SPRN_TSRS 0x3d9 /// Timer status register set
+
+/// @}
+
+/* CCR0 - Cache Control Register 0 */
+
+#define CCR0_LWL 0x02000000 /* Load Word as Line */
+#define CCR0_LWOA 0x01000000 /* Load Without Allocate */
+#define CCR0_SWOA 0x00800000 /* Store Without Allocate */
+#define CCR0_DPP1 0x00400000 /* DCU PLB Priority Bit 1 */
+#define CCR0_IPP0 0x00200000 /* ICU PLB Priority Bit 0 */
+#define CCR0_IPP1 0x00100000 /* ICU PLB Priority Bit 1 */
+#define CCR0_DPE 0x00080000 /* Data Cache Parity Enable */
+#define CCR0_DPP 0x00040000 /* DCU Parity is Precise (0/1) */
+#define CCR0_U0XE 0x00020000 /* Enable U0 Exception */
+#define CCR0_LDBE 0x00010000 /* Load Debug Enable */
+#define CCR0_IPE 0x00002000 /* Instruction Cache Parity Enable */
+#define CCR0_TPE 0x00001000 /* TLB Parity Enable */
+#define CCR0_PFC 0x00000800 /* ICU Prefetching for Cacheable Regions */
+#define CCR0_PFNC 0x00000400 /* ICU Prefetching for Non-Cacheable Regions */
+#define CCR0_NCRS 0x00000200 /* Non-Cacheable ICU request is 16(0)/32(1)B */
+#define CCR0_FWOA 0x00000100 /* Fetch Without Allocate */
+#define CCR0_CIS 0x00000010 /* Cache Information Select Data(0)/Tag(1) */
+#define CCR0_PRS 0x00000008 /* Parity Read Select */
+#define CCR0_CWS 0x00000001 /* Cache Way Select A(0)/B(1) */
+
+/* CCR1 - Cache Control Register 1 */
+
+#define CCR1_ICTE 0x80000000 /* Instruction Cache Tag Parity Insertion */
+#define CCR1_ICDE 0x40000000 /* Instruction Cache Data Parity Insertion */
+#define CCR1_DCTE 0x20000000 /* Data Cache Tag Parity Insertion */
+#define CCR1_DCDE 0x10000000 /* Data Cache Data Parity Insertion */
+#define CCR1_TLBE 0x08000000 /* TLB Parity Insertion */
+
+/* DBCR0 - Debug Control Register 0 */
+
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST_MASK 0x30000000 /* ReSeT */
+#define DBCR0_RST_NONE 0x00000000 /* No action */
+#define DBCR0_RST_CORE 0x10000000 /* Core reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip reset */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System reset */
+#define DBCR0_IC 0x08000000 /* Instruction Completion debug event */
+#define DBCR0_BT 0x04000000 /* Branch Taken debug event */
+#define DBCR0_EDE 0x02000000 /* Exception Debug Event */
+#define DBCR0_TDE 0x01000000 /* Trap Debug Event */
+#define DBCR0_IA1 0x00800000 /* IAC (Instruction Address Compare) 1 debug event */
+#define DBCR0_IA2 0x00400000 /* IAC 2 debug event */
+#define DBCR0_IA12 0x00200000 /* Instruction Address Range Compare 1-2 */
+#define DBCR0_IA12X 0x00100000 /* IA12 eXclusive */
+#define DBCR0_IA3 0x00080000 /* IAC 3 debug event */
+#define DBCR0_IA4 0x00040000 /* IAC 4 debug event */
+#define DBCR0_IA34 0x00020000 /* Instruction Address Range Compare 3-4 */
+#define DBCR0_IA34X 0x00010000 /* IA34 eXclusive */
+#define DBCR0_IA12T 0x00008000 /* Instruction Address Range Compare 1-2 range Toggle */
+#define DBCR0_IA34T 0x00004000 /* Instruction Address Range Compare 3-4 range Toggle */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+
+/* DBSR - Debug Status Register */
+
+#define DBSR_IC 0x80000000 /* Instruction completion debug event */
+#define DBSR_BT 0x40000000 /* Branch Taken debug event */
+#define DBSR_EDE 0x20000000 /* Exception debug event */
+#define DBSR_TIE 0x10000000 /* Trap Instruction debug event */
+#define DBSR_UDE 0x08000000 /* Unconditional debug event */
+#define DBSR_IA1 0x04000000 /* IAC1 debug event */
+#define DBSR_IA2 0x02000000 /* IAC2 debug event */
+#define DBSR_DR1 0x01000000 /* DAC1 Read debug event */
+#define DBSR_DW1 0x00800000 /* DAC1 Write debug event */
+#define DBSR_DR2 0x00400000 /* DAC2 Read debug event */
+#define DBSR_DW2 0x00200000 /* DAC2 Write debug event */
+#define DBSR_IDE 0x00100000 /* Imprecise debug event */
+#define DBSR_IA3 0x00080000 /* IAC3 debug event */
+#define DBSR_IA4 0x00040000 /* IAC4 debug event */
+#define DBSR_MRR 0x00000300 /* Most recent reset */
+
+/* TCR - Timer Control Register */
+
+#define TCR_WP_MASK 0xc0000000 /* Watchdog Period mask */
+#define TCR_WP_2_17 0x00000000 /* 2**17 clocks */
+#define TCR_WP_2_21 0x40000000 /* 2**21 clocks */
+#define TCR_WP_2_25 0x80000000 /* 2**25 clocks */
+#define TCR_WP_2_29 0xc0000000 /* 2**29 clocks */
+#define TCR_WRC_MASK 0x30000000 /* Watchdog Reset Control mask */
+#define TCR_WRC_NONE 0x00000000 /* No watchdog reset */
+#define TCR_WRC_CORE 0x10000000 /* Core reset */
+#define TCR_WRC_CHIP 0x20000000 /* Chip reset */
+#define TCR_WRC_SYSTEM 0x30000000 /* System reset */
+#define TCR_WIE 0x08000000 /* Watchdog Interrupt Enable */
+#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */
+#define TCR_FP_MASK 0x03000000 /* FIT Period */
+#define TCR_FP_2_9 0x00000000 /* 2**9 clocks */
+#define TCR_FP_2_13 0x01000000 /* 2**13 clocks */
+#define TCR_FP_2_17 0x02000000 /* 2**17 clocks */
+#define TCR_FP_2_21 0x03000000 /* 2**21 clocks */
+#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
+#define TCR_ARE 0x00400000 /* Auto-reload Enable */
+
+#ifndef __ASSEMBLER__
+
+typedef union {
+ uint32_t value;
+ struct {
+ unsigned int wp : 2;
+ unsigned int wrc : 2;
+ unsigned int wie : 1;
+ unsigned int pie : 1;
+ unsigned int fp : 2;
+ unsigned int fie : 1;
+ unsigned int are : 1;
+ unsigned int reserved : 22;
+ } fields;
+} Ppc405TCR;
+
+#endif /* __ASSEMBLER__ */
+
+/* TSR - Timer Status Register */
+
+#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
+#define TSR_WIS 0x40000000 /* Watchdog Interrupt Status */
+#define TSR_WRS_MASK 0x30000000 /* Watchdog Reset Status */
+#define TSR_WRS_NONE 0x00000000 /* No watchdog reset has occurred */
+#define TSR_WRS_CORE 0x10000000 /* Core reset was forced by the watchdog */
+#define TSR_WRS_CHIP 0x20000000 /* Chip reset was forced by the watchdog */
+#define TSR_WRS_SYSTEM 0x30000000 /* System reset was forced by the watchdog */
+#define TSR_PIS 0x08000000 /* PIT Interrupt Status */
+#define TSR_FIS 0x04000000 /* FIT Interrupt Status */
+
+#ifndef __ASSEMBLER__
+
+/// Move From SPR
+///
+/// Note that \a sprn must be a compile-time constant.
+
+#define mfspr(sprn) \
+ ({uint32_t __value; \
+ asm volatile ("mfspr %0, %1" : "=r" (__value) : "i" (sprn)); \
+ __value;})
+
+
+/// Move to SPR
+///
+/// Note that \a sprn must be a compile-time constant.
+
+#define mtspr(sprn, value) \
+ ({uint32_t __value = (value); \
+ asm volatile ("mtspr %0, %1" : : "i" (sprn), "r" (__value)); \
+ })
+
+
+/// Read-Modify-Write an SPR with OR (Set SPR bits)
+///
+/// Note that \a sprn must be a compile-time constant. This operation is only
+/// guaranteed atomic in a critical section.
+
+#define or_spr(sprn, x) \
+ mtspr(sprn, mfspr(sprn) | (x))
+
+
+/// Read-Modify-Write an SPR with AND complement (Clear SPR bits)
+///
+/// Note that \a sprn must be a compile-time constant. This operation is only
+/// guaranteed atomic in a critical section.
+
+#define andc_spr(sprn, x) \
+ mtspr(sprn, mfspr(sprn) & ~(x))
+
+
+/// Move From Time Base (Lower)
+
+#define mftb() mfspr(SPRN_TBL)
+
+/// Move To Time Base (Lower)
+
+#define mttbl(x) mtspr(SPRN_TBL, (x))
+
+/// Move From Time Base (Upper)
+
+#define mftbu() mfspr(SPRN_TBU)
+
+/// Move To Time Base (Upper)
+
+#define mttbu(x) mtspr(SPRN_TBU, (x))
+
+#endif /* __ASSEMBLER__ */
+
+#ifdef __ASSEMBLER__
+
+ /// \cond
+
+ // Use this macro to define new mt<spr> and mf<spr> instructions that
+ // may not exist in the assembler.
+
+ .macro _sprinstrs, name, num
+ .macro mt\name, reg
+ mtspr \num, \reg
+ .endm
+ .macro mf\name, reg
+ mfspr \reg, \num
+ .endm
+ .endm
+
+ _sprinstrs ccr0, SPRN_CCR0
+ _sprinstrs ccr1, SPRN_CCR1
+ _sprinstrs dbcr0, SPRN_DBCR0
+ _sprinstrs dbcr1, SPRN_DBCR1
+ _sprinstrs dcwr, SPRN_DCWR
+ _sprinstrs mcsr, SPRN_MCSR
+ _sprinstrs pid, SPRN_PID
+ _sprinstrs sgr, SPRN_SGR
+ _sprinstrs sler, SPRN_SLER
+ _sprinstrs su0r, SPRN_SU0R
+ _sprinstrs usprg0, SPRN_USPRG0
+
+ /// \endcond
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PPC405_SPR_H__ */
diff --git a/src/ssx/ppc405/ppc405_thread_init.S b/src/ssx/ppc405/ppc405_thread_init.S
new file mode 100755
index 0000000..47f5813
--- /dev/null
+++ b/src/ssx/ppc405/ppc405_thread_init.S
@@ -0,0 +1,126 @@
+// $Id: ppc405_thread_init.S,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+
+/// \file ppc405_thread_init.S
+/// \brief PPC405-specific thread initialization
+///
+/// The entry points in this file are routines that are typically used during
+/// initialization, and their code space could be deallocated and recovered if
+/// no longer needed by the application after initialization.
+
+ .nolist
+#include "ssx.h"
+ .list
+
+/// \fn void __ssx_thread_context_initialize(SsxThread *thread, SsxThreadRoutine thread_routine, void *private)
+/// \brief Create the initial thread context on the stack
+///
+/// The non-reserved GPRs are prepatterned with 0x0000\<rn\>\<rn\> where \<rn\> is
+/// the register number (as decimal). The initial context is set up with the
+/// thread running in the default machine context, and when the thread is
+/// switched in it will begin executing at the entry point of the thread
+/// routine with the \c private parameter in R3. The LR is initialized such
+/// that when the thread returns, it will return to the entry point of \c
+/// ssx_complete().
+#ifdef DOXYGEN_ONLY
+void
+__ssx_thread_context_initialize(SsxThread *thread,
+ SsxThreadRoutine thread_routine,
+ void *private);
+#endif
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ppc405_thread_init.S,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \cond
+
+ .global_function __ssx_thread_context_initialize
+
+__ssx_thread_context_initialize:
+
+ ## R3 = thread (param)
+ ## R4 = thread_routine (param)
+ ## R5 = private (param)
+ ## R6 = thread stack pointer (computed)
+ ## R7 = scratch
+
+ .macro _gpr_init, prefix, reg, val
+ li %r7, \val
+ stw %r7, \prefix\reg(%r6)
+ .endm
+
+ ## Initialize a fast context on the thread stack. The CR is cleared,
+ ## the LR = ssx_complete(), R3 has the private parameter.
+
+ lwz %r6, SSX_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
+
+ stwu %r6, -SSX_FAST_CTX_SIZE(%r6)
+
+ li %r7, 0
+ stw %r7, SSX_FAST_CTX_CR(%r6)
+
+ _liw %r7, ssx_complete
+ stw %r7, SSX_FAST_CTX_LR(%r6)
+
+ stw %r5, SSX_FAST_CTX_GPR3(%r6)
+
+ _gpr_init SSX_FAST_CTX_GPR, 4, 0x0404
+ _gpr_init SSX_FAST_CTX_GPR, 5, 0x0505
+ _gpr_init SSX_FAST_CTX_GPR, 6, 0x0606
+ _gpr_init SSX_FAST_CTX_GPR, 7, 0x0707
+
+ ## Initialize the (volatile - fast) context on the thread stack. XER
+ ## and CTR are clear, SRR0 = thread_routine, SRR1 = default machine
+ ## context.
+
+ stwu %r6, -SSX_VOL_FAST_CTX_SIZE(%r6)
+
+ li %r7, 0
+ stw %r7, SSX_VOL_FAST_CTX_XER(%r6)
+ stw %r7, SSX_VOL_FAST_CTX_CTR(%r6)
+
+ stw %r4, SSX_VOL_FAST_CTX_SRR0(%r6)
+
+ _lwzsd %r7, __ssx_thread_machine_context_default
+ stw %r7, SSX_VOL_FAST_CTX_SRR1(%r6)
+
+ _gpr_init SSX_VOL_FAST_CTX_GPR, 0, 0x0000
+ _gpr_init SSX_VOL_FAST_CTX_GPR, 8, 0x0808
+ _gpr_init SSX_VOL_FAST_CTX_GPR, 9, 0x0909
+ _gpr_init SSX_VOL_FAST_CTX_GPR, 10, 0x1010
+	_gpr_init	SSX_VOL_FAST_CTX_GPR, 11, 0x1111
+	_gpr_init	SSX_VOL_FAST_CTX_GPR, 12, 0x1212
+
+ ## Initialize the non-volatile context on the thread stack.
+
+ stwu %r6, -SSX_NON_VOL_CTX_SIZE(%r6)
+
+ _gpr_init SSX_NON_VOL_CTX_GPR, 14, 0x1414
+ _gpr_init SSX_NON_VOL_CTX_GPR, 15, 0x1515
+ _gpr_init SSX_NON_VOL_CTX_GPR, 16, 0x1616
+ _gpr_init SSX_NON_VOL_CTX_GPR, 17, 0x1717
+ _gpr_init SSX_NON_VOL_CTX_GPR, 18, 0x1818
+ _gpr_init SSX_NON_VOL_CTX_GPR, 19, 0x1919
+ _gpr_init SSX_NON_VOL_CTX_GPR, 20, 0x2020
+ _gpr_init SSX_NON_VOL_CTX_GPR, 21, 0x2121
+ _gpr_init SSX_NON_VOL_CTX_GPR, 22, 0x2222
+ _gpr_init SSX_NON_VOL_CTX_GPR, 23, 0x2323
+ _gpr_init SSX_NON_VOL_CTX_GPR, 24, 0x2424
+ _gpr_init SSX_NON_VOL_CTX_GPR, 25, 0x2525
+ _gpr_init SSX_NON_VOL_CTX_GPR, 26, 0x2626
+ _gpr_init SSX_NON_VOL_CTX_GPR, 27, 0x2727
+ _gpr_init SSX_NON_VOL_CTX_GPR, 28, 0x2828
+ _gpr_init SSX_NON_VOL_CTX_GPR, 29, 0x2929
+ _gpr_init SSX_NON_VOL_CTX_GPR, 30, 0x3030
+ _gpr_init SSX_NON_VOL_CTX_GPR, 31, 0x3131
+
+ ## Initialization is done - the stack pointer is stored back in the
+ ## thread.
+
+ stw %r6, SSX_THREAD_OFFSET_SAVED_STACK_POINTER(%r3)
+ blr
+
+ .epilogue __ssx_thread_context_initialize
+
+/// \endcond
diff --git a/src/ssx/ppc405/ssx_port_types.h b/src/ssx/ppc405/ssx_port_types.h
new file mode 100755
index 0000000..f57951d
--- /dev/null
+++ b/src/ssx/ppc405/ssx_port_types.h
@@ -0,0 +1,44 @@
+#ifndef __SSX_PORT_TYPES_H__
+#define __SSX_PORT_TYPES_H__
+
+// $Id: ssx_port_types.h,v 1.1.1.1 2013/12/11 21:03:27 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ssx_port_types.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+/// \file ssx_port_types.h
+/// \brief Type definitions required by the SSX port.
+///
+/// \todo GCC provides a portable version of cntlzw called __builtin_clz().
+/// We should make the SSX priority queues portable by using this facility.
+///
+/// \todo I think that if more of the port-dependent types were moved here, we
+/// could break the circular dependencies in some of the header inclusion and
+/// simplify the way the SSX/port/chip headers are included.
+
+/// An SsxIrqId is an integer in the range of valid interrupts defined by the
+/// interrupt controller.
+
+typedef uint8_t SsxIrqId;
+
+/// SSX requires the port to define the type SsxThreadQueue, which is a
+/// priority queue (where 0 is the highest priority). This queue must be able
+/// to handle SSX_THREADS + 1 priorities (the last for the idle thread). The
+/// port must also define methods for clearing, insertion, deletion and min
+/// (with assumed legal priorities). The min operation returns SSX_THREADS if
+/// the queue is empty. (Or a queue could be initialized with the SSX_THREADS
+/// entry always present - SSX code never tries to delete the idle thread from
+/// a thread queue).
+///
+/// These queues are used both for the run queue and the pending queue
+/// associated with every semaphore.
+///
+/// On PPC405 with 32 threads (implied), this is a job for a uint32_t and
+/// cntlzw().
+
+typedef uint32_t SsxThreadQueue;
+
+#endif /* __SSX_PORT_TYPES_H__ */
diff --git a/src/ssx/ppc405/ssxppc405files.mk b/src/ssx/ppc405/ssxppc405files.mk
new file mode 100755
index 0000000..72d5ecb
--- /dev/null
+++ b/src/ssx/ppc405/ssxppc405files.mk
@@ -0,0 +1,53 @@
+# $Id: ssxppc405files.mk,v 1.2 2014/06/26 13:00:55 cmolsen Exp $
+# $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ssx/ppc405/ssxppc405files.mk,v $
+# @file ssxppc405files.mk
+#
+# @brief mk for including ppc405 object files
+#
+# @page ChangeLogs Change Logs
+# @section ssxppc405files.mk
+# @verbatim
+#
+#
+# Change Log ******************************************************************
+# Flag Defect/Feature User Date Description
+# ------ -------------- ---------- ------------ -----------
+# @pb00E pbavari 03/11/2012 Makefile ODE support
+#
+# @endverbatim
+#
+##########################################################################
+# Include Files
+##########################################################################
+
+
+
+##########################################################################
+# Object Files
+##########################################################################
+PPC405-C-SOURCES = ppc405_core.c \
+ ppc405_lib_core.c \
+ ppc405_cache_core.c \
+ ppc405_init.c \
+ ppc405_irq_core.c \
+ ppc405_irq_init.c
+
+PPC405-S-SOURCES = ppc405_boot.S \
+ ppc405_exceptions.S \
+ ppc405_cache_init.S \
+ ppc405_mmu_asm.S \
+ ppc405_breakpoint.S
+
+PPC405-TIMER-C-SOURCES =
+PPC405-TIMER-S-SOURCES =
+
+PPC405-THREAD-C-SOURCES +=
+PPC405-THREAD-S-SOURCES += ppc405_thread_init.S
+
+PPC405-MMU-C-SOURCES += ppc405_mmu.c
+PPC405-MMU-S-SOURCES +=
+
+PPC405_OBJECTS += $(PPC405-C-SOURCES:.c=.o) $(PPC405-S-SOURCES:.S=.o)
+
+
+
OpenPOWER on IntegriCloud