summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorPatrick Williams <iawillia@us.ibm.com>2011-07-08 19:33:40 -0500
committerA. Patrick Williams III <iawillia@us.ibm.com>2011-07-20 14:58:43 -0500
commit471f09f1a9bcc46fc385fa8aca776cb682075c0b (patch)
treee0a4969825799dcc4c28a71975cb68439f507390 /src
parent3ecf7085ccc55eb4f815a62f47ea09f55bb6688e (diff)
downloadtalos-hostboot-471f09f1a9bcc46fc385fa8aca776cb682075c0b.tar.gz
talos-hostboot-471f09f1a9bcc46fc385fa8aca776cb682075c0b.zip
VMM Improvements.
- Segment Manager - Base / Device Segments - Block for Base image. Change-Id: Ic0c058e5c5b210ec1c48d30f6ed9f9837d74a3c8 Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/193 Tested-by: Jenkins Server Reviewed-by: MATTHEW S. BARTH <msbarth@us.ibm.com>
Diffstat (limited to 'src')
-rw-r--r--src/include/kernel/basesegment.H54
-rw-r--r--src/include/kernel/block.H161
-rw-r--r--src/include/kernel/devicesegment.H33
-rw-r--r--src/include/kernel/pagemgr.H2
-rw-r--r--src/include/kernel/ptmgr.H16
-rw-r--r--src/include/kernel/segment.H52
-rw-r--r--src/include/kernel/segmentmgr.H92
-rw-r--r--src/include/kernel/spte.H87
-rw-r--r--src/include/kernel/vmmmgr.H87
-rw-r--r--src/kernel/basesegment.C57
-rw-r--r--src/kernel/block.C109
-rw-r--r--src/kernel/devicesegment.C91
-rw-r--r--src/kernel/exception.C26
-rw-r--r--src/kernel/heapmgr.C3
-rw-r--r--src/kernel/makefile4
-rw-r--r--src/kernel/pagemgr.C1
-rw-r--r--src/kernel/ptmgr.C66
-rw-r--r--src/kernel/segmentmgr.C76
-rw-r--r--src/kernel/taskmgr.C2
-rw-r--r--src/kernel/vmmmgr.C193
-rw-r--r--src/lib/stdlib.C7
-rw-r--r--src/makefile3
-rw-r--r--src/usr/testcore/kernel/slbtest.H19
-rw-r--r--src/usr/testcore/kernel/vmmbasetest.H97
24 files changed, 1055 insertions, 283 deletions
diff --git a/src/include/kernel/basesegment.H b/src/include/kernel/basesegment.H
new file mode 100644
index 000000000..7f1c78dfa
--- /dev/null
+++ b/src/include/kernel/basesegment.H
@@ -0,0 +1,54 @@
+/** @file basesegment.H
+ * @brief Defines the base segment (0TB) class.
+ */
+#ifndef __KERNEL_BASESEGMENT_H
+#define __KERNEL_BASESEGMENT_H
+
+#include <kernel/segment.H>
+
+class Block;
+
+/** @class BaseSegment
+ * @brief Class to manage the base segment at 0 TB.
+ *
+ * Contains a chain of blocks associated with the first segment.
+ */
+class BaseSegment : public Segment
+{
+ protected:
+ /**
+ * @brief Constructor.
+     *       Initialize attributes and set base address of segment to 0 TB.
+ */
+ BaseSegment() : Segment(0x0), iv_block(NULL) {};
+ /**
+ * @brief Destructor
+ * Delete any blocks owned by this segment.
+ */
+ ~BaseSegment();
+
+ public:
+ /**
+ * @brief Initialize the segment by allocating initial blocks and
+ * adding to the segment manager.
+ */
+ static void init();
+
+ /**
+ * @brief Implementation of the pure-virtual function from Segment.
+ *
+ * Calls block chain to deal with page fault.
+ */
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ private:
+ /**
+ * @brief Internal implementation of init function.
+ */
+ void _init();
+
+ /** Block-chain associated with this Segment. */
+ Block* iv_block;
+};
+
+#endif
diff --git a/src/include/kernel/block.H b/src/include/kernel/block.H
new file mode 100644
index 000000000..1de1c1280
--- /dev/null
+++ b/src/include/kernel/block.H
@@ -0,0 +1,161 @@
+/** @file block.H
+ * @brief Defines the implementation for the generic VMM block class.
+ */
+#ifndef __KERNEL_BLOCK_H
+#define __KERNEL_BLOCK_H
+
+#include <stdint.h>
+#include <kernel/task.H>
+#include <kernel/vmmmgr.H>
+
+class ShadowPTE;
+class Segment;
+
+/** @class Block
+ * @brief Provides management of the memory pages associated with a block of
+ * virtual memory.
+ *
+ * This class is organized to be either an independent block (typically
+ * managed by the Segment container in some way) or as a chain of blocks.
+ *
+ * When the instance is assigned down-stream blocks, the instance will
+ * forward requests that do not belong to it down the chain for handling
+ * by a block responsible for the request. Also, when used in this manner,
+ * this block is responsible for the ownership of all down-stream blocks,
+ * including calling their destructor when necessary.
+ *
+ * There is currently no mechanism for dynamically removing blocks from the
+ * chain. The expectation is that all known use cases would suggest either
+ * a fixed (increasing-only) chain or a known-sized array of blocks.
+ */
+class Block
+{
+ public:
+ /**
+ * @brief Constructor.
+ *
+ * @param[in] i_baseAddr - Base virtual Address of the block.
+ * @param[in] i_size - Size of the block (in bytes).
+ *
+ * Will allocate enough shadow PTEs to track pages in the block.
+ */
+ Block(uint64_t i_baseAddr, uint64_t i_size) :
+ iv_baseAddr(i_baseAddr), iv_size(i_size),
+ iv_parent(NULL), iv_nextBlock(NULL), iv_ptes(NULL)
+ { init(); };
+
+ /**
+ * @brief Destructor.
+ *
+ * Releases associated memory and down-stream blocks.
+ */
+ ~Block();
+
+ /**
+ * @brief Get the base address of this block.
+ * @return Base address (as uint64_t).
+ */
+ uint64_t getBaseAddress() const { return iv_baseAddr; };
+
+ /**
+ * @brief Determines if a virtual address is in the range of the block.
+ *
+ * @param[in] i_addr - The virtual address in question.
+ * @return true - Address is contained within the block.
+ * @return false - Address is not contained within the block.
+ */
+ bool isContained(uint64_t i_addr) const
+ { return (i_addr >= iv_baseAddr) &&
+ (i_addr < iv_baseAddr + iv_size); };
+
+ /**
+ * @brief Responsible for handling page faults within the block [chain].
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ *
+ * If the address is not within this block, the block will attempt to
+ * make calls down the block-chain if it exists.
+ */
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ friend class Segment;
+ friend class BaseSegment;
+ friend class StackSegment;
+
+ protected:
+ /**
+ * @brief Assign a segment to a parent relationship to this block.
+ *
+ * @param[in] i_parent - The segment to assign as a parent.
+ */
+ void setParent(Segment* i_parent) { iv_parent = i_parent; };
+
+ /**
+ * @brief Add a block to the end of this block-chain.
+ *
+         * @param[in] i_block - The block to append.
+ */
+ void appendBlock(Block* i_block)
+ {
+ if (NULL == iv_nextBlock) iv_nextBlock = i_block;
+ else iv_nextBlock->appendBlock(i_block);
+ }
+
+ /**
+ * @brief Set up a virtual-physical mapping for a static page.
+ *
+ * @param[in] i_vAddr - The virtual address of the page.
+ * @param[in] i_pAddr - The physical address of the page.
+ * @param[in] i_access - The permissions of the page.
+ */
+ void setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr,
+ VmmManager::ACCESS_TYPES i_access);
+
+ private:
+ /** Base address of the block */
+ const uint64_t iv_baseAddr;
+ /** Size of the block */
+ const uint64_t iv_size;
+
+ /** Pointer to the parent (containing) segment. */
+ Segment* iv_parent;
+ /** Pointer to the next block in the chain. */
+ Block* iv_nextBlock;
+
+ /** Pointer to the Shadow PTE entries. */
+ ShadowPTE* iv_ptes;
+
+ /**
+ * @brief Finish initialization of block.
+ *
+ * Construct ShadowPTE entries.
+ *
+ * This is defined as a separate function to reduce the code
+ * footprint of the class constructors. GCC emits an "in-charge" and
+ * "not-in-charge" version of each constructor, so put as much
+ * common code into an init function.
+ */
+ void init();
+
+ /**
+ * @brief Find the Shadow PTE for a virtual address.
+ *
+ * @param[in] i_addr - The virtual address to find a page for.
+ * @note This function does no bounds checking.
+ */
+ ShadowPTE* getPTE(uint64_t i_addr) const;
+
+
+ Block(const Block&); // prohibit copy.
+ Block& operator=(const Block&); // prohibit assignment.
+
+};
+
+#endif
diff --git a/src/include/kernel/devicesegment.H b/src/include/kernel/devicesegment.H
new file mode 100644
index 000000000..5fc63819b
--- /dev/null
+++ b/src/include/kernel/devicesegment.H
@@ -0,0 +1,33 @@
+#ifndef __KERNEL_DEVICESEGMENT_H
+#define __KERNEL_DEVICESEGMENT_H
+
+#include <kernel/segment.H>
+
+class DeviceSegment : public Segment
+{
+ public:
+ DeviceSegment() : Segment(0x020000000000ull)
+ {
+ for (int i = 0; i < MMIO_MAP_DEVICES; i++)
+ iv_mmioMap[i] = 0;
+ };
+ ~DeviceSegment() {};
+
+ static void init();
+
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ static void* mmioMap(void* ra, size_t pages); // to be deprecated.
+ static int mmioUnmap(void* ea, size_t pages); // to be deprecated.
+
+ private:
+ enum { MMIO_MAP_DEVICES = 32 };
+ uint64_t iv_mmioMap[MMIO_MAP_DEVICES];
+
+ void _init();
+
+ void* _mmioMap(void* ra, size_t pages);
+ int _mmioUnmap(void* ea, size_t pages);
+};
+
+#endif
diff --git a/src/include/kernel/pagemgr.H b/src/include/kernel/pagemgr.H
index 95d7bf21c..5c22d6864 100644
--- a/src/include/kernel/pagemgr.H
+++ b/src/include/kernel/pagemgr.H
@@ -2,6 +2,7 @@
#define __KERNEL_PAGEMGR_H
#include <stdint.h>
+#include <limits.h>
#include <util/lockfree/stack.H>
#include <kernel/vmmmgr.H>
@@ -19,7 +20,6 @@ class PageManager
enum
{
- PAGESIZE = VmmManager::PAGESIZE,
MEMLEN = VmmManager::HTABORG,
BUCKETS = 16,
diff --git a/src/include/kernel/ptmgr.H b/src/include/kernel/ptmgr.H
index db552ada4..462a6400e 100644
--- a/src/include/kernel/ptmgr.H
+++ b/src/include/kernel/ptmgr.H
@@ -17,7 +17,7 @@ class PageTableManager
/**
* Status Values
*/
- enum
+ enum
{
PTE_UNKNOWN = 0x0000000000000000, /**< Entry wasn't found */
PTE_PRESENT = 0x0000000000000001, /**< Entry is present in table */
@@ -71,7 +71,7 @@ class PageTableManager
* @param[in] i_vAddrFinish End of VA range to remove (full address)
*/
static void delRangeVA( uint64_t i_vAddrStart,
- uint64_t i_vAddrFinish );
+ uint64_t i_vAddrFinish );
/**
@@ -81,7 +81,7 @@ class PageTableManager
* @param[in] i_pnFinish Last Physical Page to remove (page number)
*/
static void delRangePN( uint64_t i_pnStart,
- uint64_t i_pnFinish );
+ uint64_t i_pnFinish );
/**
* @brief Return status information about an entry in the hardware page table
@@ -138,7 +138,7 @@ class PageTableManager
* Local copy of Page Table for user-space testing
* (set to NULL for kernel instance)
*/
- char* ivTABLE;
+ char* ivTABLE;
/**
@@ -177,7 +177,7 @@ class PageTableManager
uint64_t WIMG:4; /**< 57:60 Storage control bits */
uint64_t N:1; /**< 61 No-execute page (N==1) */
uint64_t pp1_2:2; /**< 62:63 Page Protection bits 1:2 */
- };
+ };
uint64_t dword1; /**< Full Dword1 */
};
} PACKED;
@@ -294,7 +294,7 @@ class PageTableManager
* @param[in] i_vAddrFinish End of VA range to remove
*/
void _delRangeVA( uint64_t i_vAddrStart,
- uint64_t i_vAddrFinish );
+ uint64_t i_vAddrFinish );
/**
* @brief Remove a range of entries from the hardware page table
@@ -303,7 +303,7 @@ class PageTableManager
* @param[in] i_pnFinish Last Physical Page to remove
*/
void _delRangePN( uint64_t i_pnStart,
- uint64_t i_pnFinish );
+ uint64_t i_pnFinish );
/**
* @brief Return status information about an entry in the hardware page table
@@ -399,7 +399,7 @@ class PageTableManager
- // Allow testcase to see inside
+ // Allow testcase to see inside
friend class ptmgrtest;
};
diff --git a/src/include/kernel/segment.H b/src/include/kernel/segment.H
new file mode 100644
index 000000000..5a4e7d092
--- /dev/null
+++ b/src/include/kernel/segment.H
@@ -0,0 +1,52 @@
+/** @file segment.H
+ * @brief Contains the definition of the virtual Segment class.
+ */
+#ifndef __KERNEL_SEGMENT_H
+#define __KERNEL_SEGMENT_H
+
+#include <kernel/task.H>
+
+/** @class Segment
+ * @brief Virtual segment class to handle virtual memory management within
+ * a 1TB segment.
+ */
+class Segment
+{
+ public:
+ /**
+ * @brief Constructor.
+ * @param[in] i_baseAddr - Base [virtual] address of this segment.
+ */
+ explicit Segment(uint64_t i_baseAddr) : iv_baseAddress(i_baseAddr) {};
+ /**
+ * @brief Destructor.
+ * No additional action necessary.
+ */
+ virtual ~Segment() {};
+
+ /**
+ * @brief Responsible for handling page faults within the segment.
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ */
+ virtual bool handlePageFault(task_t* i_task, uint64_t i_addr) = 0;
+
+ /**
+ * @brief Get the base address of this segment.
+ * @return Base address (as uint64_t).
+ */
+ uint64_t getBaseAddress() const { return iv_baseAddress; };
+
+ protected:
+ /** The base address of the segment. */
+ const uint64_t iv_baseAddress;
+};
+
+#endif
diff --git a/src/include/kernel/segmentmgr.H b/src/include/kernel/segmentmgr.H
new file mode 100644
index 000000000..7048fa365
--- /dev/null
+++ b/src/include/kernel/segmentmgr.H
@@ -0,0 +1,92 @@
+/** @file segmentmgr.H
+ * Provides definition of the SegmentManager class.
+ */
+
+#ifndef __KERNEL_SEGMENTMGR_H
+#define __KERNEL_SEGMENTMGR_H
+
+#include <kernel/task.H>
+
+// Forward declaration.
+class Segment;
+
+/** @class SegmentManager
+ * @brief Container of Segments. Responsible for managing the SLB.
+ *
+ * @note This class is not thread-safe on its own. Expectation is that
+ * the virtual memory manager will serialize internal operations.
+ */
+class SegmentManager
+{
+ public:
+ /** Segment Identifiers */
+ enum SegmentIds
+ {
+ /** Base Segment (0-1TB). */
+ BASE_SEGMENT_ID = 0,
+ /** Task Stack Segment (1-2TB). */
+ STACK_SEGMENT_ID = 1,
+ /** MMIO Space Segment (2-3TB). */
+ MMIO_SEGMENT_ID = 2,
+
+ MAX_SEGMENTS = 4
+ };
+
+ /**
+ * Constructor. Initializes instance variables.
+ */
+ SegmentManager()
+ {
+ for(int i = 0; i < MAX_SEGMENTS; i++)
+ iv_segments[i] = NULL;
+ };
+ /**
+ * Destructor.
+ * No action necessary. Associated segments are owned externally,
+ * such as in Singletons.
+ */
+ ~SegmentManager() {};
+
+ /**
+ * @brief Responsible for directing page faults to the owning segment.
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ */
+ static bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ /**
+ * @brief Adds a segment to the container.
+ *
+ * @param[in] i_segment - Segment object to associate to segment.
+ * @param[in] i_segId - Segment identifier (which TB) to associate.
+ *
+ * @note Ownership of the Segment object (for freeing memory) remains
+         *       with the caller.
+ */
+ static void addSegment(Segment* i_segment, size_t i_segId);
+
+ /**
+ * @brief Update SLB on this hardware thread with associated segments.
+ */
+ static void initSLB();
+
+ private:
+ /** See handlePageFault. */
+ bool _handlePageFault(task_t* i_task, uint64_t i_addr);
+ /** See addSegment. */
+ void _addSegment(Segment* i_segment, size_t i_segId);
+ /** See initSLB. */
+ void _initSLB();
+
+ /** Array of segment objects to associated segment IDs. */
+ Segment* iv_segments[MAX_SEGMENTS];
+};
+
+#endif
diff --git a/src/include/kernel/spte.H b/src/include/kernel/spte.H
new file mode 100644
index 000000000..ec5c9f20c
--- /dev/null
+++ b/src/include/kernel/spte.H
@@ -0,0 +1,87 @@
+/** @file spte.H
+ * @brief Defines the structure of the Shadow Page Table Entry.
+ */
+#ifndef __KERNEL_SPTE_H
+#define __KERNEL_SPTE_H
+
+#include <stdint.h>
+#include <builtins.h>
+
+/** @class ShadowPTE
+ * @brief Stores information needed in a shadow page table for virtual to
+ * physical address mapping, such as physical page number and
+ * security permissions.
+ *
+ * @note This structure only allows addressing of 4GB of physical memory due
+ * to the page number being stored in a 20 bit field.
+ *
+ * The data within is stored in a way so that the union value is an address
+ * within the physical page who's number is stored and the low-order bits
+ * are used for storing misc information about the page, such as permissions.
+ */
+class ShadowPTE
+{
+ protected:
+ union
+ {
+ uint32_t word;
+ struct
+ {
+ /** Physical page number. */
+ uint32_t page:20;
+ /** Page is present (is PN valid?). */
+ uint32_t present:1;
+
+ /** May the page be written to. */
+ uint32_t writable:1;
+ /** May code be executed off page. */
+ uint32_t executable:1;
+
+ /** Should the dirty bit be maintained. */
+ uint32_t track_write:1;
+ /** Has page been written to. */
+ uint32_t dirty:1;
+
+ /** Reserved for future use. */
+ uint32_t reserved:7;
+ } PACKED;
+ };
+
+ public:
+ /** Initialize PTE */
+ ShadowPTE() : word(0) {};
+ /** Cast-construct from integer directly to the data union. */
+ explicit ShadowPTE(uint32_t i_data) : word(i_data) {};
+
+ /** Get physical page (as address). */
+ uint32_t getPageAddr() const { return (page << 12); };
+ /** Set physical page (as address). */
+ void setPageAddr(uint32_t i_page) { page = (i_page >> 12); };
+ /** Get physical page (as page number). */
+ uint32_t getPage() const { return page; }
+
+ /** Get present bit. */
+ bool isPresent() const { return present; };
+ /** Set present bit. */
+ void setPresent(bool i_present) { present = i_present; };
+
+ /** Get writable bit. */
+ bool isWritable() const { return writable; };
+ /** Set writable bit. */
+ void setWritable(bool i_write) { writable = i_write; };
+ /** Get executable bit. */
+ bool isExecutable() const { return executable; };
+ /** Set executable bit. */
+ void setExecutable(bool i_exec) { executable = i_exec; };
+
+ /** Get write-tracked bit. */
+ bool isWriteTracked() const { return track_write; };
+ /** Set write-tracked bit. */
+ void setWriteTracked(bool i_track) { track_write = i_track; };
+ /** Get dirty bit. */
+ bool isDirty() const { return dirty; };
+ /** Set dirty bit. */
+ void setDirty(bool i_dirty) { dirty = i_dirty; };
+};
+
+#endif
diff --git a/src/include/kernel/vmmmgr.H b/src/include/kernel/vmmmgr.H
index c194b0456..49101feed 100644
--- a/src/include/kernel/vmmmgr.H
+++ b/src/include/kernel/vmmmgr.H
@@ -1,68 +1,71 @@
#ifndef __KERNEL_VMMMGR_H
#define __KERNEL_VMMMGR_H
+#include <limits.h>
#include <kernel/types.h>
#include <kernel/spinlock.H>
class VmmManager
{
public:
- enum VMM_CONSTS
- {
- EIGHT_MEG = 8 * 1024 * 1024,
+ enum VMM_CONSTS
+ {
+ EIGHT_MEG = 8 * 1024 * 1024,
- FULL_MEM_SIZE = 1 * EIGHT_MEG,
- PAGESIZE = 4096,
+ FULL_MEM_SIZE = 1 * EIGHT_MEG,
// put the Page Table at the end of our memory space
PTSIZE = (1 << 18),
- HTABORG = (FULL_MEM_SIZE - PTSIZE),
- };
-
- enum ACCESS_TYPES
- {
- NO_USER_ACCESS,
- READ_O_ACCESS,
- NORMAL_ACCESS,
- CI_ACCESS,
+ HTABORG = (FULL_MEM_SIZE - PTSIZE),
+ };
+
+ enum ACCESS_TYPES
+ {
+ NO_USER_ACCESS,
+ READ_O_ACCESS,
+ NORMAL_ACCESS,
+ CI_ACCESS,
RO_EXE_ACCESS,
- };
+ };
+
+ enum PID_ALLOCATIONS
+ {
+ LinearSpace = (FULL_MEM_SIZE / EIGHT_MEG) - 1,
+ MMIOSpace = LinearSpace + 1,
+ FirstPid,
+ };
- enum PID_ALLOCATIONS
- {
- LinearSpace = (FULL_MEM_SIZE / EIGHT_MEG) - 1,
- MMIOSpace = LinearSpace + 1,
- FirstPid,
- };
+ static void init();
+ static void init_slb();
- enum MMIO_SPACE_INFO
- {
- MMIO_T_ENTRIES =
- FULL_MEM_SIZE * (MMIOSpace - LinearSpace) / PAGESIZE,
- };
+ /**
+ * @brief Responsible for handling PTE misses.
+ *
+ * @param[in] t - Task causing the page fault.
+ * @param[in] effAddr - Effective address accessed to cause fault.
+ *
+ * @return true - PTE miss was successfully handled.
+ *
+ * If the PTE miss is not successfully handled, the exception
+ * handler should collect debug information and kill the task.
+ */
+ static bool pteMiss(task_t* t, uint64_t effAddr);
- static void init();
- static void init_slb();
- static bool pteMiss(task_t*);
+ static void* mmioMap(void*, size_t);
+ static int mmioUnmap(void*, size_t);
- static void* mmioMap(void*, size_t);
- static int mmioUnmap(void*, size_t);
protected:
- VmmManager();
- ~VmmManager() {};
-
+ VmmManager();
+ ~VmmManager() {};
+
private:
- Spinlock lock;
- uint64_t mmioMapT[MMIO_T_ENTRIES];
+ Spinlock lock;
- void initSLB();
- void initPTEs();
- void initSDR1();
+ void initPTEs();
+ void initSDR1();
- bool _pteMiss(task_t*);
- void* _mmioMap(void*, size_t);
- int _mmioUnmap(void*, size_t);
+ bool _pteMiss(task_t*, uint64_t);
};
diff --git a/src/kernel/basesegment.C b/src/kernel/basesegment.C
new file mode 100644
index 000000000..70d91ac72
--- /dev/null
+++ b/src/kernel/basesegment.C
@@ -0,0 +1,57 @@
+#include <limits.h>
+#include <util/singleton.H>
+
+#include <kernel/basesegment.H>
+#include <kernel/segmentmgr.H>
+#include <kernel/block.H>
+#include <kernel/vmmmgr.H>
+
+BaseSegment::~BaseSegment()
+{
+ delete iv_block;
+}
+
+void BaseSegment::init()
+{
+ Singleton<BaseSegment>::instance()._init();
+}
+
+void BaseSegment::_init()
+{
+ // Assign segment to segment manager.
+ SegmentManager::addSegment(this, SegmentManager::BASE_SEGMENT_ID);
+
+ // Create initial static 8MB block.
+ iv_block = new Block(0x0, 0x800000);
+ iv_block->setParent(this);
+
+ // Set default page permissions on block.
+ for (uint64_t i = 0; i < 0x800000; i += PAGESIZE)
+ {
+ // External address filled in by linker as start of kernel's
+ // data pages.
+ extern void* data_load_address;
+
+ // Don't map in the 0 (NULL) page.
+ if (i == 0) continue;
+
+ // Set pages in kernel text section to be read-only / executable.
+ if (((uint64_t)&data_load_address) > i)
+ {
+ iv_block->setPhysicalPage(i, i, VmmManager::RO_EXE_ACCESS);
+ }
+ // Set all other pages to initially be read/write. VFS will set
+ // permissions on pages outside kernel.
+ // (@TODO: Future Sprint, for now keep NORMAL_ACCESS as RWX, not RW.)
+ else
+ {
+ iv_block->setPhysicalPage(i, i, VmmManager::NORMAL_ACCESS);
+ }
+ }
+}
+
+bool BaseSegment::handlePageFault(task_t* i_task, uint64_t i_addr)
+{
+ // Tail recursion to block chain.
+ return iv_block->handlePageFault(i_task, i_addr);
+}
diff --git a/src/kernel/block.C b/src/kernel/block.C
new file mode 100644
index 000000000..25157dac5
--- /dev/null
+++ b/src/kernel/block.C
@@ -0,0 +1,109 @@
+#include <limits.h>
+#include <assert.h>
+
+#include <kernel/block.H>
+#include <kernel/spte.H>
+#include <kernel/vmmmgr.H>
+#include <kernel/ptmgr.H>
+
+Block::~Block()
+{
+ // Release shadow PTE array.
+ delete[] iv_ptes;
+
+ // Delete next block in the chain.
+ if (iv_nextBlock)
+ {
+ delete iv_nextBlock;
+ }
+}
+
+void Block::init()
+{
+ // Create a shadow PTE for each page.
+ iv_ptes = new ShadowPTE[iv_size / PAGESIZE];
+}
+
+ShadowPTE* Block::getPTE(uint64_t i_addr) const
+{
+ return &iv_ptes[(i_addr - iv_baseAddr) / PAGESIZE];
+};
+
+bool Block::handlePageFault(task_t* i_task, uint64_t i_addr)
+{
+    // If the address isn't in this block, forward down the chain; fail
+    // the fault only when no further block exists to handle it.
+    if (!isContained(i_addr))
+    {
+        return (iv_nextBlock ?
+                iv_nextBlock->handlePageFault(i_task, i_addr) : false);
+    }
+
+    ShadowPTE* pte = getPTE(i_addr);
+
+    if (!pte->isPresent())
+    {
+        // TODO. Needs swapping support.
+        return false;
+    }
+
+    if (pte->getPage() == 0)
+    {
+        return false;
+    }
+
+    // Insert hardware PTE for the page-aligned address; permissions
+    // derive from the shadow PTE's exec/write bits.
+    PageTableManager::addEntry(
+            (i_addr / PAGESIZE) * PAGESIZE,
+            pte->getPage(),
+            (pte->isExecutable() ? VmmManager::RO_EXE_ACCESS :
+             (pte->isWritable() ? VmmManager::NORMAL_ACCESS :
+              VmmManager::READ_O_ACCESS)));
+
+    return true;
+
+}
+
+void Block::setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr,
+ VmmManager::ACCESS_TYPES i_access)
+{
+ // Check containment, call down chain if address isn't in this block.
+ if (!isContained(i_vAddr))
+ {
+ if (iv_nextBlock)
+ {
+ iv_nextBlock->setPhysicalPage(i_vAddr, i_pAddr, i_access);
+ }
+ else
+ {
+ // No block owns this address. Code bug.
+ kassert(iv_nextBlock);
+ }
+ return;
+ }
+
+ // Create virtual to physical mapping.
+ ShadowPTE* pte = getPTE(i_vAddr);
+ pte->setPageAddr(i_pAddr);
+ pte->setPresent(true);
+ switch(i_access)
+ {
+ case VmmManager::READ_O_ACCESS:
+ pte->setExecutable(false);
+ pte->setWritable(false);
+ break;
+
+ case VmmManager::NORMAL_ACCESS:
+ pte->setExecutable(false);
+ pte->setWritable(true);
+ break;
+
+ case VmmManager::RO_EXE_ACCESS:
+ pte->setExecutable(true);
+ pte->setWritable(false);
+ break;
+
+ default:
+ kassert(false);
+ break;
+ }
+}
diff --git a/src/kernel/devicesegment.C b/src/kernel/devicesegment.C
new file mode 100644
index 000000000..446d17298
--- /dev/null
+++ b/src/kernel/devicesegment.C
@@ -0,0 +1,91 @@
+#include <util/singleton.H>
+#include <limits.h>
+
+#include <kernel/vmmmgr.H>
+#include <kernel/ptmgr.H>
+#include <kernel/devicesegment.H>
+#include <kernel/segmentmgr.H>
+
+#define SLBE_s 40
+
+#include <kernel/console.H>
+
+void DeviceSegment::init()
+{
+ Singleton<DeviceSegment>::instance()._init();
+}
+
+void* DeviceSegment::mmioMap(void* ra, size_t pages)
+{
+ return Singleton<DeviceSegment>::instance()._mmioMap(ra, pages);
+}
+
+int DeviceSegment::mmioUnmap(void* ea, size_t pages)
+{
+ return Singleton<DeviceSegment>::instance()._mmioUnmap(ea, pages);
+}
+
+void DeviceSegment::_init()
+{
+ SegmentManager::addSegment(this, SegmentManager::MMIO_SEGMENT_ID);
+}
+
+bool DeviceSegment::handlePageFault(task_t* i_task, uint64_t i_addr)
+{
+ // Check address range.
+ if (i_addr < this->getBaseAddress() ||
+ i_addr >= (this->getBaseAddress() + 0x010000000000ull))
+ {
+ return false;
+ }
+
+ // Check valid device.
+ uint64_t segment_ea = i_addr - this->getBaseAddress();
+ size_t idx = segment_ea / ((1ull << SLBE_s) / MMIO_MAP_DEVICES);
+ uint64_t device_offset = segment_ea -
+ (idx * (1ull << SLBE_s) / MMIO_MAP_DEVICES);
+
+ if (0 == iv_mmioMap[idx])
+ {
+ return false;
+ }
+
+ PageTableManager::addEntry((i_addr / PAGESIZE) * PAGESIZE,
+ (iv_mmioMap[idx] + device_offset) / PAGESIZE,
+ VmmManager::CI_ACCESS);
+ return true;
+}
+
+void* DeviceSegment::_mmioMap(void* ra, size_t pages)
+{
+ for (size_t i = 0; i < MMIO_MAP_DEVICES; i++)
+ {
+ if (0 == iv_mmioMap[i])
+ {
+ iv_mmioMap[i] = reinterpret_cast<uint64_t>(ra);
+ return reinterpret_cast<void*>(i *
+ ((1ull << SLBE_s) / MMIO_MAP_DEVICES) +
+ this->getBaseAddress());
+ }
+ }
+
+ return NULL;
+}
+
+int DeviceSegment::_mmioUnmap(void* ea, size_t pages)
+{
+ uint64_t segment_ea = reinterpret_cast<uint64_t>(ea) -
+ this->getBaseAddress();
+ size_t idx = segment_ea / ((1ull << SLBE_s) / MMIO_MAP_DEVICES);
+ if (0 != iv_mmioMap[idx])
+ {
+ PageTableManager::delRangePN(iv_mmioMap[idx] / PAGESIZE,
+ iv_mmioMap[idx] / PAGESIZE +
+ pages);
+ iv_mmioMap[idx] = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
diff --git a/src/kernel/exception.C b/src/kernel/exception.C
index cbbf3cf89..b12914fe8 100644
--- a/src/kernel/exception.C
+++ b/src/kernel/exception.C
@@ -47,12 +47,12 @@ void kernel_execute_data_storage()
switch(exception)
{
case EXCEPTION_DSISR_PTEMISS:
- handled = VmmManager::pteMiss(t);
+ handled = VmmManager::pteMiss(t, getDAR());
break;
}
if (!handled)
{
- printk("Data Storage exception on %d: %lx, %lx\n",
+ printk("Data Storage exception on %d: %lx, %lx\n",
t->tid, getDAR(), getDSISR());
Systemcalls::TaskEnd(t);
}
@@ -66,12 +66,28 @@ void kernel_execute_data_segment()
Systemcalls::TaskEnd(t);
}
+const uint64_t EXCEPTION_SRR1_INSTR_MASK = 0x0000000040000000;
+const uint64_t EXCEPTION_SRR1_INSTR_PTEMISS = 0x0000000040000000;
+
extern "C"
void kernel_execute_inst_storage()
{
task_t* t = TaskManager::getCurrentTask();
- printk("Inst Storage exception, killing task %d\n", t->tid);
- Systemcalls::TaskEnd(t);
+ uint64_t exception = getSRR1() & EXCEPTION_SRR1_INSTR_MASK;
+
+ bool handled = false;
+ switch (exception)
+ {
+ case EXCEPTION_SRR1_INSTR_PTEMISS:
+ handled = VmmManager::pteMiss(t, getSRR0());
+ break;
+ }
+ if (!handled)
+ {
+ printk("Inst Storage exception on %d: %lx, %lx\n",
+ t->tid, getSRR0(), getSRR1());
+ Systemcalls::TaskEnd(t);
+ }
}
extern "C"
@@ -103,7 +119,7 @@ namespace ExceptionHandles
// check for mfsprg3
if ((instruction & 0xfc1fffff) == 0x7c1342a6)
{
- t->context.gprs[(instruction & 0x03E00000) >> 21] =
+ t->context.gprs[(instruction & 0x03E00000) >> 21] =
(uint64_t) t;
t->context.nip = (void*) (((uint64_t)t->context.nip)+4);
return true;
diff --git a/src/kernel/heapmgr.C b/src/kernel/heapmgr.C
index cac6999b5..8bd31bd5f 100644
--- a/src/kernel/heapmgr.C
+++ b/src/kernel/heapmgr.C
@@ -1,3 +1,4 @@
+#include <limits.h>
#include <kernel/heapmgr.H>
#include <util/singleton.H>
#include <kernel/console.H>
@@ -79,7 +80,7 @@ void HeapManager::newPage()
{
void* page = PageManager::allocatePage();
chunk_t * c = (chunk_t*)page;
- for (int i = 0; i < (PageManager::PAGESIZE / (1 << (BUCKETS + 3))); i++)
+ for (int i = 0; i < (PAGESIZE / (1 << (BUCKETS + 3))); i++)
{
c->len = BUCKETS-1;
push_bucket(c, BUCKETS-1);
diff --git a/src/kernel/makefile b/src/kernel/makefile
index fd64fd611..f3d58dee0 100644
--- a/src/kernel/makefile
+++ b/src/kernel/makefile
@@ -1,7 +1,9 @@
ROOTPATH = ../..
OBJS = start.o kernel.o console.o pagemgr.o heapmgr.o taskmgr.o cpumgr.o
-OBJS += syscall.o scheduler.o spinlock.o exception.o vmmmgr.o timemgr.o futexmgr.o ptmgr.o
+OBJS += syscall.o scheduler.o spinlock.o exception.o vmmmgr.o timemgr.o
+OBJS += futexmgr.o ptmgr.o segmentmgr.o devicesegment.o basesegment.o
+OBJS += block.o
include ${ROOTPATH}/config.mk
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index 5d6637203..1570a97d3 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -1,3 +1,4 @@
+#include <limits.h>
#include <kernel/pagemgr.H>
#include <util/singleton.H>
#include <kernel/console.H>
diff --git a/src/kernel/ptmgr.C b/src/kernel/ptmgr.C
index b4cce308a..536930322 100644
--- a/src/kernel/ptmgr.C
+++ b/src/kernel/ptmgr.C
@@ -6,8 +6,8 @@
#include <assert.h>
//#define Dprintk(...) printk(args...)
-#define Dprintk(args...)
-#define Tprintk(args...)
+#define Dprintk(args...)
+#define Tprintk(args...)
#define Eprintk(args...) printk(args)
// Utilities to do some bit manipulation
@@ -74,13 +74,13 @@ ALWAYS_INLINE uint64_t EXTRACT_RJ_LEN( uint64_t i_lastword,
}
else if( i_bitlen <= 64 )
{
- uint64_t diff = 64 - i_bitlen;
+ uint64_t diff = 64 - i_bitlen;
return EXTRACT_RJ( i_lastword, i_startbit + diff, i_lastbit + diff );
}
else if( i_lastbit < (i_bitlen - 64) )
{
// desired bits are inside the first word
- return 0;
+ return 0;
}
// goal is to left-justify the i_startbit to be bit0 in the resulting word
@@ -172,18 +172,18 @@ void PageTableManager::delEntry( uint64_t i_vAddr )
void PageTableManager::delRangeVA( uint64_t i_vAddrStart,
uint64_t i_vAddrFinish )
{
- return Singleton<PageTableManager>::instance()._delRangeVA(i_vAddrStart,i_vAddrFinish);
-}
+ return Singleton<PageTableManager>::instance()._delRangeVA(i_vAddrStart,i_vAddrFinish);
+}
/**
* STATIC
* @brief Remove a range of entries from the hardware page table
*/
void PageTableManager::delRangePN( uint64_t i_pnStart,
- uint64_t i_pnFinish )
+ uint64_t i_pnFinish )
{
- return Singleton<PageTableManager>::instance()._delRangePN(i_pnStart,i_pnFinish);
-}
+ return Singleton<PageTableManager>::instance()._delRangePN(i_pnStart,i_pnFinish);
+}
/**
@@ -193,7 +193,7 @@ void PageTableManager::delRangePN( uint64_t i_pnStart,
uint64_t PageTableManager::getStatus( uint64_t i_vAddr,
uint64_t& o_pn )
{
- return Singleton<PageTableManager>::instance()._getStatus(i_vAddr,o_pn);
+ return Singleton<PageTableManager>::instance()._getStatus(i_vAddr,o_pn);
}
/**
@@ -214,7 +214,7 @@ void PageTableManager::printPTE( const char* i_label,
void PageTableManager::printPTE( uint64_t i_va,
bool i_verbose )
{
- PageTableEntry* pte = Singleton<PageTableManager>::instance().findPTE(i_va);
+ PageTableEntry* pte = Singleton<PageTableManager>::instance().findPTE(i_va);
Singleton<PageTableManager>::instance().printPTE( NULL, pte, i_verbose );
}
@@ -368,11 +368,11 @@ void PageTableManager::_delRangeVA( uint64_t i_vAddrStart,
// Note : this could potentially be very slow for large ranges
// loop around 4K pages within the range
- for( uint64_t va = i_vAddrStart; va < i_vAddrFinish; va += VmmManager::PAGESIZE )
+ for( uint64_t va = i_vAddrStart; va < i_vAddrFinish; va += PAGESIZE )
{
_delEntry( va );
}
-}
+}
/**
* @brief Remove a range of entries from the hardware page table
@@ -395,7 +395,7 @@ void PageTableManager::_delRangePN( uint64_t i_pnStart,
pte++;
}
-}
+}
/**
* @brief Return status information about an entry in the hardware page table
@@ -407,7 +407,7 @@ uint64_t PageTableManager::_getStatus( uint64_t i_vAddr,
o_pn = INVALID_PN;
if( pte ) {
o_pn = pte->PN;
- }
+ }
return getStatus( pte );
}
@@ -453,7 +453,7 @@ uint64_t PageTableManager::getStatus( PageTableEntry* i_pte )
if( i_pte->R == 1 ) {
status |= PTE_ACCESSED;
}
-
+
return status;
}
@@ -522,7 +522,7 @@ uint64_t PageTableManager::findPTEG( uint64_t i_vAddr )
// use the hash as the index into the array of PTEGs
uint64_t pteg_addr = getAddress() + hash * PTEG_SIZE_BYTES;
- Dprintk( "PageTableManager::findPTEG(i_vAddr=0x%.16lX) = 0x%.16lX\n", i_vAddr, pteg_addr );
+ Dprintk( "PageTableManager::findPTEG(i_vAddr=0x%.16lX) = 0x%.16lX\n", i_vAddr, pteg_addr );
return pteg_addr;
}
@@ -572,7 +572,7 @@ PageTableManager::PageTableEntry* PageTableManager::findPTE( uint64_t i_vAddr,
}
Dprintk( "<<PageTableManager::findPTE() = %.16lX>>\n", (uint64_t)pte_found );
- return pte_found;
+ return pte_found;
}
/**
@@ -682,7 +682,7 @@ void PageTableManager::printPTE( const char* i_label,
{
printk( "[%4ld:%4ld]> @%p : %.16lX %.16lX : AVA=%16lX, PN=%ld\n", pte_num/PTEG_SIZE, pte_num%PTEG_SIZE, i_pte, i_pte->dword0, i_pte->dword1, i_pte->AVA, i_pte->PN );
}
-
+
}
@@ -716,7 +716,7 @@ void PageTableManager::_printPT( void )
uint64_t PageTableManager::getAddress( void )
{
if(ivTABLE) {
- return (uint64_t)ivTABLE;
+ return (uint64_t)ivTABLE;
} else {
return VmmManager::HTABORG;
}
@@ -739,15 +739,23 @@ void PageTableManager::setAccessBits( PageTableEntry* o_pte,
o_pte->dword1 &= ~PTE_ACCESS_BITS;
if( VmmManager::NO_USER_ACCESS == i_accessType ) {
o_pte->WIMG = 0b0010; // Memory Coherency Required
+ o_pte->N = 0b1; // No Execute
} else if( VmmManager::READ_O_ACCESS == i_accessType ) {
o_pte->WIMG = 0b0010; // Memory Coherency Required
o_pte->pp1_2 = 0b01; // PP=001
+ o_pte->N = 0b1; // No Execute
} else if( VmmManager::NORMAL_ACCESS == i_accessType ) {
o_pte->WIMG = 0b0010; // Memory Coherency Required
o_pte->pp1_2 = 0b10; // PP=010
+ o_pte->N = 0b0; // @TODO Change to 'No Execute' when VFS supports.
} else if( VmmManager::CI_ACCESS == i_accessType ) {
o_pte->WIMG = 0b0101; // Cache Inhibited, Guarded
o_pte->pp1_2 = 0b10; // PP=010
+ o_pte->N = 0b1; // No Execute
+ } else if( VmmManager::RO_EXE_ACCESS == i_accessType ) {
+ o_pte->WIMG = 0b0010; // Memory Coherency Required
+ o_pte->pp1_2 = 0b01; // PP=001
+ o_pte->N = 0b0; // Execute
} else {
//@fixme - add RO_EXE_ACCESS
Eprintk( "** unrecognized access=%d\n", i_accessType );
@@ -795,7 +803,7 @@ void PageTableManager::setupDefaultPTE( PageTableEntry* o_pte )
o_pte->L = 0b0; //Virtual page size (1=>4KB)
o_pte->H = 0b0; //Hash function identifier (0=primary hash)
}
-
+
/**
* @brief Find the real address of a PTE that that is empty or invalid
*/
@@ -822,7 +830,7 @@ PageTableManager::PageTableEntry* PageTableManager::findEmptyPTE( uint64_t i_pte
}
Dprintk( "<<PageTableManager::findEmptyPTE() = %p>>\n", pte_slot );
- return pte_slot;
+ return pte_slot;
}
/**
@@ -833,15 +841,15 @@ PageTableManager::PageTableEntry* PageTableManager::findOldPTE( uint64_t i_ptegA
{
// Order of preference for PTE slots to steal:
// 1) PTE with highest use count (LRU==SW[2:3])
- // 2) Lowest PTE with the highest use count
+ // 2) Lowest PTE with the highest use count
PageTableEntry* pte = (PageTableEntry*)i_ptegAddr;
PageTableEntry* old_pte = pte;
for( uint64_t x = 0; x < 8; x++ )
{
if( pte->LRU > old_pte->LRU )
{
- old_pte = pte;
- }
+ old_pte = pte;
+ }
pte++;
}
@@ -860,7 +868,7 @@ void PageTableManager::updateLRU( const PageTableEntry* i_newPTE )
// find the beginning of the PTEG
uint64_t pteg_addr = (((uint64_t)i_newPTE) - getAddress()) / PTEG_SIZE_BYTES;
pteg_addr = pteg_addr*PTEG_SIZE_BYTES + getAddress();
-
+
// loop through all 8 PTEs in the PTEG
PageTableEntry* pte_cur = (PageTableEntry*)pteg_addr;
for( uint64_t x = 0; x < 8; x++ )
@@ -894,7 +902,7 @@ void PageTableManager::updateLRU( const PageTableEntry* i_newPTE )
new_pte.dword0 ) );
// tlbie, eieio, tlbsync, ptesync
- invalidateTLB(pte_cur);
+ invalidateTLB(pte_cur);
}
pte_cur++;
@@ -922,10 +930,10 @@ void PageTableManager::invalidateTLB( PageTableEntry* i_pte )
"r"(rB), "r"(rS) : "memory");
/* order tlbie before tlbsync */
- asm volatile("eieio" ::: "memory");
+ asm volatile("eieio" ::: "memory");
/* order tlbie before ptesync */
- asm volatile("tlbsync" ::: "memory");
+ asm volatile("tlbsync" ::: "memory");
/* order tlbie, tlbsync and 1st update before 2nd update */
asm volatile("ptesync" ::: "memory");
diff --git a/src/kernel/segmentmgr.C b/src/kernel/segmentmgr.C
new file mode 100644
index 000000000..fae9d3479
--- /dev/null
+++ b/src/kernel/segmentmgr.C
@@ -0,0 +1,76 @@
+#include <assert.h>
+#include <arch/ppc.H>
+#include <util/singleton.H>
+
+#include <kernel/segmentmgr.H>
+#include <kernel/segment.H>
+
+bool SegmentManager::handlePageFault(task_t* i_task, uint64_t i_addr)
+{
+ return Singleton<SegmentManager>::instance().
+ _handlePageFault(i_task, i_addr);
+}
+
+void SegmentManager::addSegment(Segment* i_segment, size_t i_segId)
+{
+ Singleton<SegmentManager>::instance()._addSegment(i_segment, i_segId);
+}
+
+void SegmentManager::initSLB()
+{
+ Singleton<SegmentManager>::instance()._initSLB();
+}
+
+bool SegmentManager::_handlePageFault(task_t* i_task, uint64_t i_addr)
+{
+ // This constant should come from page manager. Segment size.
+ const size_t SLBE_s = 40;
+
+ // Get segment ID from effective address.
+ size_t segId = i_addr >> SLBE_s;
+
+ // Call contained segment object to handle page fault.
+ if ((segId < MAX_SEGMENTS) && (NULL != iv_segments[segId]))
+ {
+ return iv_segments[segId]->handlePageFault(i_task, i_addr);
+ }
+
+ return false;
+}
+
+void SegmentManager::_addSegment(Segment* i_segment, size_t i_segId)
+{
+ kassert(i_segId < MAX_SEGMENTS);
+ iv_segments[i_segId] = i_segment;
+}
+
+void SegmentManager::_initSLB()
+{
+ // Flush SLB.
+ asm volatile("slbia" ::: "memory");
+ isync(); // Ensure slbia completes prior to slbmtes.
+
+ register uint64_t slbRS, slbRB;
+
+ // Default segment descriptors.
+ // ESID = 0, V = 1, Index = 1.
+ slbRB = 0x0000000008000001;
+ // B = 01 (1TB), VSID = 0, Ks = 0, Kp = 1, NLCLP = 0
+ slbRS = 0x4000000000000400;
+
+ // Add all segments to SLB.
+ for (size_t i = 0; i < MAX_SEGMENTS; i++)
+ {
+ // Add segment to SLB.
+ if (NULL != iv_segments[i])
+ {
+ asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
+ }
+
+ // Increment ESID, VSID, Index.
+ slbRB += 0x0000010000000001;
+ slbRS += 0x0000000001000000;
+ }
+
+ isync(); // Ensure slbmtes complete prior to continuing on.
+}
diff --git a/src/kernel/taskmgr.C b/src/kernel/taskmgr.C
index 8073c0b48..7d2454b6d 100644
--- a/src/kernel/taskmgr.C
+++ b/src/kernel/taskmgr.C
@@ -80,7 +80,7 @@ task_t* TaskManager::_createTask(TaskManager::task_fn_t t,
task->context.stack_ptr =
PageManager::allocatePage(TASK_DEFAULT_STACK_SIZE);
memset(task->context.stack_ptr, '\0',
- TASK_DEFAULT_STACK_SIZE * PAGE_SIZE);
+ TASK_DEFAULT_STACK_SIZE * PAGESIZE);
task->context.gprs[1] = ((uint64_t)task->context.stack_ptr) + 16320;
}
else
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index ba1191d90..98e5a0410 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -1,8 +1,12 @@
+#include <limits.h>
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
#include <arch/ppc.H>
#include <kernel/ptmgr.H>
+#include <kernel/segmentmgr.H>
+#include <kernel/devicesegment.H>
+#include <kernel/basesegment.H>
extern void* data_load_address;
@@ -13,10 +17,13 @@ VmmManager::VmmManager() : lock()
void VmmManager::init()
{
printk("Starting VMM...\n");
-
+
VmmManager& v = Singleton<VmmManager>::instance();
- v.initSLB();
+ BaseSegment::init();
+ DeviceSegment::init();
+ SegmentManager::initSLB();
+
v.initPTEs();
v.initSDR1();
@@ -26,46 +33,24 @@ void VmmManager::init()
void VmmManager::init_slb()
{
VmmManager& v = Singleton<VmmManager>::instance();
- v.initSLB();
+ SegmentManager::initSLB();
+
v.initSDR1();
}
-bool VmmManager::pteMiss(task_t* t)
+bool VmmManager::pteMiss(task_t* t, uint64_t effAddr)
{
- return Singleton<VmmManager>::instance()._pteMiss(t);
+ return Singleton<VmmManager>::instance()._pteMiss(t, effAddr);
}
void* VmmManager::mmioMap(void* ra, size_t pages)
{
- return Singleton<VmmManager>::instance()._mmioMap(ra,pages);
+ return DeviceSegment::mmioMap(ra, pages);
}
int VmmManager::mmioUnmap(void* ea, size_t pages)
{
- return Singleton<VmmManager>::instance()._mmioUnmap(ea,pages);
-}
-
-void VmmManager::initSLB()
-{
- register uint64_t slbRS, slbRB;
-
- // ESID = 0, V = 1, Index = 1.
- slbRB = 0x0000000008000001;
-
- // B = 01 (1TB), VSID = 0, Ks = 0, Kp = 1, NLCLP = 0
- slbRS = 0x4000000000000400;
-
- asm volatile("slbia" ::: "memory");
- asm volatile("isync" ::: "memory");
- asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
-
- // ESID = 2TB, V = 1, Index = 3
- slbRB = 0x0000020008000003;
- // B = 01 (1TB), VSID = 2TB, Ks = 0, Kp = 1, NLCLP = 0
- slbRS = 0x4000020000000400;
-
- asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
- asm volatile("isync" ::: "memory");
+ return DeviceSegment::mmioUnmap(ea, pages);
}
void VmmManager::initPTEs()
@@ -73,21 +58,8 @@ void VmmManager::initPTEs()
// Initialize and invalidate the page table
PageTableManager::init();
- // Set up linear map for every 4K page
- for(size_t i = 0; i < (FULL_MEM_SIZE / PAGESIZE); i++)
- {
- ACCESS_TYPES access = NORMAL_ACCESS;
- if (0 == i)
- {
- access = NO_USER_ACCESS;
- }
- else if (((uint64_t)&data_load_address) > (i * PAGESIZE))
- {
- access = READ_O_ACCESS;
- }
-
- PageTableManager::addEntry( i*PAGESIZE, i, access );
- }
+ // There is no need to add PTE entries because the PTE-miss page fault
+ // handler will add as-needed.
}
void VmmManager::initSDR1()
@@ -97,138 +69,15 @@ void VmmManager::initSDR1()
asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}
-bool VmmManager::_pteMiss(task_t* t)
+bool VmmManager::_pteMiss(task_t* t, uint64_t effAddr)
{
lock.lock();
- uint64_t effAddr = getDAR();
- uint64_t effPid = effAddr / FULL_MEM_SIZE;
-
-
- if (effPid <= LinearSpace)
- {
- lock.unlock();
- return false; // Should not get this exception in Linear space
- // because it is all mapped in all the time.
- }
-
- // Check for exception in MMIO vs Dynamic Stack space.
- if (effPid <= MMIOSpace)
- {
- // Do MMIO mapping.
- uint64_t effAddrPage = (effAddr - FULL_MEM_SIZE) / PAGESIZE;
-
- // Check for valid entry in MMIO map.
- uint64_t mmioMapEntry = mmioMapT[effAddrPage];
- if (0 == mmioMapEntry)
- {
- lock.unlock();
- return false;
- }
-
- uint64_t mmioMapPage = mmioMapEntry / PAGESIZE;
-
- // Update PTE.
- PageTableManager::addEntry( effAddr, mmioMapPage, CI_ACCESS );
-
- lock.unlock();
- return true;
- }
- else
- {
- // TODO: Do dynamic stack mapping.
- lock.unlock();
- return false;
- }
-}
+ bool rc = SegmentManager::handlePageFault(t, effAddr);
-void* VmmManager::_mmioMap(void* ra, size_t pages)
-{
- lock.lock();
-
- ssize_t match = -1;
- uint64_t _ra = (uint64_t) ra;
-
- // Search for memory already mapped in.
- for (size_t i = 0; i < MMIO_T_ENTRIES; i++)
- {
- if ((mmioMapT[i] & ~(PAGESIZE - 1)) == _ra)
- {
- if (i + pages < MMIO_T_ENTRIES)
- {
- bool matched = true;
- for (size_t j = 1; j < pages; j++)
- {
- if ((mmioMapT[i+j] & ~(PAGESIZE - 1)) !=
- (_ra + (j*PAGESIZE)))
- {
- matched = false;
- break;
- }
- }
- if (matched)
- {
- match = i;
- break;
- }
- }
- }
- }
-
- // Found region already mapped in.
- if (-1 != match)
- {
- // Increment ref counts.
- for (size_t i = 0; i < pages; i++)
- {
- mmioMapT[match + i]++;
- }
- // Return calculated effective address.
- lock.unlock();
- return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
- }
-
- // Search for empty region in map.
- for (size_t i = 0; i < MMIO_T_ENTRIES; i++)
- {
- if (0 == mmioMapT[i])
- {
- bool matched = true;
- for (size_t j = 1; j < pages; j++)
- {
- if (0 != mmioMapT[i+j])
- {
- matched = false;
- break;
- }
- }
- if (matched)
- {
- match = i;
- break;
- }
- }
- }
-
- // Found region to use for map.
- if (-1 != match)
- {
- for (size_t i = 0; i < pages; i++)
- {
- mmioMapT[match + i] = _ra + 1; // RA + ref count of 1.
- }
-
- lock.unlock();
- return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
- }
-
- // No entry found and no space for more, return NULL.
lock.unlock();
- return NULL;
-}
-int VmmManager::_mmioUnmap(void* ea, size_t pages)
-{
- return -1;
+ return rc;
}
+
diff --git a/src/lib/stdlib.C b/src/lib/stdlib.C
index ab509ca9a..0d2cde9df 100644
--- a/src/lib/stdlib.C
+++ b/src/lib/stdlib.C
@@ -1,3 +1,4 @@
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/heapmgr.H>
@@ -7,8 +8,8 @@ void* malloc(size_t s)
{
if (s > HeapManager::MAX_ALLOC_SIZE)
{
- size_t pages = (s + 8 + (PageManager::PAGESIZE - 1))
- / PageManager::PAGESIZE;
+ size_t pages = (s + 8 + (PAGESIZE - 1))
+ / PAGESIZE;
void* v = PageManager::allocatePage(pages);
size_t* len = (size_t*)v;
*len = pages << 8;
@@ -46,7 +47,7 @@ void* realloc(void* p, size_t s)
size_t cur_size;
if ((*len) > 0xff)
{
- cur_size = ((*len) >> 8) * PageManager::PAGESIZE - 8;
+ cur_size = ((*len) >> 8) * PAGESIZE - 8;
}
else
{
diff --git a/src/makefile b/src/makefile
index 8f2ecd0b3..e0bc90011 100644
--- a/src/makefile
+++ b/src/makefile
@@ -12,7 +12,8 @@ DIRECT_BOOT_OBJECTS = start.o kernel.o taskmgr.o cpumgr.o syscall.o \
syscall_stub.o syscall_task.o syscall_misc.o \
syscall_msg.o syscall_mmio.o syscall_time.o \
init_main.o vfs_main.o sync.o futexmgr.o \
- ptmgr.o
+ ptmgr.o segmentmgr.o basesegment.o devicesegment.o \
+ block.o
STUB_TESTCASE_OBJECT = cxxtest_stub.o
diff --git a/src/usr/testcore/kernel/slbtest.H b/src/usr/testcore/kernel/slbtest.H
index 8784458d0..f773efe2d 100644
--- a/src/usr/testcore/kernel/slbtest.H
+++ b/src/usr/testcore/kernel/slbtest.H
@@ -26,15 +26,6 @@ class slbtest: public CxxTest::TestSuite
{
TS_FAIL("Data Segment exception expected in 1TB segment\n");
}
-
- rc = 0;
- task_create(writeEA2TB, this);
- while (rc == 0) task_yield();
- task_yield();
- if (rc == -1)
- {
- TS_FAIL("Data Storage exception expected in 2TB segment\n");
- }
}
private:
@@ -48,16 +39,6 @@ class slbtest: public CxxTest::TestSuite
rc = -1;
task_end();
}
-
- static void writeEA2TB(void *i_p)
- {
- rc = 1;
- sync();
- *(int *)0x20000000000 = 1;
- sync();
- rc = -1;
- task_end();
- }
};
volatile int slbtest::rc = 0;
diff --git a/src/usr/testcore/kernel/vmmbasetest.H b/src/usr/testcore/kernel/vmmbasetest.H
new file mode 100644
index 000000000..df9d320a4
--- /dev/null
+++ b/src/usr/testcore/kernel/vmmbasetest.H
@@ -0,0 +1,97 @@
+/** @file vmmbasetest.H
+ * @brief Test cases for permission settings on the base block of the VMM.
+ */
+#ifndef __KERNEL_VMMBASETEST_H
+#define __KERNEL_VMMBASETEST_H
+
+#include <cxxtest/TestSuite.H>
+#include <sys/task.h>
+#include <kernel/console.H>
+#include <arch/ppc.H>
+
+class VmmBaseTest : public CxxTest::TestSuite
+{
+ public:
+ static volatile int rc;
+
+ void testNullAccess()
+ {
+ rc = 0; sync();
+ printk("Test case: Expect to see uncaught exception! ");
+ task_create(readFromNULL, NULL);
+ while (rc == 0) task_yield();
+ task_yield();
+ if (rc == -1)
+ {
+            TS_FAIL("Read from NULL not caught.");
+ }
+
+ rc = 0; sync();
+ printk("Test case: Expect to see uncaught exception! ");
+ task_create(writeToNULL, NULL);
+ while (rc == 0) task_yield();
+ task_yield();
+ if (rc == -1)
+ {
+ TS_FAIL("Write to NULL not caught.");
+ }
+ }
+
+ void testWriteToKernelCode()
+ {
+ rc = 0; sync();
+ printk("Test case: Expect to see uncaught exception! ");
+ task_create(writeToKernelCode, NULL);
+ while (rc == 0) task_yield();
+ task_yield();
+ if (rc == -1)
+ {
+ TS_FAIL("Write to kernel code not caught.");
+ }
+ }
+
+ void testExecuteKernelDataSpace()
+ {
+ // @TODO. VMM not ready.
+ }
+
+ void testWriteModuleText()
+ {
+ // @TODO. VMM not ready.
+ }
+
+ void testExecuteModuleDataSpace()
+ {
+ // @TODO. VMM not ready.
+ }
+
+ private:
+
+ static void readFromNULL(void* unused)
+ {
+ rc = 1; sync();
+ printk("%lx", (*(uint64_t*)NULL)); sync();
+ rc = -1; sync();
+ task_end();
+ }
+
+ static void writeToNULL(void* unused)
+ {
+ rc = 1; sync();
+ (*(uint64_t*)NULL) = 0x12345678; sync();
+ rc = -1; sync();
+ task_end();
+ }
+
+ static void writeToKernelCode(void* unused)
+ {
+ rc = 1; sync();
+ (*(*(uint64_t**)&printk)) = 0x12345678; sync();
+ rc = -1; sync();
+ task_end();
+ }
+
+};
+volatile int VmmBaseTest::rc = 0;
+
+#endif
OpenPOWER on IntegriCloud