path: root/src/include
author     Patrick Williams <iawillia@us.ibm.com>          2011-07-08 19:33:40 -0500
committer  A. Patrick Williams III <iawillia@us.ibm.com>   2011-07-20 14:58:43 -0500
commit     471f09f1a9bcc46fc385fa8aca776cb682075c0b (patch)
tree       e0a4969825799dcc4c28a71975cb68439f507390 /src/include
parent     3ecf7085ccc55eb4f815a62f47ea09f55bb6688e (diff)
VMM Improvements.
- Segment Manager
- Base / Device Segments
- Block for Base image.

Change-Id: Ic0c058e5c5b210ec1c48d30f6ed9f9837d74a3c8
Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/193
Tested-by: Jenkins Server
Reviewed-by: MATTHEW S. BARTH <msbarth@us.ibm.com>
Diffstat (limited to 'src/include')
-rw-r--r--   src/include/kernel/basesegment.H     54
-rw-r--r--   src/include/kernel/block.H          161
-rw-r--r--   src/include/kernel/devicesegment.H   33
-rw-r--r--   src/include/kernel/pagemgr.H          2
-rw-r--r--   src/include/kernel/ptmgr.H           16
-rw-r--r--   src/include/kernel/segment.H         52
-rw-r--r--   src/include/kernel/segmentmgr.H      92
-rw-r--r--   src/include/kernel/spte.H            87
-rw-r--r--   src/include/kernel/vmmmgr.H          87
9 files changed, 533 insertions, 51 deletions
diff --git a/src/include/kernel/basesegment.H b/src/include/kernel/basesegment.H
new file mode 100644
index 000000000..7f1c78dfa
--- /dev/null
+++ b/src/include/kernel/basesegment.H
@@ -0,0 +1,54 @@
+/** @file basesegment.H
+ * @brief Defines the base segment (0TB) class.
+ */
+#ifndef __KERNEL_BASESEGMENT_H
+#define __KERNEL_BASESEGMENT_H
+
+#include <kernel/segment.H>
+
+class Block;
+
+/** @class BaseSegment
+ * @brief Class to manage the base segment at 0 TB.
+ *
+ * Contains a chain of blocks associated with the first segment.
+ */
+class BaseSegment : public Segment
+{
+ protected:
+ /**
+ * @brief Constructor.
+ * Initialize attributes and set base address of segment to 0 TB.
+ */
+ BaseSegment() : Segment(0x0), iv_block(NULL) {};
+ /**
+ * @brief Destructor
+ * Delete any blocks owned by this segment.
+ */
+ ~BaseSegment();
+
+ public:
+ /**
+ * @brief Initialize the segment by allocating initial blocks and
+ * adding to the segment manager.
+ */
+ static void init();
+
+ /**
+ * @brief Implementation of the pure-virtual function from Segment.
+ *
+ * Calls block chain to deal with page fault.
+ */
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ private:
+ /**
+ * @brief Internal implementation of init function.
+ */
+ void _init();
+
+ /** Block-chain associated with this Segment. */
+ Block* iv_block;
+};
+
+#endif
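
The implementation of BaseSegment lives in a corresponding source file that is not part of this diff, so the following is only a sketch of the documented behavior ("calls block chain to deal with page fault"); the early return when no block is attached yet is an assumption.

    // Sketch only -- basesegment.C is not included in this change.
    bool BaseSegment::handlePageFault(task_t* i_task, uint64_t i_addr)
    {
        // With no blocks attached yet there is nothing that can satisfy the fault.
        if (NULL == iv_block)
        {
            return false;
        }
        // Otherwise let the head of the block chain resolve (or forward) it.
        return iv_block->handlePageFault(i_task, i_addr);
    }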
diff --git a/src/include/kernel/block.H b/src/include/kernel/block.H
new file mode 100644
index 000000000..1de1c1280
--- /dev/null
+++ b/src/include/kernel/block.H
@@ -0,0 +1,161 @@
+/** @file block.H
+ * @brief Defines the implementation for the generic VMM block class.
+ */
+#ifndef __KERNEL_BLOCK_H
+#define __KERNEL_BLOCK_H
+
+#include <stdint.h>
+#include <kernel/task.H>
+#include <kernel/vmmmgr.H>
+
+class ShadowPTE;
+class Segment;
+
+/** @class Block
+ * @brief Provides management of the memory pages associated with a block of
+ * virtual memory.
+ *
+ * This class is organized to be used either as an independent block (typically
+ * managed by the Segment container in some way) or as part of a chain of blocks.
+ *
+ * When the instance is assigned down-stream blocks, the instance will
+ * forward requests that do not belong to it down the chain for handling
+ * by a block responsible for the request. Also, when used in this manner,
+ * this block is responsible for the ownership of all down-stream blocks,
+ * including calling their destructor when necessary.
+ *
+ * There is currently no mechanism for dynamically removing blocks from the
+ * chain. The expectation is that all known use cases would suggest either
+ * a fixed (increasing-only) chain or a known-sized array of blocks.
+ */
+class Block
+{
+ public:
+ /**
+ * @brief Constructor.
+ *
+ * @param[in] i_baseAddr - Base virtual address of the block.
+ * @param[in] i_size - Size of the block (in bytes).
+ *
+ * Will allocate enough shadow PTEs to track pages in the block.
+ */
+ Block(uint64_t i_baseAddr, uint64_t i_size) :
+ iv_baseAddr(i_baseAddr), iv_size(i_size),
+ iv_parent(NULL), iv_nextBlock(NULL), iv_ptes(NULL)
+ { init(); };
+
+ /**
+ * @brief Destructor.
+ *
+ * Releases associated memory and down-stream blocks.
+ */
+ ~Block();
+
+ /**
+ * @brief Get the base address of this block.
+ * @return Base address (as uint64_t).
+ */
+ uint64_t getBaseAddress() const { return iv_baseAddr; };
+
+ /**
+ * @brief Determines if a virtual address is in the range of the block.
+ *
+ * @param[in] i_addr - The virtual address in question.
+ * @return true - Address is contained within the block.
+ * @return false - Address is not contained within the block.
+ */
+ bool isContained(uint64_t i_addr) const
+ { return (i_addr >= iv_baseAddr) &&
+ (i_addr < iv_baseAddr + iv_size); };
+
+ /**
+ * @brief Responsible for handling page faults within the block [chain].
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled, the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ *
+ * If the address is not within this block, the block will attempt to
+ * make calls down the block-chain if it exists.
+ */
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ friend class Segment;
+ friend class BaseSegment;
+ friend class StackSegment;
+
+ protected:
+ /**
+ * @brief Assign a segment as the parent of this block.
+ *
+ * @param[in] i_parent - The segment to assign as a parent.
+ */
+ void setParent(Segment* i_parent) { iv_parent = i_parent; };
+
+ /**
+ * @brief Add a block to the end of this block-chain.
+ *
+ * @param[in] i_block - The block to append.
+ */
+ void appendBlock(Block* i_block)
+ {
+ if (NULL == iv_nextBlock) iv_nextBlock = i_block;
+ else iv_nextBlock->appendBlock(i_block);
+ }
+
+ /**
+ * @brief Set up a virtual-physical mapping for a static page.
+ *
+ * @param[in] i_vAddr - The virtual address of the page.
+ * @param[in] i_pAddr - The physical address of the page.
+ * @param[in] i_access - The permissions of the page.
+ */
+ void setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr,
+ VmmManager::ACCESS_TYPES i_access);
+
+ private:
+ /** Base address of the block */
+ const uint64_t iv_baseAddr;
+ /** Size of the block */
+ const uint64_t iv_size;
+
+ /** Pointer to the parent (containing) segment. */
+ Segment* iv_parent;
+ /** Pointer to the next block in the chain. */
+ Block* iv_nextBlock;
+
+ /** Pointer to the Shadow PTE entries. */
+ ShadowPTE* iv_ptes;
+
+ /**
+ * @brief Finish initialization of block.
+ *
+ * Construct ShadowPTE entries.
+ *
+ * This is defined as a separate function to reduce the code
+ * footprint of the class constructors. GCC emits an "in-charge" and
+ * "not-in-charge" version of each constructor, so put as much
+ * common code into an init function.
+ */
+ void init();
+
+ /**
+ * @brief Find the Shadow PTE for a virtual address.
+ *
+ * @param[in] i_addr - The virtual address to find a page for.
+ * @note This function does no bounds checking.
+ */
+ ShadowPTE* getPTE(uint64_t i_addr) const;
+
+
+ Block(const Block&); // prohibit copy.
+ Block& operator=(const Block&); // prohibit assignment.
+
+};
+
+#endif
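
Neither block.C nor the page-size constant is shown in this diff (the pagemgr.H hunk below suggests PAGESIZE now comes from <limits.h>), so the following is only a sketch of the chain walk and per-page ShadowPTE lookup described in the comments above.

    // Sketch only -- block.C is not included in this change; PAGESIZE is
    // assumed to be the page size now pulled in from <limits.h>.
    bool Block::handlePageFault(task_t* i_task, uint64_t i_addr)
    {
        if (!isContained(i_addr))
        {
            // Not our range: forward down the chain if one exists.
            return (NULL != iv_nextBlock)
                        ? iv_nextBlock->handlePageFault(i_task, i_addr)
                        : false;
        }

        // One ShadowPTE is kept per page, so the lookup is simple index
        // arithmetic off the block's base address (no bounds checking).
        ShadowPTE* pte = getPTE(i_addr);
        if (!pte->isPresent())
        {
            return false;   // No physical page assigned; nothing to map.
        }

        // A present page would be inserted into the hardware page table here
        // (e.g. via the PageTableManager), honoring writable/executable bits.
        return true;
    }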
diff --git a/src/include/kernel/devicesegment.H b/src/include/kernel/devicesegment.H
new file mode 100644
index 000000000..5fc63819b
--- /dev/null
+++ b/src/include/kernel/devicesegment.H
@@ -0,0 +1,33 @@
+#ifndef __KERNEL_DEVICESEGMENT_H
+#define __KERNEL_DEVICESEGMENT_H
+
+#include <kernel/segment.H>
+
+class DeviceSegment : public Segment
+{
+ public:
+ DeviceSegment() : Segment(0x020000000000ull)
+ {
+ for (int i = 0; i < MMIO_MAP_DEVICES; i++)
+ iv_mmioMap[i] = 0;
+ };
+ ~DeviceSegment() {};
+
+ static void init();
+
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ static void* mmioMap(void* ra, size_t pages); // to be deprecated.
+ static int mmioUnmap(void* ea, size_t pages); // to be deprecated.
+
+ private:
+ enum { MMIO_MAP_DEVICES = 32 };
+ uint64_t iv_mmioMap[MMIO_MAP_DEVICES];
+
+ void _init();
+
+ void* _mmioMap(void* ra, size_t pages);
+ int _mmioUnmap(void* ea, size_t pages);
+};
+
+#endif
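
Unlike the other new headers, devicesegment.H carries no doxygen comments; it manages the 2 TB MMIO segment and keeps a small table of up to 32 device mappings. Usage of the (to-be-deprecated) static helpers might look like the following sketch; the physical address is illustrative only.

    // Illustrative only -- the physical address below is made up.
    void* ciAddr = DeviceSegment::mmioMap(reinterpret_cast<void*>(0x0F00000000ull), 1);
    // ... perform cache-inhibited loads/stores through ciAddr ...
    int rc = DeviceSegment::mmioUnmap(ciAddr, 1);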
diff --git a/src/include/kernel/pagemgr.H b/src/include/kernel/pagemgr.H
index 95d7bf21c..5c22d6864 100644
--- a/src/include/kernel/pagemgr.H
+++ b/src/include/kernel/pagemgr.H
@@ -2,6 +2,7 @@
#define __KERNEL_PAGEMGR_H
#include <stdint.h>
+#include <limits.h>
#include <util/lockfree/stack.H>
#include <kernel/vmmmgr.H>
@@ -19,7 +20,6 @@ class PageManager
enum
{
- PAGESIZE = VmmManager::PAGESIZE,
MEMLEN = VmmManager::HTABORG,
BUCKETS = 16,
diff --git a/src/include/kernel/ptmgr.H b/src/include/kernel/ptmgr.H
index db552ada4..462a6400e 100644
--- a/src/include/kernel/ptmgr.H
+++ b/src/include/kernel/ptmgr.H
@@ -17,7 +17,7 @@ class PageTableManager
/**
* Status Values
*/
- enum
+ enum
{
PTE_UNKNOWN = 0x0000000000000000, /**< Entry wasn't found */
PTE_PRESENT = 0x0000000000000001, /**< Entry is present in table */
@@ -71,7 +71,7 @@ class PageTableManager
* @param[in] i_vAddrFinish End of VA range to remove (full address)
*/
static void delRangeVA( uint64_t i_vAddrStart,
- uint64_t i_vAddrFinish );
+ uint64_t i_vAddrFinish );
/**
@@ -81,7 +81,7 @@ class PageTableManager
* @param[in] i_pnFinish Last Physical Page to remove (page number)
*/
static void delRangePN( uint64_t i_pnStart,
- uint64_t i_pnFinish );
+ uint64_t i_pnFinish );
/**
* @brief Return status information about an entry in the hardware page table
@@ -138,7 +138,7 @@ class PageTableManager
* Local copy of Page Table for user-space testing
* (set to NULL for kernel instance)
*/
- char* ivTABLE;
+ char* ivTABLE;
/**
@@ -177,7 +177,7 @@ class PageTableManager
uint64_t WIMG:4; /**< 57:60 Storage control bits */
uint64_t N:1; /**< 61 No-execute page (N==1) */
uint64_t pp1_2:2; /**< 62:63 Page Protection bits 1:2 */
- };
+ };
uint64_t dword1; /**< Full Dword1 */
};
} PACKED;
@@ -294,7 +294,7 @@ class PageTableManager
* @param[in] i_vAddrFinish End of VA range to remove
*/
void _delRangeVA( uint64_t i_vAddrStart,
- uint64_t i_vAddrFinish );
+ uint64_t i_vAddrFinish );
/**
* @brief Remove a range of entries from the hardware page table
@@ -303,7 +303,7 @@ class PageTableManager
* @param[in] i_pnFinish Last Physical Page to remove
*/
void _delRangePN( uint64_t i_pnStart,
- uint64_t i_pnFinish );
+ uint64_t i_pnFinish );
/**
* @brief Return status information about an entry in the hardware page table
@@ -399,7 +399,7 @@ class PageTableManager
- // Allow testcase to see inside
+ // Allow testcase to see inside
friend class ptmgrtest;
};
diff --git a/src/include/kernel/segment.H b/src/include/kernel/segment.H
new file mode 100644
index 000000000..5a4e7d092
--- /dev/null
+++ b/src/include/kernel/segment.H
@@ -0,0 +1,52 @@
+/** @file segment.H
+ * @brief Contains the definition of the virtual Segment class.
+ */
+#ifndef __KERNEL_SEGMENT_H
+#define __KERNEL_SEGMENT_H
+
+#include <kernel/task.H>
+
+/** @class Segment
+ * @brief Virtual segment class to handle virtual memory management within
+ * a 1TB segment.
+ */
+class Segment
+{
+ public:
+ /**
+ * @brief Constructor.
+ * @param[in] i_baseAddr - Base [virtual] address of this segment.
+ */
+ explicit Segment(uint64_t i_baseAddr) : iv_baseAddress(i_baseAddr) {};
+ /**
+ * @brief Destructor.
+ * No additional action necessary.
+ */
+ virtual ~Segment() {};
+
+ /**
+ * @brief Responsible for handling page faults within the segment.
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled, the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ */
+ virtual bool handlePageFault(task_t* i_task, uint64_t i_addr) = 0;
+
+ /**
+ * @brief Get the base address of this segment.
+ * @return Base address (as uint64_t).
+ */
+ uint64_t getBaseAddress() const { return iv_baseAddress; };
+
+ protected:
+ /** The base address of the segment. */
+ const uint64_t iv_baseAddress;
+};
+
+#endif
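
Segment is the abstract 1 TB unit: each concrete segment passes its terabyte-aligned base address to this constructor and supplies handlePageFault. BaseSegment (0 TB) and DeviceSegment (2 TB) appear elsewhere in this diff; a stack segment at 1 TB is referenced (the friend declaration in block.H and STACK_SEGMENT_ID below) but is not included, so the derived-class shape below is only a sketch.

    // Sketch only -- a StackSegment is referenced by this change but its
    // header is not part of the diff; the 1 TB base follows from the
    // "Task Stack Segment (1-2TB)" comment in segmentmgr.H below.
    class StackSegment : public Segment
    {
        public:
            StackSegment() : Segment(0x010000000000ull) {};   // 1 TB base.

            bool handlePageFault(task_t* i_task, uint64_t i_addr)
            {
                // Placeholder: a real handler would grow/map task stacks.
                return false;
            }
    };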
diff --git a/src/include/kernel/segmentmgr.H b/src/include/kernel/segmentmgr.H
new file mode 100644
index 000000000..7048fa365
--- /dev/null
+++ b/src/include/kernel/segmentmgr.H
@@ -0,0 +1,92 @@
+/** @file segmentmgr.H
+ * Provides definition of the SegmentManager class.
+ */
+
+#ifndef __KERNEL_SEGMENTMGR_H
+#define __KERNEL_SEGMENTMGR_H
+
+#include <kernel/task.H>
+
+// Forward declaration.
+class Segment;
+
+/** @class SegmentManager
+ * @brief Container of Segments. Responsible for managing the SLB.
+ *
+ * @note This class is not thread-safe on its own. Expectation is that
+ * the virtual memory manager will serialize internal operations.
+ */
+class SegmentManager
+{
+ public:
+ /** Segment Identifiers */
+ enum SegmentIds
+ {
+ /** Base Segment (0-1TB). */
+ BASE_SEGMENT_ID = 0,
+ /** Task Stack Segment (1-2TB). */
+ STACK_SEGMENT_ID = 1,
+ /** MMIO Space Segment (2-3TB). */
+ MMIO_SEGMENT_ID = 2,
+
+ MAX_SEGMENTS = 4
+ };
+
+ /**
+ * Constructor. Initializes instance variables.
+ */
+ SegmentManager()
+ {
+ for(int i = 0; i < MAX_SEGMENTS; i++)
+ iv_segments[i] = NULL;
+ };
+ /**
+ * Destructor.
+ * No action necessary. Associated segments are owned externally,
+ * such as in Singletons.
+ */
+ ~SegmentManager() {};
+
+ /**
+ * @brief Responsible for directing page faults to the owning segment.
+ *
+ * @param[in] i_task - Task causing the page fault.
+ * @param[in] i_addr - Effective address accessed to cause fault.
+ *
+ * @return true - Page fault was successfully handled.
+ *
+ * If the page fault is not successfully handled, the expectation is
+ * that the VMM will perform appropriate action, such as killing the
+ * task.
+ */
+ static bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ /**
+ * @brief Adds a segment to the container.
+ *
+ * @param[in] i_segment - Segment object to associate with the segment ID.
+ * @param[in] i_segId - Segment identifier (which TB) to associate.
+ *
+ * @note Ownership of the Segment object (for freeing memory) remains
+ * with the callee.
+ */
+ static void addSegment(Segment* i_segment, size_t i_segId);
+
+ /**
+ * @brief Update SLB on this hardware thread with associated segments.
+ */
+ static void initSLB();
+
+ private:
+ /** See handlePageFault. */
+ bool _handlePageFault(task_t* i_task, uint64_t i_addr);
+ /** See addSegment. */
+ void _addSegment(Segment* i_segment, size_t i_segId);
+ /** See initSLB. */
+ void _initSLB();
+
+ /** Array of segment objects, indexed by segment ID. */
+ Segment* iv_segments[MAX_SEGMENTS];
+};
+
+#endif
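
segmentmgr.C is not part of this diff, but with 1 TB segments the owner of a faulting address is simply the effective address divided by 1 TB (a shift by 40 bits), so the dispatch described above plausibly reduces to a table lookup. The sketch below assumes exactly that.

    // Sketch only -- segmentmgr.C is not included in this change.
    bool SegmentManager::_handlePageFault(task_t* i_task, uint64_t i_addr)
    {
        size_t segId = i_addr >> 40;                 // 1 TB == 2^40 bytes.
        if ((segId >= MAX_SEGMENTS) || (NULL == iv_segments[segId]))
        {
            return false;                            // No segment owns this address.
        }
        return iv_segments[segId]->handlePageFault(i_task, i_addr);
    }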
diff --git a/src/include/kernel/spte.H b/src/include/kernel/spte.H
new file mode 100644
index 000000000..ec5c9f20c
--- /dev/null
+++ b/src/include/kernel/spte.H
@@ -0,0 +1,87 @@
+/** @file spte.H
+ * @brief Defines the structure of the Shadow Page Table Entry.
+ */
+#ifndef __KERNEL_SPTE_H
+#define __KERNEL_SPTE_H
+
+#include <stdint.h>
+#include <builtins.h>
+
+/** @class ShadowPTE
+ * @brief Stores information needed in a shadow page table for virtual to
+ * physical address mapping, such as physical page number and
+ * security permissions.
+ *
+ * @note This structure only allows addressing of 4GB of physical memory due
+ * to the page number being stored in a 20 bit field.
+ *
+ * The data is laid out such that the union value is an address within the
+ * physical page whose number is stored, with the low-order bits holding
+ * miscellaneous information about the page, such as permissions.
+ */
+class ShadowPTE
+{
+ protected:
+ union
+ {
+ uint32_t word;
+ struct
+ {
+ /** Physical page number. */
+ uint32_t page:20;
+ /** Page is present (is PN valid?). */
+ uint32_t present:1;
+
+ /** May the page be written to. */
+ uint32_t writable:1;
+ /** May code be executed off page. */
+ uint32_t executable:1;
+
+ /** Should the dirty bit be maintained. */
+ uint32_t track_write:1;
+ /** Has page been written to. */
+ uint32_t dirty:1;
+
+ /** Reserved for future use. */
+ uint32_t reserved:7;
+ } PACKED;
+ };
+
+ public:
+ /** Initialize PTE */
+ ShadowPTE() : word(0) {};
+ /** Cast-construct from integer directly to the data union. */
+ explicit ShadowPTE(uint32_t i_data) : word(i_data) {};
+
+ /** Get physical page (as address). */
+ uint32_t getPageAddr() const { return (page << 12); };
+ /** Set physical page (as address). */
+ void setPageAddr(uint32_t i_page) { page = (i_page >> 12); };
+ /** Get physical page (as page number). */
+ uint32_t getPage() const { return page; }
+
+ /** Get present bit. */
+ bool isPresent() const { return present; };
+ /** Set present bit. */
+ void setPresent(bool i_present) { present = i_present; };
+
+ /** Get writable bit. */
+ bool isWritable() const { return writable; };
+ /** Set writable bit. */
+ void setWritable(bool i_write) { writable = i_write; };
+ /** Get executable bit. */
+ bool isExecutable() const { return executable; };
+ /** Set executable bit. */
+ void setExecutable(bool i_exec) { executable = i_exec; };
+
+ /** Get write-tracked bit. */
+ bool isWriteTracked() const { return track_write; };
+ /** Set write-tracked bit. */
+ void setWriteTracked(bool i_track) { track_write = i_track; };
+ /** Get dirty bit. */
+ bool isDirty() const { return dirty; };
+ /** Set dirty bit. */
+ void setDirty(bool i_dirty) { dirty = i_dirty; };
+};
+
+#endif
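
A short worked example of the packing described in the class comment: the 20-bit page number occupies the high-order bits of the 32-bit word, so the union value is the 4KB-aligned physical address with the flag bits packed into the low-order 12 bits, and 2^20 pages of 4KB each caps addressable physical memory at 4GB. The address below is illustrative only.

    ShadowPTE pte;
    pte.setPageAddr(0x00345000);     // Low 12 bits are dropped by the >> 12.
    pte.setPresent(true);
    pte.setWritable(true);
    // pte.getPage()     == 0x00345      (20-bit physical page number)
    // pte.getPageAddr() == 0x00345000   (page number << 12)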
diff --git a/src/include/kernel/vmmmgr.H b/src/include/kernel/vmmmgr.H
index c194b0456..49101feed 100644
--- a/src/include/kernel/vmmmgr.H
+++ b/src/include/kernel/vmmmgr.H
@@ -1,68 +1,71 @@
#ifndef __KERNEL_VMMMGR_H
#define __KERNEL_VMMMGR_H
+#include <limits.h>
#include <kernel/types.h>
#include <kernel/spinlock.H>
class VmmManager
{
public:
- enum VMM_CONSTS
- {
- EIGHT_MEG = 8 * 1024 * 1024,
+ enum VMM_CONSTS
+ {
+ EIGHT_MEG = 8 * 1024 * 1024,
- FULL_MEM_SIZE = 1 * EIGHT_MEG,
- PAGESIZE = 4096,
+ FULL_MEM_SIZE = 1 * EIGHT_MEG,
// put the Page Table at the end of our memory space
PTSIZE = (1 << 18),
- HTABORG = (FULL_MEM_SIZE - PTSIZE),
- };
-
- enum ACCESS_TYPES
- {
- NO_USER_ACCESS,
- READ_O_ACCESS,
- NORMAL_ACCESS,
- CI_ACCESS,
+ HTABORG = (FULL_MEM_SIZE - PTSIZE),
+ };
+
+ enum ACCESS_TYPES
+ {
+ NO_USER_ACCESS,
+ READ_O_ACCESS,
+ NORMAL_ACCESS,
+ CI_ACCESS,
RO_EXE_ACCESS,
- };
+ };
+
+ enum PID_ALLOCATIONS
+ {
+ LinearSpace = (FULL_MEM_SIZE / EIGHT_MEG) - 1,
+ MMIOSpace = LinearSpace + 1,
+ FirstPid,
+ };
- enum PID_ALLOCATIONS
- {
- LinearSpace = (FULL_MEM_SIZE / EIGHT_MEG) - 1,
- MMIOSpace = LinearSpace + 1,
- FirstPid,
- };
+ static void init();
+ static void init_slb();
- enum MMIO_SPACE_INFO
- {
- MMIO_T_ENTRIES =
- FULL_MEM_SIZE * (MMIOSpace - LinearSpace) / PAGESIZE,
- };
+ /**
+ * @brief Responsible for handling PTE misses.
+ *
+ * @param[in] t - Task causing the page fault.
+ * @param[in] effAddr - Effective address accessed to cause fault.
+ *
+ * @return true - PTE miss was successfully handled.
+ *
+ * If the PTE miss is not successfully handled, the exception
+ * handler should collect debug information and kill the task.
+ */
+ static bool pteMiss(task_t* t, uint64_t effAddr);
- static void init();
- static void init_slb();
- static bool pteMiss(task_t*);
+ static void* mmioMap(void*, size_t);
+ static int mmioUnmap(void*, size_t);
- static void* mmioMap(void*, size_t);
- static int mmioUnmap(void*, size_t);
protected:
- VmmManager();
- ~VmmManager() {};
-
+ VmmManager();
+ ~VmmManager() {};
+
private:
- Spinlock lock;
- uint64_t mmioMapT[MMIO_T_ENTRIES];
+ Spinlock lock;
- void initSLB();
- void initPTEs();
- void initSDR1();
+ void initPTEs();
+ void initSDR1();
- bool _pteMiss(task_t*);
- void* _mmioMap(void*, size_t);
- int _mmioUnmap(void*, size_t);
+ bool _pteMiss(task_t*, uint64_t);
};
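
The hunk above changes pteMiss() to receive the faulting effective address and drops the per-class MMIO bookkeeping in favor of the new segment/block layering, so the flow is presumably VmmManager::pteMiss -> SegmentManager::handlePageFault -> Segment -> Block. Neither vmmmgr.C nor the exception handler is in this diff; the sketch below assumes that delegation and reuses the retained spinlock (method names assumed).

    // Sketch only -- vmmmgr.C is not included in this change.
    bool VmmManager::_pteMiss(task_t* t, uint64_t effAddr)
    {
        lock.lock();    // Spinlock method names are assumed here.
        bool handled = SegmentManager::handlePageFault(t, effAddr);
        lock.unlock();
        return handled;
    }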