summaryrefslogtreecommitdiffstats
path: root/src/include
diff options
context:
space:
mode:
Diffstat (limited to 'src/include')
-rw-r--r--src/include/kernel/block.H17
-rw-r--r--src/include/kernel/spte.H4
-rw-r--r--src/include/kernel/stacksegment.H145
-rw-r--r--src/include/util/locked/list.H34
4 files changed, 183 insertions, 17 deletions
diff --git a/src/include/kernel/block.H b/src/include/kernel/block.H
index 3745b065e..94daf0b3a 100644
--- a/src/include/kernel/block.H
+++ b/src/include/kernel/block.H
@@ -169,11 +169,22 @@ class Block
* @param[in] i_vAddr - The virtual address of the page.
* @param[in] i_pAddr - The physical address of the page.
* @param[in] i_access - The permissions of the page.
+ *
+ * @note If (i_pAddr == 0), then the virtual page is left with
+ * the current present / page-number state but the access
+ * mode is still set.
*/
void setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr,
VmmManager::ACCESS_TYPES i_access);
/**
+ * @brief Set up a virtual address to be "allocate-from-zero".
+ *
+ * @param[in] i_vAddr - The virtual address of the page.
+ */
+ void setPageAllocateFromZero(uint64_t i_vAddr);
+
+ /**
* @brief Adds up the total size of all blocks within the segment
*
* @param[in/out] io_totalSize - total size allocated within segment
@@ -185,6 +196,12 @@ class Block
else iv_nextBlock->totalBlocksAlloc(io_totalSize);
}
+ /**
+ * @brief Release all allocated pages back to memory pool and remove
+ * from page table.
+ */
+ void releaseAllPages();
+
private:
/** Base address of the block */
const uint64_t iv_baseAddr;
diff --git a/src/include/kernel/spte.H b/src/include/kernel/spte.H
index 6e4832bb6..1a284d6e7 100644
--- a/src/include/kernel/spte.H
+++ b/src/include/kernel/spte.H
@@ -106,6 +106,10 @@ class ShadowPTE
bool isDirty() const { return dirty; };
/** Set dirty bit. */
void setDirty(bool i_dirty) { dirty = i_dirty; };
+ /** Get allocate-from-zero bit. */
+ bool isAllocateFromZero() const { return allocate_from_zero; };
+ /** Set allocate-from-zero bit. */
+ void setAllocateFromZero(bool i_zero) { allocate_from_zero = i_zero; };
};
#endif
diff --git a/src/include/kernel/stacksegment.H b/src/include/kernel/stacksegment.H
new file mode 100644
index 000000000..8b60ec3c7
--- /dev/null
+++ b/src/include/kernel/stacksegment.H
@@ -0,0 +1,145 @@
+// IBM_PROLOG_BEGIN_TAG
+// This is an automatically generated prolog.
+//
+// $Source: src/include/kernel/stacksegment.H $
+//
+// IBM CONFIDENTIAL
+//
+// COPYRIGHT International Business Machines Corp. 2011
+//
+// p1
+//
+// Object Code Only (OCO) source materials
+// Licensed Internal Code Source Materials
+// IBM HostBoot Licensed Internal Code
+//
+// The source code for this program is not published or other-
+// wise divested of its trade secrets, irrespective of what has
+// been deposited with the U.S. Copyright Office.
+//
+// Origin: 30
+//
+// IBM_PROLOG_END
+
+/** @file stacksegment.H
+ * @brief Defines the stack segment (1TB) class.
+ */
+#ifndef __KERNEL_STACKSEGMENT_H
+#define __KERNEL_STACKSEGMENT_H
+
+#include <kernel/types.h>
+#include <kernel/segment.H>
+#include <util/locked/list.H>
+
+// Forward declaration.
+class Block;
+
+/** @struct StackBlockNode
+ * @brief Node structure for storing blocks onto a Util::Locked::List.
+ */
+struct StackBlockNode
+{
+ /** Next pointer for list. */
+ StackBlockNode* next;
+ /** Previous pointer for list. */
+ StackBlockNode* prev;
+
+ /** Key value (8MB-adjusted address for the stack). */
+ uint64_t key;
+ /** Pointer to block representing the stack. */
+ Block* block;
+};
+
+
+/** @class StackSegment
+ * @brief Class to manage the stack segment at 1 TB.
+ *
+ * Contains a list of blocks, one for each task, associated with the segment
+ * representing the stacks.
+ */
+class StackSegment : public Segment
+{
+ protected:
+ enum
+ {
+ EIGHT_MEGABYTE = 8 * 1024 * 1024ul,
+ ONE_TERABYTE = 1024 * 1024 * 1024 * 1024ul,
+ };
+
+
+ /**
+ * @brief Constructor.
+ * Initialize attributes and set base address of segment to 1 TB.
+ */
+ StackSegment() : Segment(ONE_TERABYTE) {};
+
+ /**
+ * @brief Destructor
+ * Delete any blocks owned by this segment.
+ */
+ ~StackSegment();
+
+ public:
+ /**
+ * @brief Initialize the segment by adding to the segment manager.
+ */
+ static void init();
+
+ /**
+ * @brief Implementation of the pure-virtual function from Segment.
+ *
+ * Calls block chain to deal with page fault.
+ */
+ bool handlePageFault(task_t* i_task, uint64_t i_addr);
+
+ /**
+ * @brief Locate the physical address of the given virtual address
+ * @param[in] i_vaddr virtual address
+ * @return the physical address bound to the virtual address, or
+ * -EFAULT if i_vaddr not found. @see errno.h
+ */
+ uint64_t findPhysicalAddress(uint64_t i_vaddr) const;
+
+ /**
+ * @brief Create a new stack for a task.
+ *
+ * @param i_task - Task ID of task to own the stack.
+ *
+ * @return Upper address of the newly created stack.
+ */
+ static void* createStack(tid_t i_task);
+
+ /**
+ * @brief Delete previously created stack for a task.
+ *
+ * @param i_task - Task ID of task owning the stack.
+ */
+ static void deleteStack(tid_t i_task);
+
+ private:
+ /** @brief Mapping of virtual address ranges to blocks representing
+ * stacks.
+ *
+ * The blocks are created such that the 1TB range of this segment is
+ * divided into 8MB chunks, such that (tid*8MB + 1TB) = bottom of
+ * the stack address range. The stack is then arranged somewhere
+ * within that range to provide protection above and below the stack
+ * and to efficiently utilize the hashed page table.
+ *
+ * This list is therefore indexed by the low address of the
+ * range (tid*8MB + 1TB).
+ */
+ Util::Locked::List<StackBlockNode, uint64_t> iv_blockList;
+
+ /** Internal implementation of init function. */
+ void _init();
+ /** Internal implementation of createStack function. */
+ void* _createStack(tid_t i_task);
+ /** Internal implementation of deleteStack function. */
+ void _deleteStack(tid_t i_task);
+
+ StackSegment(const StackSegment&); // prohibit copy.
+ StackSegment& operator=(const StackSegment&); // prohibit assignment.
+};
+
+#endif
diff --git a/src/include/util/locked/list.H b/src/include/util/locked/list.H
index be3c60f67..5e06c5e8e 100644
--- a/src/include/util/locked/list.H
+++ b/src/include/util/locked/list.H
@@ -32,7 +32,7 @@ namespace Util
{
template <typename _T, typename _K,
bool locked = false, typename _S = int>
- class List
+ class List
{
public:
List() : head(NULL), tail(NULL), lock() {};
@@ -44,16 +44,16 @@ namespace Util
void erase(_T* node);
void erase(_K& key);
- _T* find(_K& key);
+ _T* find(_K& key) const;
protected:
_T* head;
_T* tail;
- _S lock;
+ mutable _S lock;
- void __lock();
- void __unlock();
+ void __lock() const;
+ void __unlock() const;
};
template <typename _T, typename _K, bool locked, typename _S>
@@ -62,7 +62,7 @@ namespace Util
_T* item = NULL;
__lock();
-
+
if (tail != NULL)
{
item = tail;
@@ -73,7 +73,7 @@ namespace Util
}
__unlock();
-
+
return item;
}
@@ -81,7 +81,7 @@ namespace Util
void List<_T,_K,locked,_S>::insert(_T* item)
{
__lock();
-
+
if (head == NULL)
{
item->next = item->prev = NULL;
@@ -93,18 +93,18 @@ namespace Util
item->next = head;
head = head->prev = item;
}
-
+
__unlock();
}
template <typename _T, typename _K, bool locked, typename _S>
- void List<_T,_K,locked,_S>::__lock()
+ void List<_T,_K,locked,_S>::__lock() const
{
Util::Locked::LockHelper<locked,_S>(lock).lock();
}
-
+
template <typename _T, typename _K, bool locked, typename _S>
- void List<_T,_K,locked,_S>::__unlock()
+ void List<_T,_K,locked,_S>::__unlock() const
{
Util::Locked::LockHelper<locked,_S>(lock).unlock();
}
@@ -122,7 +122,7 @@ namespace Util
if (node == tail)
tail = node->prev;
else
- node->next->prev = node->prev;
+ node->next->prev = node->prev;
__unlock();
}
@@ -131,7 +131,7 @@ namespace Util
void List<_T,_K,locked,_S>::erase(_K& key)
{
__lock();
-
+
_T* node = head;
while((node != NULL) && (node->key != key))
@@ -147,7 +147,7 @@ namespace Util
if (node == tail)
tail = node->prev;
else
- node->next->prev = node->prev;
+ node->next->prev = node->prev;
}
__unlock();
@@ -156,7 +156,7 @@ namespace Util
}
template <typename _T, typename _K, bool locked, typename _S>
- _T* List<_T,_K,locked,_S>::find(_K& key)
+ _T* List<_T,_K,locked,_S>::find(_K& key) const
{
__lock();
@@ -166,7 +166,7 @@ namespace Util
node = node->next;
__unlock();
-
+
return node;
}
}
OpenPOWER on IntegriCloud