diff options
-rw-r--r-- | src/include/kernel/block.H | 17 | ||||
-rw-r--r-- | src/include/kernel/spte.H | 4 | ||||
-rw-r--r-- | src/include/kernel/stacksegment.H | 145 | ||||
-rw-r--r-- | src/include/util/locked/list.H | 34 | ||||
-rw-r--r-- | src/kernel/block.C | 61 | ||||
-rw-r--r-- | src/kernel/makefile | 2 | ||||
-rw-r--r-- | src/kernel/stacksegment.C | 163 | ||||
-rw-r--r-- | src/kernel/syscall.C | 8 | ||||
-rw-r--r-- | src/kernel/taskmgr.C | 10 | ||||
-rw-r--r-- | src/kernel/vmmmgr.C | 4 | ||||
-rw-r--r-- | src/makefile | 4 | ||||
-rw-r--r-- | src/usr/testcore/kernel/slbtest.H | 22 |
12 files changed, 419 insertions, 55 deletions
diff --git a/src/include/kernel/block.H b/src/include/kernel/block.H index 3745b065e..94daf0b3a 100644 --- a/src/include/kernel/block.H +++ b/src/include/kernel/block.H @@ -169,11 +169,22 @@ class Block * @param[in] i_vAddr - The virtual address of the page. * @param[in] i_pAddr - The physical address of the page. * @param[in] i_access - The permissions of the page. + * + * @note If (i_pAddr == 0), then the virtual page is left with + * the current present / page-number state but the access + * mode is still set. */ void setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr, VmmManager::ACCESS_TYPES i_access); /** + * @brief Set up a virtual address to be "allocate-from-zero". + * + * @param[in] i_vAddr - The virtual address of the page. + */ + void setPageAllocateFromZero(uint64_t i_vAddr); + + /** * @brief Adds up the total size of all blocks within the segment * * @param[in/out] io_totalSize - total size allocated within segment @@ -185,6 +196,12 @@ class Block else iv_nextBlock->totalBlocksAlloc(io_totalSize); } + /** + * @brief Release all allocated pages back to memory pool and remove + * from page table. + */ + void releaseAllPages(); + private: /** Base address of the block */ const uint64_t iv_baseAddr; diff --git a/src/include/kernel/spte.H b/src/include/kernel/spte.H index 6e4832bb6..1a284d6e7 100644 --- a/src/include/kernel/spte.H +++ b/src/include/kernel/spte.H @@ -106,6 +106,10 @@ class ShadowPTE bool isDirty() const { return dirty; }; /** Set dirty bit. */ void setDirty(bool i_dirty) { dirty = i_dirty; }; + /** Get allocate-from-zero bit. */ + bool isAllocateFromZero() const { return allocate_from_zero; }; + /** Set allocate-from-zero bit. 
*/ + void setAllocateFromZero(bool i_zero) { allocate_from_zero = i_zero; }; }; #endif diff --git a/src/include/kernel/stacksegment.H b/src/include/kernel/stacksegment.H new file mode 100644 index 000000000..8b60ec3c7 --- /dev/null +++ b/src/include/kernel/stacksegment.H @@ -0,0 +1,145 @@ +// IBM_PROLOG_BEGIN_TAG +// This is an automatically generated prolog. +// +// $Source: src/include/kernel/stacksegment.H $ +// +// IBM CONFIDENTIAL +// +// COPYRIGHT International Business Machines Corp. 2011 +// +// p1 +// +// Object Code Only (OCO) source materials +// Licensed Internal Code Source Materials +// IBM HostBoot Licensed Internal Code +// +// The source code for this program is not published or other- +// wise divested of its trade secrets, irrespective of what has +// been deposited with the U.S. Copyright Office. +// +// Origin: 30 +// +// IBM_PROLOG_END + +/** @file stacksegment.H + * @brief Defines the stack segment (1TB) class. + */ +#ifndef __KERNEL_STACKSEGMENT_H +#define __KERNEL_STACKSEGMENT_H + +#include <kernel/types.h> +#include <kernel/segment.H> +#include <util/locked/list.H> + +// Forward declaration. +class Block; + +/** @struct StackBlockNode + * @brief Node structure for storing blocks onto a Util::Locked::List. + */ +struct StackBlockNode +{ + /** Next pointer for list. */ + StackBlockNode* next; + /** Previous pointer for list. */ + StackBlockNode* prev; + + /** Key value (8mb adjusted address for stack). */ + uint64_t key; + /** Pointer to block representing the stack. */ + Block* block; +}; + + +/** @class StackSegment + * @brief Class to manage the stack segment at 1 TB. + * + * Contains a list of blocks, one for each task, associated with the segment + * representing the stacks. + */ +class StackSegment : public Segment +{ + protected: + enum + { + EIGHT_MEGABYTE = 8 * 1024 * 1024ul, + ONE_TERABYTE = 1024 * 1024 * 1024 * 1024ul, + }; + + + /** + * @brief Constructor. + * Initialize attributes and set base address of segment to 1 TB. 
+ */ + StackSegment() : Segment(ONE_TERABYTE) {}; + + /** + * @brief Destructor + * Delete any blocks owned by this segment. + */ + ~StackSegment(); + + public: + /** + * @brief Initialize the segment by adding to the segment manager. + */ + static void init(); + + /** + * @brief Implementation of the pure-virtual function from Segment. + * + * Calls block chain to deal with page fault. + */ + bool handlePageFault(task_t* i_task, uint64_t i_addr); + + /** + * @brief Locate the physical address of the given virtual address + * @param[in] i_vaddr virtual address + * @return the physical address bound to the virtual address, or + * -EFAULT if i_vaddr not found. @see errno.h + */ + uint64_t findPhysicalAddress(uint64_t i_vaddr) const; + + /** + * @brief Create a new stack for a task. + * + * @param i_task - Task ID of task to own the stack. + * + * @return Upper address of the newly created stack. + */ + static void* createStack(tid_t i_task); + + /** + * @brief Delete previously created stack for a task. + * + * @param i_task - Task ID of task owning the stack. + */ + static void deleteStack(tid_t i_task); + + private: + /** @brief Mapping of virtual address ranges to blocks representing + * stacks. + * + * The blocks are created such that the 1TB range of this segment is + * divided into 8MB chunks, such that (tid*8MB + 1TB) = bottom of + * the stack address range. The stack is then arranged somewhere + * within that range to provide protection above and below the stack + * and to efficiently utilize the hashed page table. + * + * This list is therefore indexed by the low address of the + * range (tid*8MB + 1TB). + */ + Util::Locked::List<StackBlockNode, uint64_t> iv_blockList; + + /** Internal implementation of init function. */ + void _init(); + /** Internal implementation of createStack function. */ + void* _createStack(tid_t i_task); + /** Internal implementation of deleteStack function. 
*/ + void _deleteStack(tid_t i_task); + + StackSegment(const StackSegment&); // prohibit copy. + StackSegment& operator=(const StackSegment&); // prohibit assignment. +}; + +#endif diff --git a/src/include/util/locked/list.H b/src/include/util/locked/list.H index be3c60f67..5e06c5e8e 100644 --- a/src/include/util/locked/list.H +++ b/src/include/util/locked/list.H @@ -32,7 +32,7 @@ namespace Util { template <typename _T, typename _K, bool locked = false, typename _S = int> - class List + class List { public: List() : head(NULL), tail(NULL), lock() {}; @@ -44,16 +44,16 @@ namespace Util void erase(_T* node); void erase(_K& key); - _T* find(_K& key); + _T* find(_K& key) const; protected: _T* head; _T* tail; - _S lock; + mutable _S lock; - void __lock(); - void __unlock(); + void __lock() const; + void __unlock() const; }; template <typename _T, typename _K, bool locked, typename _S> @@ -62,7 +62,7 @@ namespace Util _T* item = NULL; __lock(); - + if (tail != NULL) { item = tail; @@ -73,7 +73,7 @@ namespace Util } __unlock(); - + return item; } @@ -81,7 +81,7 @@ namespace Util void List<_T,_K,locked,_S>::insert(_T* item) { __lock(); - + if (head == NULL) { item->next = item->prev = NULL; @@ -93,18 +93,18 @@ namespace Util item->next = head; head = head->prev = item; } - + __unlock(); } template <typename _T, typename _K, bool locked, typename _S> - void List<_T,_K,locked,_S>::__lock() + void List<_T,_K,locked,_S>::__lock() const { Util::Locked::LockHelper<locked,_S>(lock).lock(); } - + template <typename _T, typename _K, bool locked, typename _S> - void List<_T,_K,locked,_S>::__unlock() + void List<_T,_K,locked,_S>::__unlock() const { Util::Locked::LockHelper<locked,_S>(lock).unlock(); } @@ -122,7 +122,7 @@ namespace Util if (node == tail) tail = node->prev; else - node->next->prev = node->prev; + node->next->prev = node->prev; __unlock(); } @@ -131,7 +131,7 @@ namespace Util void List<_T,_K,locked,_S>::erase(_K& key) { __lock(); - + _T* node = head; while((node != 
NULL) && (node->key != key)) @@ -147,7 +147,7 @@ namespace Util if (node == tail) tail = node->prev; else - node->next->prev = node->prev; + node->next->prev = node->prev; } __unlock(); @@ -156,7 +156,7 @@ namespace Util } template <typename _T, typename _K, bool locked, typename _S> - _T* List<_T,_K,locked,_S>::find(_K& key) + _T* List<_T,_K,locked,_S>::find(_K& key) const { __lock(); @@ -166,7 +166,7 @@ namespace Util node = node->next; __unlock(); - + return node; } } diff --git a/src/kernel/block.C b/src/kernel/block.C index 68db26a95..1402bd60a 100644 --- a/src/kernel/block.C +++ b/src/kernel/block.C @@ -98,7 +98,7 @@ bool Block::handlePageFault(task_t* i_task, uint64_t i_addr) //Done(waiting for response) return true; } - else if (pte->allocate_from_zero) + else if (pte->isAllocateFromZero()) { void* l_page = PageManager::allocatePage(); memset(l_page, '\0', PAGESIZE); @@ -144,8 +144,21 @@ void Block::setPhysicalPage(uint64_t i_vAddr, uint64_t i_pAddr, // Create virtual to physical mapping. ShadowPTE* pte = getPTE(i_vAddr); - pte->setPageAddr(i_pAddr); - pte->setPresent(true); + if (i_pAddr != 0) + { + pte->setPageAddr(i_pAddr); + pte->setPresent(true); + + // Modified an SPTE, clear the HPTE. + PageTableManager::delEntry(i_vAddr); + } + // If the page is already present, we might be changing permissions, so + // clear the HPTE. + else if (pte->isPresent()) + { + PageTableManager::delEntry(i_vAddr); + } + switch(i_access) { case VmmManager::READ_O_ACCESS: @@ -209,3 +222,45 @@ uint64_t Block::findPhysicalAddress(uint64_t i_vaddr) const return paddr; } + +void Block::setPageAllocateFromZero(uint64_t i_vAddr) +{ + // Check containment, call down chain if address isn't in this block. + if (!isContained(i_vAddr)) + { + if (iv_nextBlock) + { + iv_nextBlock->setPageAllocateFromZero(i_vAddr); + } + else + { + // No block owns this address. Code bug. + kassert(iv_nextBlock); + } + return; + } + + // Set page to allocate-from-zero. 
+ ShadowPTE* pte = getPTE(i_vAddr); + pte->setAllocateFromZero(true); +} + +void Block::releaseAllPages() +{ + // Release all pages from page table. + PageTableManager::delRangeVA(iv_baseAddr, iv_baseAddr + iv_size); + + // Free all pages back to page manager. + for(uint64_t page = iv_baseAddr; + page < (iv_baseAddr + iv_size); + page += PAGESIZE) + { + ShadowPTE* pte = getPTE(page); + if (pte->isPresent() && (0 != pte->getPageAddr())) + { + PageManager::freePage(reinterpret_cast<void*>(pte->getPageAddr())); + pte->setPresent(false); + pte->setPageAddr(NULL); + } + } +} diff --git a/src/kernel/makefile b/src/kernel/makefile index 24052a695..a0f1e2d7f 100644 --- a/src/kernel/makefile +++ b/src/kernel/makefile @@ -25,7 +25,7 @@ ROOTPATH = ../.. OBJS = start.o kernel.o console.o pagemgr.o heapmgr.o taskmgr.o cpumgr.o OBJS += syscall.o scheduler.o spinlock.o exception.o vmmmgr.o timemgr.o OBJS += futexmgr.o ptmgr.o segmentmgr.o devicesegment.o basesegment.o -OBJS += block.o cpuid.o misc.o msghandler.o blockmsghdlr.o +OBJS += block.o cpuid.o misc.o msghandler.o blockmsghdlr.o stacksegment.o include ${ROOTPATH}/config.mk diff --git a/src/kernel/stacksegment.C b/src/kernel/stacksegment.C new file mode 100644 index 000000000..91b16e6a9 --- /dev/null +++ b/src/kernel/stacksegment.C @@ -0,0 +1,163 @@ +// IBM_PROLOG_BEGIN_TAG +// This is an automatically generated prolog. +// +// $Source: src/kernel/stacksegment.C $ +// +// IBM CONFIDENTIAL +// +// COPYRIGHT International Business Machines Corp. 2011 +// +// p1 +// +// Object Code Only (OCO) source materials +// Licensed Internal Code Source Materials +// IBM HostBoot Licensed Internal Code +// +// The source code for this program is not published or other- +// wise divested of its trade secrets, irrespective of what has +// been deposited with the U.S. Copyright Office. 
+// +// Origin: 30 +// +// IBM_PROLOG_END + +#include <assert.h> +#include <util/singleton.H> + +#include <kernel/stacksegment.H> +#include <kernel/segmentmgr.H> +#include <kernel/block.H> +#include <errno.h> + +void StackSegment::init() +{ + Singleton<StackSegment>::instance()._init(); +} + +void* StackSegment::createStack(tid_t i_task) +{ + return Singleton<StackSegment>::instance()._createStack(i_task); +} + +void StackSegment::deleteStack(tid_t i_task) +{ + Singleton<StackSegment>::instance()._deleteStack(i_task); +} + +StackSegment::~StackSegment() +{ + // Release all blocks and associated pages. + StackBlockNode* l_node = NULL; + do + { + l_node = iv_blockList.remove(); + if (NULL != l_node) + { + l_node->block->releaseAllPages(); + delete l_node->block; + delete l_node; + } + } while (l_node != NULL); +} + +bool StackSegment::handlePageFault(task_t* i_task, uint64_t i_addr) +{ + uint64_t l_addr_8mb = i_addr & ~((EIGHT_MEGABYTE) - 1); + + StackBlockNode* l_node = iv_blockList.find(l_addr_8mb); + + return (NULL == l_node ? + false : + l_node->block->handlePageFault(i_task, i_addr)); +} + +uint64_t StackSegment::findPhysicalAddress(uint64_t i_vaddr) const +{ + uint64_t l_addr_8mb = i_vaddr & ~((EIGHT_MEGABYTE) - 1); + + StackBlockNode* l_node = iv_blockList.find(l_addr_8mb); + + return (NULL == l_node ? + -EFAULT : + l_node->block->findPhysicalAddress(i_vaddr)); +} + +void StackSegment::_init() +{ + // Assign segment to segment manager. + SegmentManager::addSegment(this, SegmentManager::STACK_SEGMENT_ID); +} + +void* StackSegment::_createStack(tid_t i_task) +{ + /* The segment is broken out into 8MB blocks so we need to place the + * stack somewhere within an 8MB range. The constraints are ensuring + * we have adequate protection and that the hashed page table does not + * have a large number of collisions. If we were to place all of the + * stacks at (8MB - 64k) there would be a large amount of contention on + * the same PTEG in the hashed page table. 
+ * + * Design: + * - Provide 64k of protection minimum at the top and bottom of the + * stack. + * - Allow stack sizes up to 256k. + * - Expect typical (well performing) stacks of under 128k. + * + * Therefore, place stacks at: + * Bottom = 64k + 128k * (tid % 61). + * Top = Bottom + 256k - 8. + * + * This provides a possible range of 64k to (8MB - 64k), giving 64k of + * protection at each end. It also cycles the stacks through the 8MB + * range, and therefore the hashed page table, at 128k blocks. Finally, + * it provides for stack sizes up to 256k. + * + * Any attempt to grow the stack above 256k can be caught by killing the + * task (so we can re-write the offending code to not waste so much stack + * space). + */ + + uint64_t l_addr_8mb = i_task * EIGHT_MEGABYTE + ONE_TERABYTE; + // Ensure block doesn't already exist. + kassert(NULL == iv_blockList.find(l_addr_8mb)); + + // Calculate offset bounds of stack. + uint64_t l_offset_bottom = (64 + 128 * (i_task % 61)) * 1024; + uint64_t l_offset_top = l_offset_bottom + (256 * 1024) - 8; + + uint64_t l_addr_bottom = l_addr_8mb + l_offset_bottom; + uint64_t l_addr_top = l_addr_8mb + l_offset_top; + + // Create block. + Block* l_block = new Block(l_addr_bottom, 256 * 1024); + // Set pages to be allocate-from-zero. + for(uint64_t i = l_addr_bottom; i <= l_addr_top; i += PAGE_SIZE) + { + l_block->setPhysicalPage(i, 0, VmmManager::NORMAL_ACCESS); + l_block->setPageAllocateFromZero(i); + } + + // Insert block to list. + StackBlockNode* l_node = new StackBlockNode(); + l_node->key = l_addr_8mb; + l_node->block = l_block; + iv_blockList.insert(l_node); + + // Return pointer to top of stack, since stacks grow down. 
+ return reinterpret_cast<void*>(l_addr_top); +} + +void StackSegment::_deleteStack(tid_t i_task) +{ + uint64_t l_addr_8mb = i_task * EIGHT_MEGABYTE + ONE_TERABYTE; + + StackBlockNode* l_node = iv_blockList.find(l_addr_8mb); + kassert(NULL != l_node); + iv_blockList.erase(l_node); + + l_node->block->releaseAllPages(); + delete l_node->block; + delete l_node; + + return; +} diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C index 8f69e934e..6faf47681 100644 --- a/src/kernel/syscall.C +++ b/src/kernel/syscall.C @@ -37,6 +37,7 @@ #include <kernel/misc.H> #include <kernel/msghandler.H> #include <kernel/vmmmgr.H> +#include <kernel/stacksegment.H> extern "C" void kernel_execute_decrementer() @@ -164,7 +165,7 @@ namespace Systemcalls // TODO: Deal with join. // Clean up task memory. - PageManager::freePage(t->context.stack_ptr, TASK_DEFAULT_STACK_SIZE); + StackSegment::deleteStack(t->tid); delete t; } @@ -218,7 +219,7 @@ namespace Systemcalls if (m->type >= MSG_FIRST_SYS_TYPE) { - printkd("MsgSend> type=%d\n", m->type); + printkd("Invalid type for msg_send, type=%d.\n", m->type); TASK_SETRTN(t, -EINVAL); return; } @@ -252,7 +253,8 @@ namespace Systemcalls if (m->type >= MSG_FIRST_SYS_TYPE) { - printkd("MsgSendRecv> type=%d\n", m->type); + printkd("Invalid message type for msg_sendrecv, type=%d.\n", + m->type); TASK_SETRTN(t, -EINVAL); return; } diff --git a/src/kernel/taskmgr.C b/src/kernel/taskmgr.C index 00bb1d2c9..3fe2cff6c 100644 --- a/src/kernel/taskmgr.C +++ b/src/kernel/taskmgr.C @@ -25,6 +25,7 @@ #include <kernel/task.H> #include <kernel/pagemgr.H> #include <kernel/cpumgr.H> +#include <kernel/stacksegment.H> #include <sys/task.h> #include <arch/ppc.H> #include <string.h> @@ -99,12 +100,9 @@ task_t* TaskManager::_createTask(TaskManager::task_fn_t t, // Setup stack. 
if (withStack) { - task->context.stack_ptr = - PageManager::allocatePage(TASK_DEFAULT_STACK_SIZE); - memset(task->context.stack_ptr, '\0', - TASK_DEFAULT_STACK_SIZE * PAGESIZE); - task->context.gprs[1] = ((uint64_t)task->context.stack_ptr) + - TASK_DEFAULT_STACK_SIZE * PAGESIZE - 8; + task->context.stack_ptr = StackSegment::createStack(task->tid); + task->context.gprs[1] = + reinterpret_cast<uint64_t>(task->context.stack_ptr); } else { diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C index ba5b795f3..14a08da2c 100644 --- a/src/kernel/vmmmgr.C +++ b/src/kernel/vmmmgr.C @@ -27,8 +27,9 @@ #include <arch/ppc.H> #include <kernel/ptmgr.H> #include <kernel/segmentmgr.H> -#include <kernel/devicesegment.H> #include <kernel/basesegment.H> +#include <kernel/stacksegment.H> +#include <kernel/devicesegment.H> extern void* data_load_address; @@ -43,6 +44,7 @@ void VmmManager::init() VmmManager& v = Singleton<VmmManager>::instance(); BaseSegment::init(); + StackSegment::init(); DeviceSegment::init(); SegmentManager::initSLB(); diff --git a/src/makefile b/src/makefile index 0ffe8c500..542c7db34 100644 --- a/src/makefile +++ b/src/makefile @@ -36,8 +36,8 @@ DIRECT_BOOT_OBJECTS = start.o kernel.o taskmgr.o cpumgr.o syscall.o \ syscall_msg.o syscall_mmio.o syscall_time.o \ syscall_mm.o init_main.o vfs_main.o sync.o futexmgr.o \ ptmgr.o segmentmgr.o basesegment.o devicesegment.o \ - block.o cxxtest_data.o cpuid.o misc.o msghandler.o \ - blockmsghdlr.o + block.o cxxtest_data.o cpuid.o misc.o msghandler.o \ + blockmsghdlr.o stacksegment.o ## STUB_TESTCASE_OBJECT = cxxtest_stub.o diff --git a/src/usr/testcore/kernel/slbtest.H b/src/usr/testcore/kernel/slbtest.H index 82a8d26b0..2112b7fa8 100644 --- a/src/usr/testcore/kernel/slbtest.H +++ b/src/usr/testcore/kernel/slbtest.H @@ -40,19 +40,6 @@ class slbtest: public CxxTest::TestSuite static volatile int rc; - void testSLB() - { - rc = 0; - printk("Data Segment exception expected in 1TB segment test - "); - task_create(writeEA1TB, 
this); - while (rc == 0) task_yield(); - task_yield(); - if (rc == -1) - { - TS_FAIL("Data Segment exception expected in 1TB segment\n"); - } - } - void testDevSeg() { int rc = 0; @@ -111,15 +98,6 @@ class slbtest: public CxxTest::TestSuite private: - static void writeEA1TB(void *i_p) - { - rc = 1; - sync(); - *(int *)0x10000000000 = 1; - sync(); - rc = -1; - task_end(); - } }; volatile int slbtest::rc = 0; |