summaryrefslogtreecommitdiffstats
path: root/src/kernel
diff options
context:
space:
mode:
authorMissy Connell <missyc@us.ibm.com>2012-09-10 16:05:01 -0500
committerA. Patrick Williams III <iawillia@us.ibm.com>2012-10-09 17:10:32 -0500
commit1bf8c6b8376efacd03e3ec62d5ded5b2be4bff39 (patch)
treec266815232142e67f15a61ffcbdf09407737f259 /src/kernel
parentfb1836fd7b1b8839815595db08ae740ec7b86347 (diff)
downloadtalos-hostboot-1bf8c6b8376efacd03e3ec62d5ded5b2be4bff39.tar.gz
talos-hostboot-1bf8c6b8376efacd03e3ec62d5ded5b2be4bff39.zip
Extend VMM to 32M
Add mmLinearMap to create block at a specified phys addr. Added iv_MaptoPhy in the block to indicate we are physically mapped block and to not apply the HRMOR. Change-Id: I75ddb19b82ae9a2035ff873edff8a34a33c74639 RTC:43401 Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/1846 Tested-by: Jenkins Server Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com> Reviewed-by: A. Patrick Williams III <iawillia@us.ibm.com>
Diffstat (limited to 'src/kernel')
-rw-r--r--src/kernel/basesegment.C143
-rw-r--r--src/kernel/block.C26
-rw-r--r--src/kernel/pagemgr.C19
-rw-r--r--src/kernel/syscall.C28
-rw-r--r--src/kernel/vmmmgr.C29
5 files changed, 230 insertions, 15 deletions
diff --git a/src/kernel/basesegment.C b/src/kernel/basesegment.C
index c9a826f8e..79cf7ada8 100644
--- a/src/kernel/basesegment.C
+++ b/src/kernel/basesegment.C
@@ -30,7 +30,8 @@
#include <kernel/block.H>
#include <kernel/cpuid.H>
#include <kernel/console.H>
-
+#include <kernel/pagemgr.H>
+#include <kernel/spte.H>
BaseSegment::~BaseSegment()
{
@@ -53,7 +54,7 @@ void BaseSegment::_init()
case CORE_POWER8_MURANO:
case CORE_POWER8_VENICE:
default:
- iv_physMemSize = (8*MEGABYTE);
+ iv_physMemSize = VMM_BASE_BLOCK_SIZE;
break;
}
// Base block is L3 cache physical memory size
@@ -63,10 +64,10 @@ void BaseSegment::_init()
// TODO iv_physMemSize needs to be recalculated when DIMM memory is avail.
// Set default page permissions on block.
- for (uint64_t i = 0; i < 0x800000; i += PAGESIZE)
+ for (uint64_t i = 0; i < VMM_BASE_BLOCK_SIZE; i += PAGESIZE)
{
- // External address filled in by linker as start of kernel's
- // data pages.
+ // External address filled in by linker as start of kernel's
+ // data pages.
extern void* data_load_address;
// Don't map in the 0 (NULL) page.
@@ -97,15 +98,21 @@ bool BaseSegment::handlePageFault(task_t* i_task, uint64_t i_addr, bool i_store)
* STATIC
* Allocates a block of virtual memory of the given size
*/
-int BaseSegment::mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size)
+int BaseSegment::mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size,
+ bool i_mappedToPhy, uint64_t *i_SPTEaddr)
{
- return Singleton<BaseSegment>::instance()._mmAllocBlock(i_mq,i_va,i_size);
+ return Singleton<BaseSegment>::instance()._mmAllocBlock(i_mq,i_va,i_size,
+ i_mappedToPhy,
+ i_SPTEaddr);
+
}
/**
* Allocates a block of virtual memory of the given size
*/
-int BaseSegment::_mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size)
+int BaseSegment::_mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size,
+ bool i_mappedToPhy, uint64_t *i_SPTEaddr)
+
{
uint64_t l_vaddr = reinterpret_cast<uint64_t>(i_va);
uint64_t l_blockSizeTotal = 0;
@@ -118,7 +125,26 @@ int BaseSegment::_mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size)
{
return -EINVAL;
}
- Block* l_block = new Block(l_vaddr, ALIGN_PAGE(i_size), i_mq);
+
+ // Verify that the block we are adding is not already contained within
+ // another block in the base segment
+ Block* temp_block = iv_block;
+ while (temp_block != NULL)
+ {
+ // Checking to see if the l_vaddr is already contained in another
+ // block.. if so return error
+ if (temp_block->isContained(l_vaddr))
+ {
+ printk("mmAllocBlock Address = %lx is already in a block\n",l_vaddr);
+ return -EINVAL;
+ }
+
+ temp_block = temp_block->iv_nextBlock;
+ }
+
+ Block* l_block = new Block(l_vaddr, ALIGN_PAGE(i_size), i_mq,i_mappedToPhy,
+ i_SPTEaddr );
+
l_block->setParent(this);
iv_block->appendBlock(l_block);
return 0;
@@ -191,3 +217,102 @@ int BaseSegment::_mmRemovePages(VmmManager::PAGE_REMOVAL_OPS i_op,
iv_block->iv_nextBlock->removePages(i_op,i_vaddr,i_size,i_task):
-EINVAL);
}
+
+
+/**
+ * STATIC
+ * Allocates a block of virtual memory to extend the VMM
+ */
+int BaseSegment::mmExtend(void)
+{
+ return Singleton<BaseSegment>::instance()._mmExtend();
+}
+
+/**
+ * Allocates a block of virtual memory of the given size
+ * to extend the VMM to 32MEG in size in mainstore
+ */
+int BaseSegment::_mmExtend(void)
+{
+    // The base address of the extended memory is 8MB. The first few pages
+    // hold the SPTE; the remaining pages from 8MB + SPTE up to 32MB are
+    // added to the HEAP.
+
+ uint64_t l_vaddr = VMM_ADDR_EXTEND_BLOCK; // 8MEG
+ uint64_t l_size = VMM_EXTEND_BLOCK_SIZE; // 32MEG - 8MB (base block)
+
+ // Call to allocate a block passing in the requested address of where the
+ // SPTEs should be created
+ int rc = _mmAllocBlock(NULL, reinterpret_cast<void *>(l_vaddr), l_size, false,
+ /*(uint64_t *)*/reinterpret_cast<uint64_t *>(l_vaddr));
+
+ if (rc)
+ {
+ printk("Got an error in mmAllocBlock\n");
+ return rc;
+ }
+
+ // Set default page permissions on block.
+ for (uint64_t i = l_vaddr; i < l_vaddr + l_size; i += PAGESIZE)
+ {
+ iv_block->setPhysicalPage(i, i, WRITABLE);
+ }
+
+ // Now need to take the pages past the SPTE and add them to the heap.
+
+ //get the number of pages needed to hold the SPTE entries.
+ uint64_t spte_pages = (ALIGN_PAGE(l_size)/PAGESIZE * sizeof(ShadowPTE))/PAGESIZE;
+
+ printkd("Number of SPTE pages %ld\n", spte_pages);
+
+ // Need to setup the starting address of the memory we need to add to the
+ // heap to be the address of the block + the number of pages that are being
+ // used for the SPTE.
+
+ // Call Add Memory with the starting address , size.. it will put the pages
+ // on the heap call this with the address being the first page past the SPTE.
+ PageManager::addMemory(l_vaddr + (spte_pages*PAGESIZE),
+ l_size/PAGESIZE - spte_pages);
+
+ // Update the physical Memory size to now be 32MEG. by adding the extended
+ // block size to the physical mem size.
+ iv_physMemSize += VMM_EXTEND_BLOCK_SIZE;
+
+ return 0;
+}
+
+/**
+ * Allocates a block of virtual memory of the given size
+ * at a specified physical address.
+ */
+int BaseSegment::mmLinearMap(void *i_paddr, uint64_t i_size)
+{
+ return Singleton<BaseSegment>::instance()._mmLinearMap(i_paddr, i_size);
+}
+
+/**
+ * Allocates a block of virtual memory of the given size
+ * at a specified physical address.
+ */
+int BaseSegment::_mmLinearMap(void *i_paddr, uint64_t i_size)
+{
+
+ int rc = _mmAllocBlock(NULL, i_paddr, i_size, true);
+
+ if (rc)
+ {
+ printk("Got an error in mmAllocBlock\n");
+ return rc;
+ }
+
+ uint64_t l_addr = reinterpret_cast<uint64_t>(i_paddr);
+
+ // set the default permissions and the va-pa mapping in the SPTE
+ for (uint64_t i = l_addr; i < l_addr + i_size; i += PAGESIZE)
+ {
+ iv_block->setPhysicalPage(i, i, WRITABLE);
+ }
+
+ return 0;
+
+}
diff --git a/src/kernel/block.C b/src/kernel/block.C
index 1ea12d2c0..c8546f760 100644
--- a/src/kernel/block.C
+++ b/src/kernel/block.C
@@ -39,6 +39,8 @@
#include <kernel/basesegment.H>
#include <arch/ppc.H>
+#include <new>
+
// Track eviction requests due to aging pages
uint32_t Block::cv_ro_evict_req = 0;
uint32_t Block::cv_rw_evict_req = 0;
@@ -58,10 +60,21 @@ Block::~Block()
}
}
-void Block::init(MessageQueue* i_msgQ)
+void Block::init(MessageQueue* i_msgQ, uint64_t *i_spteAddr)
{
- // Create a shadow PTE for each page.
- iv_ptes = new ShadowPTE[iv_size / PAGESIZE];
+
+ if (i_spteAddr == NULL)
+ {
+ // Create a shadow PTE for each page.
+ iv_ptes = new ShadowPTE[iv_size / PAGESIZE];
+ }
+ else // set the page table to reside at the address requested
+ {
+        // Doing a placement new to put the SPTE at the beginning
+        // of the block we allocated.
+ iv_ptes = new(i_spteAddr) ShadowPTE[iv_size / PAGESIZE];
+ }
+
if (i_msgQ != NULL)
{
//Create message handler to handle read operations for this block
@@ -265,7 +278,12 @@ uint64_t Block::findPhysicalAddress(uint64_t i_vaddr) const
{
paddr = pte->getPageAddr();
paddr += i_vaddr % PAGESIZE;
- paddr |= getHRMOR();
+
+ // If not a physically mapped block then add HRMOR
+ if (!iv_mappedToPhysical)
+ {
+ paddr |= getHRMOR();
+ }
}
return paddr;
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index 4415cb81d..ca534efdd 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -388,3 +388,22 @@ void PageManagerCore::coalesce( void )
printkd("PAGEMGR low page count %ld\n", PageManager::cv_low_page_count);
}
+void PageManager::addMemory(size_t i_addr, size_t i_pageCount)
+{
+ PageManager& pmgr = Singleton<PageManager>::instance();
+ return pmgr._addMemory(i_addr, i_pageCount);
+}
+
+// add memory to the heap
+void PageManager::_addMemory(size_t i_addr, size_t i_pageCount)
+{
+ iv_heap.addMemory(i_addr,i_pageCount);
+
+ // Update statistics.
+ __sync_add_and_fetch(&iv_pagesAvail, i_pageCount);
+
+ // Update statistics.
+ __sync_add_and_fetch(&iv_pagesTotal, i_pageCount);
+
+ return;
+}
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index 4eb3b4fa8..23239a8a3 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -42,6 +42,7 @@
#include <kernel/intmsghandler.H>
#include <sys/sync.h>
+
extern "C"
void kernel_execute_decrementer()
{
@@ -92,6 +93,8 @@ namespace Systemcalls
void MmSetPermission(task_t *t);
void MmAllocPages(task_t *t);
void MmVirtToPhys(task_t *t);
+ void MmExtend(task_t *t);
+ void MmLinearMap(task_t *t);
syscall syscalls[] =
@@ -131,6 +134,8 @@ namespace Systemcalls
&MmSetPermission, // MM_SET_PERMISSION
&MmAllocPages, // MM_ALLOC_PAGES
&MmVirtToPhys, // MM_VIRT_TO_PHYS
+ &MmExtend, // MM_EXTEND
+ &MmLinearMap, // MM_LINEAR_MAP
};
};
@@ -794,5 +799,28 @@ namespace Systemcalls
uint64_t phys = VmmManager::findPhysicalAddress(i_vaddr);
TASK_SETRTN(t, phys);
}
+
+ /**
+     * Allocates a block of virtual memory that extends the VMM
+     * space up to 32MB of mainstore.
+ * @param[in] t: The task used to extend Memory
+ */
+ void MmExtend(task_t* t)
+ {
+ TASK_SETRTN(t, VmmManager::mmExtend());
+ }
+
+ /**
+     * Allocates a block of memory of the given size
+     * at a specified physical address.
+ */
+ void MmLinearMap(task_t* t)
+ {
+ void* paddr = (void *)TASK_GETARG0(t);
+ uint64_t size = (uint64_t)TASK_GETARG1(t);
+
+ TASK_SETRTN(t, VmmManager::mmLinearMap(paddr,size));
+ }
+
};
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index b7c98a0c5..d5152bf11 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -41,7 +41,6 @@ VmmManager::VmmManager() : lock()
void VmmManager::init()
{
- printk("Starting VMM...\n");
VmmManager& v = Singleton<VmmManager>::instance();
@@ -57,7 +56,6 @@ void VmmManager::init()
v.initPTEs();
v.initSDR1(); /*no effect*/ // BEAM Fix.
- printk("...done.\n");
};
void VmmManager::init_slb()
@@ -209,6 +207,20 @@ void VmmManager::_flushPageTable( void )
lock.unlock();
}
+
+int VmmManager::mmExtend(void)
+{
+ return Singleton<VmmManager>::instance()._mmExtend();
+}
+
+int VmmManager::_mmExtend(void)
+{
+ lock.lock();
+ int rc = BaseSegment::mmExtend();
+ lock.unlock();
+ return rc;
+}
+
void* VmmManager::_devMap(void* ra, uint64_t i_devDataSize)
{
void* ea = NULL;
@@ -247,3 +259,16 @@ uint64_t VmmManager::findKernelAddress(uint64_t i_vaddr)
}
return phys;
}
+
+int VmmManager::mmLinearMap(void *i_paddr, uint64_t i_size)
+{
+ return Singleton<VmmManager>::instance()._mmLinearMap(i_paddr, i_size);
+}
+
+int VmmManager::_mmLinearMap(void *i_paddr, uint64_t i_size)
+{
+ lock.lock();
+ int rc = BaseSegment::mmLinearMap(i_paddr, i_size);
+ lock.unlock();
+ return rc;
+}
OpenPOWER on IntegriCloud