summaryrefslogtreecommitdiffstats
path: root/src/kernel
diff options
context:
space:
mode:
authorPatrick Williams <iawillia@us.ibm.com>2013-03-26 11:23:47 -0500
committerA. Patrick Williams III <iawillia@us.ibm.com>2013-06-14 09:09:09 -0500
commite89e72d2f8a2efe86acad95ed0769aa7a8fe64ae (patch)
tree5541b72a698f58757ab2fe36b1a264fff6bcb3a2 /src/kernel
parent92255af10842c672550a586d342c67ac1c7e11ca (diff)
downloadtalos-hostboot-e89e72d2f8a2efe86acad95ed0769aa7a8fe64ae.tar.gz
talos-hostboot-e89e72d2f8a2efe86acad95ed0769aa7a8fe64ae.zip
Secureboot memory layout support.
* Start kernel in 1/4 cache mode per Secureboot. * Copy Secureboot header for base image for later use. * Blind-purge bottom half of cache. * Add bottom of cache into memory maps for 1/2 cache mode. RTC: 64762 Change-Id: I1b45f30a2d45c9709d4fd486cfe0ca2ce86b051c Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/3773 Reviewed-by: Michael Baiocchi <baiocchi@us.ibm.com> Tested-by: Jenkins Server Reviewed-by: ADAM R. MUHLE <armuhle@us.ibm.com> Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com> Reviewed-by: A. Patrick Williams III <iawillia@us.ibm.com>
Diffstat (limited to 'src/kernel')
-rw-r--r--src/kernel/basesegment.C18
-rw-r--r--src/kernel/misc.C47
-rw-r--r--src/kernel/pagemgr.C113
-rw-r--r--src/kernel/syscall.C4
4 files changed, 143 insertions, 39 deletions
diff --git a/src/kernel/basesegment.C b/src/kernel/basesegment.C
index 5317d2a25..e9f966433 100644
--- a/src/kernel/basesegment.C
+++ b/src/kernel/basesegment.C
@@ -62,8 +62,6 @@ void BaseSegment::_init()
iv_block = new Block(0x0, iv_physMemSize);
iv_block->setParent(this);
- // TODO iv_physMemSize needs to be recalculated when DIMM memory is avail.
-
// Set default page permissions on block.
for (uint64_t i = 0; i < VMM_BASE_BLOCK_SIZE; i += PAGESIZE)
{
@@ -246,9 +244,9 @@ int BaseSegment::_mmExtend(void)
// Call to allocate a block passing in the requested address of where the
// SPTEs should be created
- int rc = _mmAllocBlock(NULL, reinterpret_cast<void *>(l_vaddr), l_size, false,
- /*(uint64_t *)*/reinterpret_cast<uint64_t *>(l_vaddr));
-
+ int rc = _mmAllocBlock(NULL, reinterpret_cast<void *>(l_vaddr), l_size,
+ false, reinterpret_cast<uint64_t *>(l_vaddr));
+
if (rc)
{
printk("Got an error in mmAllocBlock\n");
@@ -264,7 +262,8 @@ int BaseSegment::_mmExtend(void)
// Now need to take the pages past the SPTE and add them to the heap.
//get the number of pages needed to hold the SPTE entries.
- uint64_t spte_pages = (ALIGN_PAGE(l_size)/PAGESIZE * sizeof(ShadowPTE))/PAGESIZE;
+ uint64_t spte_pages = (ALIGN_PAGE(l_size)/PAGESIZE *
+ sizeof(ShadowPTE))/PAGESIZE;
printkd("Number of SPTE pages %ld\n", spte_pages);
@@ -273,7 +272,8 @@ int BaseSegment::_mmExtend(void)
// used for the SPTE.
// Call Add Memory with the starting address , size.. it will put the pages
- // on the heap call this with the address being the first page past the SPTE.
+ // on the heap call this with the address being the first page past the
+ // SPTE.
PageManager::addMemory(l_vaddr + (spte_pages*PAGESIZE),
l_size/PAGESIZE - spte_pages);
@@ -291,7 +291,7 @@ int BaseSegment::_mmExtend(void)
/**
* Allocates a block of virtual memory of the given size
- * to at a specified physical address.
+ * to at a specified physical address.
*/
int BaseSegment::mmLinearMap(void *i_paddr, uint64_t i_size)
{
@@ -300,7 +300,7 @@ int BaseSegment::mmLinearMap(void *i_paddr, uint64_t i_size)
/**
* Allocates a block of virtual memory of the given size
- * to at a specified physical address
+ * to at a specified physical address
*/
int BaseSegment::_mmLinearMap(void *i_paddr, uint64_t i_size)
{
diff --git a/src/kernel/misc.C b/src/kernel/misc.C
index e280a1527..6d7e866f6 100644
--- a/src/kernel/misc.C
+++ b/src/kernel/misc.C
@@ -314,6 +314,51 @@ namespace KernelMisc
kassert(false);
}
+ int expand_half_cache()
+ {
+ static bool executed = false;
+
+ if (executed) // Why are we being called a second time?
+ {
+ return -EFAULT;
+ }
+
+ uint64_t startAddr = 512*KILOBYTE;
+ uint64_t endAddr = 1*MEGABYTE;
+
+ size_t cache_columns = 0;
+
+ switch(CpuID::getCpuType())
+ {
+ case CORE_POWER8_MURANO:
+ case CORE_POWER8_VENICE:
+ cache_columns = 4;
+ break;
+
+ default:
+ kassert(false);
+ break;
+ }
+
+ for (size_t i = 0; i < cache_columns; i++)
+ {
+ size_t offset = i * MEGABYTE;
+ populate_cache_lines(
+ reinterpret_cast<uint64_t*>(startAddr + offset),
+ reinterpret_cast<uint64_t*>(endAddr + offset));
+
+ PageManager::addMemory(startAddr + offset,
+ (512*KILOBYTE)/PAGESIZE);
+ }
+
+ executed = true;
+
+ KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
+ KernelMemState::HALF_CACHE);
+
+ return 0;
+ }
+
int expand_full_cache()
{
static bool executed = false;
@@ -334,8 +379,10 @@ namespace KernelMisc
( VmmManager::INITIAL_MEM_SIZE ) ;
endAddr =
reinterpret_cast<uint64_t*>(8 * MEGABYTE);
+ break;
default:
+ kassert(false);
break;
}
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index f1e74c695..c402c51bb 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -187,44 +187,98 @@ uint64_t PageManager::availPages()
PageManager::PageManager()
: iv_pagesAvail(0), iv_pagesTotal(0), iv_lock(0)
{
- // Determine first page of un-allocated memory
- // and number of pages available.
- uint64_t addr = firstPageAddr();
- size_t length = (MEMLEN - addr) / PAGESIZE;
+ this->_initialize();
+}
+void PageManager::_initialize()
+{
+ typedef PageManagerCore::page_t page_t;
+ uint64_t totalPages = 0;
- // Display.
- printk("Initializing PageManager with %zd pages starting at %lx...",
- length,
- addr);
+ page_t* startAddr = reinterpret_cast<page_t*>(firstPageAddr());
+ page_t* endAddr = reinterpret_cast<page_t*>(VmmManager::INITIAL_MEM_SIZE);
+ printk("Initializing PageManager starting at %p...", startAddr);
- // Populate L3 cache lines.
- uint64_t* cache_line = reinterpret_cast<uint64_t*>(addr);
- uint64_t* end_cache_line = (uint64_t*) VmmManager::INITIAL_MEM_SIZE;
- KernelMisc::populate_cache_lines(cache_line, end_cache_line);
+ // Calculate chunks along the top half of the L3 and erase them.
+ uint64_t currentBlock = reinterpret_cast<uint64_t>(startAddr);
+ do
+ {
+ if (currentBlock % (1*MEGABYTE) >= (512*KILOBYTE))
+ {
+ currentBlock = ALIGN_MEGABYTE(currentBlock);
+ continue;
+ }
+ uint64_t endBlock = ALIGN_MEGABYTE_DOWN(currentBlock) + 512*KILOBYTE;
- // Allocate pages
- iv_heapKernel.addMemory( addr, RESERVED_PAGES );
- addr += RESERVED_PAGES * PAGESIZE;
- length -= RESERVED_PAGES;
+ // Populate L3 cache lines for this chunk.
+ KernelMisc::populate_cache_lines(
+ reinterpret_cast<uint64_t*>(currentBlock),
+ reinterpret_cast<uint64_t*>(endBlock));
- iv_heap.addMemory( addr, length );
+ // Adjust address to compensate for reserved hole and add to
+ // heap...
- KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
- KernelMemState::HALF_CACHE);
+ // Check if this block starts in the hole.
+ if ((currentBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
+ (currentBlock < VmmManager::END_RESERVED_PAGE))
+ {
+ // End of the block is in the hole, skip.
+ if (endBlock < VmmManager::END_RESERVED_PAGE)
+ {
+ currentBlock = ALIGN_MEGABYTE(endBlock);
+ continue;
+ }
- // Statistics
- iv_pagesTotal = length;
- iv_pagesAvail = length;
- cv_low_page_count = length;
+ // Advance the current block past the hole.
+ currentBlock = VmmManager::END_RESERVED_PAGE;
+ }
+ // Check if the block has the hole in it.
+ if ((endBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
+ (currentBlock < VmmManager::FIRST_RESERVED_PAGE))
+ {
+ // Hole is at the end of the block, shrink it down.
+ if (endBlock < VmmManager::END_RESERVED_PAGE)
+ {
+ endBlock = VmmManager::FIRST_RESERVED_PAGE;
+ }
+ // Hole is in the middle... yuck.
+ else
+ {
+ uint64_t pages =
+ (VmmManager::FIRST_RESERVED_PAGE - currentBlock) / PAGESIZE;
+
+ iv_heap.addMemory(currentBlock, pages);
+ totalPages += pages;
+
+ currentBlock = VmmManager::END_RESERVED_PAGE;
+ }
+ }
+
+ uint64_t pages = (endBlock - currentBlock) / PAGESIZE;
+
+ iv_heap.addMemory(currentBlock, pages);
+ totalPages += pages;
- // @TODO: Venice: Clear 3-8MB region and add to free memory pool.
- // Can't do this now due to fake-PNOR driver.
- // iv_heap.addMemory(...);
+ currentBlock = ALIGN_MEGABYTE(endBlock);
- printk("done\n");
+ } while (reinterpret_cast<page_t*>(currentBlock) != endAddr);
+
+ printk("%ld pages.\n", totalPages);
+
+ // Reserve pages for the kernel.
+ iv_heapKernel.addMemory(reinterpret_cast<uint64_t>(
+ iv_heap.allocatePage(KERNEL_HEAP_RESERVED_PAGES)),
+ KERNEL_HEAP_RESERVED_PAGES);
+
+ // Statistics
+ iv_pagesTotal = totalPages;
+ iv_pagesAvail = totalPages;
+ cv_low_page_count = totalPages;
+
+ KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
+ KernelMemState::PRE_SECURE_BOOT);
}
void* PageManager::_allocatePage(size_t n, bool userspace)
@@ -271,11 +325,10 @@ void PageManager::_freePage(void* p, size_t n)
__sync_add_and_fetch(&iv_pagesAvail, n);
// Keep the reserved page count for the kernel full
- // Should it be continuous RESERVED_PAGES??
size_t ks = iv_heapKernel.getFreePageCount();
- if(ks < RESERVED_PAGES)
+ if(ks < KERNEL_HEAP_RESERVED_PAGES)
{
- ks = RESERVED_PAGES - ks;
+ ks = KERNEL_HEAP_RESERVED_PAGES - ks;
PageManagerCore::page_t * page = iv_heap.allocatePage(ks);
if(page)
{
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index 610f36d26..1c9b62ab6 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -865,6 +865,10 @@ namespace Systemcalls
switch (size)
{
+ case MM_EXTEND_POST_SECUREBOOT:
+ TASK_SETRTN(t, KernelMisc::expand_half_cache());
+ break;
+
case MM_EXTEND_FULL_CACHE:
TASK_SETRTN(t, KernelMisc::expand_full_cache());
break;
OpenPOWER on IntegriCloud