author     Stephen Cprek <smcprek@us.ibm.com>       2017-06-19 15:26:19 -0500
committer  Daniel M. Crowell <dcrowell@us.ibm.com>  2017-07-20 15:54:29 -0400
commit     0b680113fbc55b91bc7785ef235df32df6103eda (patch)
tree       a8c11e0beee39d5d7e615a92791cc232f3624dcf
parent     e43ee27ebd0f6028f74ee51793f4ca00b1fb04b9 (diff)
Remove half cache init and do it in the page manager
Change-Id: I3e870c9b50d13704c4c88adfc96e5943cff9dae2
RTC: 175114
Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/42153
Reviewed-by: Michael Baiocchi <mbaiocch@us.ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Reviewed-by: Martin Gloff <mgloff@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
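
Net effect of the commit: the kernel no longer boots into a quarter-cache PRE_SECURE_BOOT (0xFF) state and later expands to half cache via mm_extend(MM_EXTEND_PARTIAL_CACHE); the page manager now zeroes and claims the half-cache footprint itself and reports HALF_CACHE directly. A small standalone sketch of the simplified state progression, using the values from the memstate.H hunk below; NO_MEM's value is an assumption here.

// sketch.C -- not part of the commit; illustrates the simplified memory-state
// progression once PRE_SECURE_BOOT (0xFF) is removed.
#include <cstdint>
#include <cstdio>

enum MemSize : uint32_t
{
    NO_MEM        = 0x00000000,   // assumed; not visible in the hunk below
    HALF_CACHE    = 0x00000004,   // remaining values copied from memstate.H
    REDUCED_CACHE = 0x00000008,
    FULL_CACHE    = 0x0000000A,
    MS_32MEG      = 0x00000020,
};

int main()
{
    const MemSize progression[] =
        { NO_MEM, HALF_CACHE, REDUCED_CACHE, FULL_CACHE, MS_32MEG };
    for (MemSize s : progression)
        std::printf("memory state -> 0x%02X\n", static_cast<unsigned>(s));
    return 0;
}
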
-rwxr-xr-x  src/build/debug/Hostboot/Dump.pm          20
-rw-r--r--  src/include/bootloader/bootloaderif.H      6
-rw-r--r--  src/include/kernel/memstate.H              3
-rw-r--r--  src/include/kernel/misc.H                  9
-rw-r--r--  src/include/kernel/vmmmgr.H               14
-rw-r--r--  src/include/sys/mm.h                       3
-rw-r--r--  src/kernel/misc.C                         51
-rw-r--r--  src/kernel/pagemgr.C                     111
-rw-r--r--  src/kernel/syscall.C                       4
-rw-r--r--  src/kernel/vmmmgr.C                       14
-rw-r--r--  src/usr/secureboot/base/service.C          5
11 files changed, 66 insertions, 174 deletions
diff --git a/src/build/debug/Hostboot/Dump.pm b/src/build/debug/Hostboot/Dump.pm
index 6a89f0365..9ce3dd679 100755
--- a/src/build/debug/Hostboot/Dump.pm
+++ b/src/build/debug/Hostboot/Dump.pm
@@ -37,7 +37,6 @@ use constant MEMSTATE_HALF_CACHE => 0x4;
use constant MEMSTATE_REDUCED_CACHE => 0x8;
use constant MEMSTATE_FULL_CACHE => 0xa;
use constant MEMSTATE_MS_32MEG => 0x20;
-use constant MEMSTATE_PRE_SECURE_BOOT => 0xff;
use constant _KB => 1024;
use constant _MB => 1024 * 1024;
@@ -50,16 +49,6 @@ our %memory_maps = (
# Secureboot Header.
[ 0, (512 - 4) * _KB
],
- MEMSTATE_PRE_SECURE_BOOT() =>
- # Until the early secureboot operations have been done, we can
- # only access the top 512k of each 1MB column. Need to avoid
- # the hole for the MBOX DMA buffers (64K @ 3MB + 256K).
- [ (512 - 4) * _KB, 4 * _KB,
- 1 * _MB, 512 * _KB,
- 2 * _MB, 512 * _KB,
- 3 * _MB, 256 * _KB,
- 3 * _MB + (256 + 64) * _KB, (256 - 64) * _KB
- ],
MEMSTATE_HALF_CACHE() =>
# All of the first 4MB can now be read (except reserved MBOX).
[ 512 * _KB, 512 * _KB,
@@ -88,16 +77,15 @@ our %memory_maps = (
# Map the current state to the combined states available.
our %memory_states = (
MEMSTATE_NO_MEM() => [ MEMSTATE_NO_MEM ],
- MEMSTATE_PRE_SECURE_BOOT() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT ],
- MEMSTATE_HALF_CACHE() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
+ MEMSTATE_HALF_CACHE() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE ],
MEMSTATE_REDUCED_CACHE() =>
- [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
+ [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE ],
- MEMSTATE_FULL_CACHE() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
+ MEMSTATE_FULL_CACHE() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE,
MEMSTATE_FULL_CACHE ],
- MEMSTATE_MS_32MEG() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
+ MEMSTATE_MS_32MEG() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE,
MEMSTATE_FULL_CACHE, MEMSTATE_MS_32MEG ]
);
diff --git a/src/include/bootloader/bootloaderif.H b/src/include/bootloader/bootloaderif.H
index dd1c3a8ff..25b3520e8 100644
--- a/src/include/bootloader/bootloaderif.H
+++ b/src/include/bootloader/bootloaderif.H
@@ -40,11 +40,13 @@ namespace Bootloader{
// Size of exception vector reserved space at start of the HBBL section
#define HBBL_EXCEPTION_VECTOR_SIZE (12 * KILOBYTE)
+#define MAX_HBB_SIZE (512 * KILOBYTE)
+
// The Bootloader to Hostboot communication area exists after the working HBB
#ifdef BOOTLOADER
-#define BLTOHB_COMM_DATA_ADDR (getHRMOR() - ( 2*MEGABYTE) + 512*KILOBYTE)
+#define BLTOHB_COMM_DATA_ADDR (getHRMOR() - ( 2*MEGABYTE) + MAX_HBB_SIZE)
#else
-#define BLTOHB_COMM_DATA_ADDR (getHRMOR() + 512*KILOBYTE)
+#define BLTOHB_COMM_DATA_ADDR (getHRMOR() + MAX_HBB_SIZE)
#endif
// Expected BlToHbData eye catch
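
The bootloaderif.H change above only replaces the 512KB literal with the new MAX_HBB_SIZE name, but the two builds compute the comm-area address differently and that is easy to miss in the macros. A standalone sketch of both computations; getHRMOR() and its return value are stand-ins here.

// sketch.C -- not part of the commit; getHRMOR() and its value are made up.
#include <cstdint>
#include <cstdio>

constexpr uint64_t KILOBYTE     = 1024;
constexpr uint64_t MEGABYTE     = 1024 * KILOBYTE;
constexpr uint64_t MAX_HBB_SIZE = 512 * KILOBYTE;

static uint64_t getHRMOR() { return 128 * MEGABYTE; }  // example value only

int main()
{
    // Bootloader build: the working HBB copy sits 2MB below the bootloader's
    // HRMOR, so the comm area lands just past that copy.
    uint64_t bl = getHRMOR() - 2 * MEGABYTE + MAX_HBB_SIZE;
    // Hostboot build: HRMOR points at the working HBB itself.
    uint64_t hb = getHRMOR() + MAX_HBB_SIZE;

    std::printf("BLTOHB_COMM_DATA_ADDR: bootloader %#llx, hostboot %#llx\n",
                static_cast<unsigned long long>(bl),
                static_cast<unsigned long long>(hb));
    return 0;
}
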
diff --git a/src/include/kernel/memstate.H b/src/include/kernel/memstate.H
index b5215a806..84acf3b4e 100644
--- a/src/include/kernel/memstate.H
+++ b/src/include/kernel/memstate.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2013,2016 */
+/* Contributors Listed Below - COPYRIGHT 2013,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -63,7 +63,6 @@ namespace KernelMemState
REDUCED_CACHE = 0x00000008,
FULL_CACHE = 0x0000000A,
MS_32MEG = 0x00000020,
- PRE_SECURE_BOOT = 0x000000FF,
};
struct mem_location
diff --git a/src/include/kernel/misc.H b/src/include/kernel/misc.H
index f08af361f..4c570423a 100644
--- a/src/include/kernel/misc.H
+++ b/src/include/kernel/misc.H
@@ -152,15 +152,6 @@ namespace KernelMisc
};
- /** @fn expand_half_cache
- *
- * @brief Expands the image footprint from a quarter-cache (top 512k of
- * each cache column) to a half-cache (full 1mb of each column).
- *
- * @return 0 or -errno
- */
- int expand_half_cache();
-
/** @fn expand_full_cache
*
* @brief Expands the image footprint from a half-cache to full-cache
diff --git a/src/include/kernel/vmmmgr.H b/src/include/kernel/vmmmgr.H
index 84a981b24..9c437fe23 100644
--- a/src/include/kernel/vmmmgr.H
+++ b/src/include/kernel/vmmmgr.H
@@ -47,18 +47,16 @@ class VmmManager
// Place the page table at the top side of the cache, 256k in size.
INITIAL_PT_OFFSET = INITIAL_MEM_SIZE - 1*MEGABYTE,
PTSIZE = 256*KILOBYTE,
- HTABORG_OFFSET = INITIAL_PT_OFFSET,
// Put the DMA Pages just after the Page Table
MBOX_DMA_PAGES = 64, // must be <= 64
MBOX_DMA_PAGESIZE = (1 * KILOBYTE),
- MBOX_DMA_ADDR = INITIAL_PT_OFFSET + PTSIZE,
MBOX_DMA_SIZE = MBOX_DMA_PAGES * MBOX_DMA_PAGESIZE,
/** We need to reserve a hole in heap memory for the page table,
* etc. Use these constants to define the hole. */
FIRST_RESERVED_PAGE = INITIAL_PT_OFFSET,
- END_RESERVED_PAGE = INITIAL_PT_OFFSET + PTSIZE + MBOX_DMA_SIZE,
+ END_RESERVED_PAGE = INITIAL_PT_OFFSET + PTSIZE,
BLTOHB_DATA_START = END_RESERVED_PAGE,
@@ -215,6 +213,13 @@ class VmmManager
*/
static int mmLinearMap(void *i_paddr, uint64_t i_size);
+ /** @fn pageTableOffset()
+ * @brief Gets starting address of Page Table
+ *
+ * @return uint64_t - starting address of Page Table
+ */
+ static uint64_t pageTableOffset();
+
protected:
VmmManager();
~VmmManager() {};
@@ -274,6 +279,9 @@ class VmmManager
/** See mmLinearMap */
int _mmLinearMap(void*, uint64_t);
+ /** See pageTableOffset */
+ uint64_t _pageTableOffset() const;
+
public:
friend class Block;
friend class StackSegment;
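
The vmmmgr.H hunk drops MBOX_DMA_ADDR and shrinks END_RESERVED_PAGE, so the hole reserved out of the heap now covers exactly the page table. A quick standalone check of the bounds the remaining constants imply; INITIAL_MEM_SIZE is not visible in the hunk and is assumed to be 4MB here.

// sketch.C -- not part of the commit; INITIAL_MEM_SIZE is an assumption.
#include <cstdint>
#include <cstdio>

constexpr uint64_t KILOBYTE            = 1024;
constexpr uint64_t MEGABYTE            = 1024 * KILOBYTE;
constexpr uint64_t INITIAL_MEM_SIZE    = 4 * MEGABYTE;                // assumed
constexpr uint64_t INITIAL_PT_OFFSET   = INITIAL_MEM_SIZE - 1 * MEGABYTE;
constexpr uint64_t PTSIZE              = 256 * KILOBYTE;
constexpr uint64_t FIRST_RESERVED_PAGE = INITIAL_PT_OFFSET;
constexpr uint64_t END_RESERVED_PAGE   = INITIAL_PT_OFFSET + PTSIZE;
constexpr uint64_t BLTOHB_DATA_START   = END_RESERVED_PAGE;

static_assert(END_RESERVED_PAGE - FIRST_RESERVED_PAGE == PTSIZE,
              "the heap hole now covers exactly the page table");

int main()
{
    std::printf("reserved hole [%#llx, %#llx), BlToHb data starts at %#llx\n",
                static_cast<unsigned long long>(FIRST_RESERVED_PAGE),
                static_cast<unsigned long long>(END_RESERVED_PAGE),
                static_cast<unsigned long long>(BLTOHB_DATA_START));
    return 0;
}
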
diff --git a/src/include/sys/mm.h b/src/include/sys/mm.h
index e7b49f8f7..b89c82fea 100644
--- a/src/include/sys/mm.h
+++ b/src/include/sys/mm.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2011,2016 */
+/* Contributors Listed Below - COPYRIGHT 2011,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -102,7 +102,6 @@ int mm_set_permission(void* va, uint64_t size, uint64_t access_type);
enum MM_EXTEND_SIZE
{
- MM_EXTEND_PARTIAL_CACHE, //< Extend memory to include 512KB to 4MB
MM_EXTEND_REDUCED_CACHE, //< Extend memory to include reduced cache (8MB).
MM_EXTEND_FULL_CACHE, //< Extend memory to include full cache (10MB).
MM_EXTEND_REAL_MEMORY, //< Extend memory into real mainstore.
diff --git a/src/kernel/misc.C b/src/kernel/misc.C
index 6122e40a2..75f476187 100644
--- a/src/kernel/misc.C
+++ b/src/kernel/misc.C
@@ -490,54 +490,6 @@ namespace KernelMisc
kassert(false);
}
- int expand_half_cache()
- {
- static bool executed = false;
-
- if (executed) // Why are we being called a second time?
- {
- return -EFAULT;
- }
-
- uint64_t startAddr = 512*KILOBYTE;
- uint64_t endAddr = 1*MEGABYTE;
-
- size_t cache_columns = 0;
-
- switch(CpuID::getCpuType())
- {
- case CORE_POWER8_MURANO:
- case CORE_POWER8_VENICE:
- case CORE_POWER8_NAPLES:
- case CORE_POWER9_NIMBUS:
- case CORE_POWER9_CUMULUS:
- cache_columns = 4;
- break;
-
- default:
- kassert(false);
- break;
- }
-
- for (size_t i = 0; i < cache_columns; i++)
- {
- size_t offset = i * MEGABYTE;
- populate_cache_lines(
- reinterpret_cast<uint64_t*>(startAddr + offset),
- reinterpret_cast<uint64_t*>(endAddr + offset));
-
- PageManager::addMemory(startAddr + offset,
- (512*KILOBYTE)/PAGESIZE);
- }
-
- executed = true;
-
- KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
- KernelMemState::HALF_CACHE);
-
- return 0;
- }
-
int expand_full_cache(uint64_t i_expandSize)
{
static bool executed = false;
@@ -596,6 +548,9 @@ namespace KernelMisc
{
size_t cache_line_size = getCacheLineWords();
+ // Assert start/end address is divisible by Cache Line Words
+ kassert(reinterpret_cast<uint64_t>(i_start)%cache_line_size == 0);
+ kassert(reinterpret_cast<uint64_t>(i_end)%cache_line_size == 0);
while(i_start != i_end)
{
dcbz(i_start);
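
The new kassert()s above guard populate_cache_lines() against start/end addresses that do not line up with whole cache lines, since the while(i_start != i_end) loop could otherwise overshoot. A rough userspace analogue of the same idea, with memset() standing in for dcbz; the 128-byte line size is an assumption about the hardware, not something stated in the diff.

// sketch.C -- not part of the commit; userspace stand-in for the kernel loop.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr std::size_t CACHE_LINE_BYTES = 128;       // assumed line size

static void populate_cache_lines_sketch(uint64_t* i_start, uint64_t* i_end)
{
    // Both bounds must fall on a cache-line boundary, or the loop would zero
    // partial lines and could step past i_end without ever equaling it.
    assert(reinterpret_cast<std::uintptr_t>(i_start) % CACHE_LINE_BYTES == 0);
    assert(reinterpret_cast<std::uintptr_t>(i_end)   % CACHE_LINE_BYTES == 0);

    while (i_start != i_end)
    {
        std::memset(i_start, 0, CACHE_LINE_BYTES);  // dcbz stand-in
        i_start += CACHE_LINE_BYTES / sizeof(uint64_t);
    }
}

int main()
{
    alignas(CACHE_LINE_BYTES) static uint64_t buf[4 * CACHE_LINE_BYTES
                                                  / sizeof(uint64_t)];
    populate_cache_lines_sketch(buf, buf + sizeof(buf) / sizeof(buf[0]));
    return 0;
}
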
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index 5382dc0e0..02e8ce710 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -206,95 +206,44 @@ void PageManager::_initialize()
uint64_t totalPages = 0;
page_t* startAddr = reinterpret_cast<page_t*>(firstPageAddr());
- page_t* endAddr = reinterpret_cast<page_t*>(VmmManager::INITIAL_MEM_SIZE);
- printk("PageManager starts at %p...", startAddr);
+ printk("PageManager starts at %p\n", startAddr);
+ // Populate cache lines from end of HBB to PT offset and add to heap
+ uint64_t startBlock = reinterpret_cast<uint64_t>(startAddr);
+ uint64_t endBlock = VmmManager::INITIAL_PT_OFFSET;
+ KernelMisc::populate_cache_lines(
+ reinterpret_cast<uint64_t*>(startBlock),
+ reinterpret_cast<uint64_t*>(endBlock));
+
+ uint64_t pages = (endBlock - startBlock) / PAGESIZE;
+ iv_heap.addMemory(startBlock, pages);
+ totalPages += pages;
+
+ // Populate cache lines of PT
+ startBlock = VmmManager::INITIAL_PT_OFFSET;
+ endBlock = VmmManager::INITIAL_PT_OFFSET + VmmManager::PTSIZE;
+ KernelMisc::populate_cache_lines(reinterpret_cast<uint64_t*>(startBlock),
+ reinterpret_cast<uint64_t*>(endBlock));
+
+ // Populate cache lines from end of preserved area (PT + secureboot data) to
+ // 4MB and add to heap
// Add on secureboot data size to end of reserved space
size_t securebootDataSize = 0;
if (g_BlToHbDataManager.isValid())
{
securebootDataSize = g_BlToHbDataManager.getPreservedSize();
}
- size_t l_endReservedPage = VmmManager::END_RESERVED_PAGE
+ size_t l_endReservedPage = VmmManager::BLTOHB_DATA_START
+ securebootDataSize;
-
- // Calculate chunks along the top half of the L3 and erase them.
- uint64_t currentBlock = reinterpret_cast<uint64_t>(startAddr);
- do
- {
- if (currentBlock % (1*MEGABYTE) >= (512*KILOBYTE))
- {
- currentBlock = ALIGN_MEGABYTE(currentBlock);
- continue;
- }
-
- uint64_t endBlock = ALIGN_MEGABYTE_DOWN(currentBlock) + 512*KILOBYTE;
-
- // Adjust address to compensate for reserved hole and add to
- // heap...
-
- // Check if this block starts in the hole.
- if ((currentBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
- (currentBlock < l_endReservedPage))
- {
- // End of the block is in the hole, skip.
- if (endBlock < l_endReservedPage)
- {
- currentBlock = ALIGN_MEGABYTE(endBlock);
- continue;
- }
-
- // Advance the current block past the hole.
- currentBlock = l_endReservedPage;
- }
-
- // Check if the block is has the hole in it.
- if ((endBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
- (currentBlock < VmmManager::FIRST_RESERVED_PAGE))
- {
- // Hole is at the end of the block, shrink it down.
- if (endBlock < l_endReservedPage)
- {
- endBlock = VmmManager::FIRST_RESERVED_PAGE;
- }
- // Hole is in the middle... yuck.
- else
- {
- uint64_t hole_end =
- (VmmManager::FIRST_RESERVED_PAGE - currentBlock);
-
- // Populate L3 for the first part of the chunk.
- KernelMisc::populate_cache_lines(
- reinterpret_cast<uint64_t*>(currentBlock),
- reinterpret_cast<uint64_t*>(hole_end));
-
- // Add it to the heap.
- iv_heap.addMemory(currentBlock, hole_end / PAGESIZE);
- totalPages += (hole_end / PAGESIZE);
-
- currentBlock = l_endReservedPage;
- }
- }
-
- // Populate L3 cache lines for this chunk.
- KernelMisc::populate_cache_lines(
- reinterpret_cast<uint64_t*>(currentBlock),
- reinterpret_cast<uint64_t*>(endBlock));
-
- uint64_t pages = (endBlock - currentBlock) / PAGESIZE;
-
- iv_heap.addMemory(currentBlock, pages);
- totalPages += pages;
-
- currentBlock = ALIGN_MEGABYTE(endBlock);
-
- } while (reinterpret_cast<page_t*>(currentBlock) != endAddr);
-
- // Ensure HW page table area is erased / populated.
+ startBlock = l_endReservedPage;
+ endBlock = VmmManager::INITIAL_MEM_SIZE;
KernelMisc::populate_cache_lines(
- reinterpret_cast<uint64_t*>(VmmManager::INITIAL_PT_OFFSET),
- reinterpret_cast<uint64_t*>(VmmManager::INITIAL_PT_OFFSET +
- VmmManager::PTSIZE));
+ reinterpret_cast<uint64_t*>(startBlock),
+ reinterpret_cast<uint64_t*>(endBlock));
+
+ pages = (endBlock - startBlock) / PAGESIZE;
+ iv_heap.addMemory(startBlock, pages);
+ totalPages += pages;
printk("%ld pages.\n", totalPages);
@@ -309,7 +258,7 @@ void PageManager::_initialize()
cv_low_page_count = totalPages;
KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
- KernelMemState::PRE_SECURE_BOOT);
+ KernelMemState::HALF_CACHE);
}
void* PageManager::_allocatePage(size_t n, bool userspace)
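
Stripped of the diff noise, the new _initialize() walks three contiguous regions instead of iterating megabyte columns around a hole. A compilable sketch of that flow; the constants mirror the vmmmgr.H hunk, but INITIAL_MEM_SIZE, PAGESIZE, the 512KB heap start, and the helper signatures are assumptions or stand-ins, not the kernel's real interfaces.

// sketch.C -- not part of the commit; populate_cache_lines()/addMemory() are
// stand-ins for the kernel calls used in the hunk above.
#include <cstdint>
#include <cstdio>

constexpr uint64_t KILOBYTE          = 1024;
constexpr uint64_t MEGABYTE          = 1024 * KILOBYTE;
constexpr uint64_t PAGESIZE          = 4 * KILOBYTE;                  // assumed
constexpr uint64_t INITIAL_MEM_SIZE  = 4 * MEGABYTE;                  // assumed
constexpr uint64_t INITIAL_PT_OFFSET = INITIAL_MEM_SIZE - 1 * MEGABYTE;
constexpr uint64_t PTSIZE            = 256 * KILOBYTE;
constexpr uint64_t BLTOHB_DATA_START = INITIAL_PT_OFFSET + PTSIZE;

static void populate_cache_lines(uint64_t start, uint64_t end)        // stand-in
{
    std::printf("dcbz  [%#llx, %#llx)\n",
                static_cast<unsigned long long>(start),
                static_cast<unsigned long long>(end));
}

static uint64_t addMemory(uint64_t start, uint64_t bytes)             // stand-in
{
    std::printf("heap += %#llx (%llu pages)\n",
                static_cast<unsigned long long>(start),
                static_cast<unsigned long long>(bytes / PAGESIZE));
    return bytes / PAGESIZE;
}

int main()
{
    uint64_t heapStart          = 512 * KILOBYTE;   // end of HBB image, assumed
    uint64_t securebootDataSize = 64 * KILOBYTE;    // example value only
    uint64_t totalPages         = 0;

    // 1) End of HBB up to the page table: zero and hand to the heap.
    populate_cache_lines(heapStart, INITIAL_PT_OFFSET);
    totalPages += addMemory(heapStart, INITIAL_PT_OFFSET - heapStart);

    // 2) The page table itself: zero only, never added to the heap.
    populate_cache_lines(INITIAL_PT_OFFSET, INITIAL_PT_OFFSET + PTSIZE);

    // 3) Past the preserved BlToHb/secureboot data up to 4MB: zero and add.
    uint64_t endReserved = BLTOHB_DATA_START + securebootDataSize;
    populate_cache_lines(endReserved, INITIAL_MEM_SIZE);
    totalPages += addMemory(endReserved, INITIAL_MEM_SIZE - endReserved);

    std::printf("%llu pages.\n", static_cast<unsigned long long>(totalPages));
    return 0;
}
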
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index adef18afe..bf92fe913 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -908,10 +908,6 @@ namespace Systemcalls
switch (size)
{
- case MM_EXTEND_PARTIAL_CACHE:
- TASK_SETRTN(t, KernelMisc::expand_half_cache());
- break;
-
case MM_EXTEND_REDUCED_CACHE:
TASK_SETRTN(t, KernelMisc::expand_full_cache(8*MEGABYTE));
break;
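
From a caller's point of view the only API change is that MM_EXTEND_PARTIAL_CACHE no longer exists, so the smallest extension a task can request is the reduced-cache step. A caller-side sketch intended to compile inside the Hostboot tree; mm_extend() and the MM_EXTEND_* values come from sys/mm.h in this diff, and the assert style mirrors the service.C code below.

// sketch.C -- not part of the commit; caller-side view only.
#include <assert.h>
#include <sys/mm.h>

void extend_past_half_cache()
{
    // MM_EXTEND_PARTIAL_CACHE is gone: the kernel already owns the first 4MB
    // (HALF_CACHE) by the time tasks run, so the smallest request left is the
    // 8MB reduced-cache step...
    assert(0 == mm_extend(MM_EXTEND_REDUCED_CACHE));

    // ...followed, where appropriate, by the full 10MB cache.
    assert(0 == mm_extend(MM_EXTEND_FULL_CACHE));
}
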
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index e7240dc11..9902a0527 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2010,2016 */
+/* Contributors Listed Below - COPYRIGHT 2010,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -277,7 +277,7 @@ int VmmManager::_devUnmap(void* ea)
uint64_t VmmManager::HTABORG()
{
- return ((uint32_t)HTABORG_OFFSET + getHRMOR());
+ return static_cast<uint32_t>(pageTableOffset()) + getHRMOR();
}
uint64_t VmmManager::findKernelAddress(uint64_t i_vaddr)
@@ -304,3 +304,13 @@ int VmmManager::_mmLinearMap(void *i_paddr, uint64_t i_size)
lock.unlock();
return rc;
}
+
+uint64_t VmmManager::pageTableOffset()
+{
+ return Singleton<VmmManager>::instance()._pageTableOffset();
+}
+
+uint64_t VmmManager::_pageTableOffset() const
+{
+ return INITIAL_PT_OFFSET;
+}
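
HTABORG() now derives the page-table base through the new pageTableOffset() accessor instead of the removed HTABORG_OFFSET enumerator, using the same static-wrapper-plus-instance-method shape as the rest of VmmManager. A minimal standalone illustration of that shape; Singleton<> here is a stand-in for Hostboot's real singleton helper and the 3MB offset is an assumption.

// sketch.C -- not part of the commit; shows only the delegation pattern.
#include <cstdint>
#include <cstdio>

template <typename T>
struct Singleton { static T& instance() { static T t; return t; } };

class VmmSketch
{
  public:
    static uint64_t pageTableOffset()               // static wrapper
    { return Singleton<VmmSketch>::instance()._pageTableOffset(); }

  private:
    static constexpr uint64_t INITIAL_PT_OFFSET = 3ull * 1024 * 1024; // assumed
    uint64_t _pageTableOffset() const { return INITIAL_PT_OFFSET; }
};

int main()
{
    std::printf("page table offset: %#llx\n",
                static_cast<unsigned long long>(VmmSketch::pageTableOffset()));
    return 0;
}
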
diff --git a/src/usr/secureboot/base/service.C b/src/usr/secureboot/base/service.C
index 8c8f742f8..2674005c0 100644
--- a/src/usr/secureboot/base/service.C
+++ b/src/usr/secureboot/base/service.C
@@ -98,14 +98,9 @@ void* initializeBase(void* unused)
Singleton<Header>::instance().loadSecurely();
}
- // Extend memory footprint into lower portion of cache.
- assert(0 == mm_extend(MM_EXTEND_PARTIAL_CACHE));
-
// Don't extend more than 1/2 cache in VPO as fake PNOR is there
// Don't enable SecureRomManager in VPO
#ifndef CONFIG_P9_VPO_COMPILE
- // Run dcbz on the entire 10MB cache
- assert(0 == mm_extend(MM_EXTEND_FULL_CACHE));
// Initialize the Secure ROM
l_errl = initializeSecureRomManager();