summaryrefslogtreecommitdiffstats
path: root/src/kernel
diff options
context:
space:
mode:
authorDoug Gilbert <dgilbert@us.ibm.com>2011-09-12 12:47:53 -0500
committerDouglas R. Gilbert <dgilbert@us.ibm.com>2011-09-19 16:05:34 -0500
commitde8a529d349aebb344979609055f123c196ccfe3 (patch)
treeb91665407f69730aaba8da794afc47240997dd47 /src/kernel
parentb754f8b47e343f449e5f05f67b948513363abd12 (diff)
downloadtalos-hostboot-de8a529d349aebb344979609055f123c196ccfe3.tar.gz
talos-hostboot-de8a529d349aebb344979609055f123c196ccfe3.zip
Mechanism to detect low memory and cast out older page
Change-Id: Icce8e01f3d1cd2942f2b9ff802993da0441535ee Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/344 Tested-by: Jenkins Server Reviewed-by: A. Patrick Williams III <iawillia@us.ibm.com> Reviewed-by: Douglas R. Gilbert <dgilbert@us.ibm.com>
Diffstat (limited to 'src/kernel')
-rw-r--r--src/kernel/basesegment.C9
-rw-r--r--src/kernel/block.C99
-rw-r--r--src/kernel/cpumgr.C32
-rw-r--r--src/kernel/pagemgr.C9
-rw-r--r--src/kernel/ptmgr.C22
-rw-r--r--src/kernel/scheduler.C2
-rw-r--r--src/kernel/segmentmgr.C18
-rw-r--r--src/kernel/vmmmgr.C28
8 files changed, 217 insertions, 2 deletions
diff --git a/src/kernel/basesegment.C b/src/kernel/basesegment.C
index e59e41753..eab00accc 100644
--- a/src/kernel/basesegment.C
+++ b/src/kernel/basesegment.C
@@ -29,6 +29,7 @@
#include <kernel/segmentmgr.H>
#include <kernel/block.H>
#include <kernel/cpuid.H>
+#include <kernel/console.H>
BaseSegment::~BaseSegment()
@@ -147,3 +148,11 @@ int BaseSegment::mmSetPermission(void* i_va, uint64_t i_size,PAGE_PERMISSIONS i_
{
return 0;
}
+
+void BaseSegment::castOutPages(uint64_t i_type)
+{
+ size_t cast_out = 0;
+ cast_out = iv_block->castOutPages(i_type);
+ // Could try again with a more aggressive constraint if cast_out == 0 ????
+ if(cast_out) printkd("Cast out %ld pages,Type=%ld\n",cast_out,i_type);
+}
diff --git a/src/kernel/block.C b/src/kernel/block.C
index 59a7892f2..63c1fae92 100644
--- a/src/kernel/block.C
+++ b/src/kernel/block.C
@@ -108,6 +108,15 @@ bool Block::handlePageFault(task_t* i_task, uint64_t i_addr)
}
else
{
+ // Test code @TODO remove - SET up ro pages to test cast out pages
+ //if(pte->getPage() == 0)
+ //{
+ // void* l_page = PageManager::allocatePage();
+ // memset(l_page,'U',PAGESIZE);
+ // pte->setPageAddr(reinterpret_cast<uint64_t>(l_page));
+ // pte->setPresent(true);
+ // pte->setWritable(false);
+ //}
return false; //TODO - Swap kernel base block pages for user pages
}
}
@@ -301,3 +310,93 @@ void Block::updateRefCount( uint64_t i_vaddr,
spte->setDirty( i_stats.C );
}
+
+bool Block::evictPage(ShadowPTE* i_pte)
+{
+ ShadowPTE* pte = i_pte;
+ bool do_cast_out = false;
+
+ if(!pte->isWritable()) // ro, executable
+ {
+ do_cast_out = true;
+ }
+ else // is writable...
+ {
+ // if pte->isWriteTracked() flush then cast out
+ }
+
+ if(do_cast_out)
+ {
+ PageTableManager::delEntry(pte->getPageAddr());
+ PageManager::freePage(reinterpret_cast<void*>(pte->getPageAddr()));
+ pte->setPresent(false);
+ pte->setPageAddr(NULL);
+ }
+
+ return do_cast_out;
+}
+
+size_t Block::castOutPages(uint64_t i_type)
+{
+ size_t cast_out = 0;
+ // drill down
+ if(iv_nextBlock)
+ {
+ cast_out += iv_nextBlock->castOutPages(i_type);
+ }
+
+ // TODO We will eventually need to skip other blocks as well, such as
+ // when the memory space grows.
+ if(iv_baseAddr != 0) // Skip base area
+ {
+ bool is_cast_out = false;
+ size_t rw_constraint = 5;
+ size_t ro_constraint = 3;
+
+ if(i_type == VmmManager::CRITICAL)
+ {
+ rw_constraint = 2;
+ ro_constraint = 1;
+ }
+ //printk("Block = %p:%ld\n",(void*)iv_baseAddr,iv_size / PAGESIZE);
+ for(uint64_t page = iv_baseAddr;
+ page < (iv_baseAddr + iv_size);
+ page += PAGESIZE)
+ {
+ ShadowPTE* pte = getPTE(page);
+ if (pte->isPresent() && (0 != pte->getPageAddr()))
+ {
+ //if(pte->isExecutable()) printk("x");
+ //else if(pte->isWritable()) printk("w");
+ //else printk("r");
+ //printk("%d",(int)pte->getLRU());
+
+ if(pte->isWritable())
+ {
+ if((pte->getLRU() > rw_constraint) && pte->isWriteTracked())
+ {
+ is_cast_out = evictPage(pte);
+ //printk("+");
+ }
+ }
+ else // ro and/or executable
+ {
+ if(pte->getLRU() > ro_constraint)
+ {
+ is_cast_out = evictPage(pte);
+ }
+ }
+
+ if(is_cast_out)
+ {
+ //printk("-");
+ ++cast_out;
+ is_cast_out = false;
+ }
+ }
+ }
+ //printk("\n");
+ }
+
+ return cast_out;
+}
diff --git a/src/kernel/cpumgr.C b/src/kernel/cpumgr.C
index d8a9960f6..01af160f1 100644
--- a/src/kernel/cpumgr.C
+++ b/src/kernel/cpumgr.C
@@ -33,6 +33,7 @@
#include <kernel/timemgr.H>
#include <sys/sync.h>
#include <kernel/cpuid.H>
+#include <kernel/ptmgr.H>
cpu_t* CpuManager::cv_cpus[CpuManager::MAXCPUS] = { NULL };
bool CpuManager::cv_shutdown_requested = false;
@@ -141,6 +142,7 @@ void CpuManager::startCPU(ssize_t i)
// Create idle task.
cpu->idle_task = TaskManager::createIdleTask();
cpu->idle_task->cpu = cpu;
+ cpu->periodic_count = 0;
printk("done\n");
}
@@ -160,3 +162,33 @@ void CpuManager::startSlaveCPU(cpu_t* cpu)
return;
}
+
+void CpuManager::executePeriodics(cpu_t * i_cpu)
+{
+ if(i_cpu->master)
+ {
+ ++(i_cpu->periodic_count);
+ if(0 == (i_cpu->periodic_count % CPU_PERIODIC_CHECK_MEMORY))
+ {
+ uint64_t pcntAvail = PageManager::queryAvail();
+ if(pcntAvail < 16) // Less than 16% pages left TODO 16 ok?
+ {
+ VmmManager::flushPageTable();
+ ++(i_cpu->periodic_count); // prevent another flush below
+ if(pcntAvail < 5) // TODO 5% ok
+ {
+ VmmManager::castOutPages(VmmManager::CRITICAL);
+ }
+ else
+ {
+ VmmManager::castOutPages(VmmManager::NORMAL);
+ }
+ }
+ }
+ if(0 == (i_cpu->periodic_count % CPU_PERIODIC_FLUSH_PAGETABLE))
+ {
+ VmmManager::flushPageTable();
+ }
+ }
+}
+
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index f74ee89c1..826e1bc4d 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -44,7 +44,12 @@ void PageManager::freePage(void* p, size_t n)
return pmgr._freePage(p, n);
}
-PageManager::PageManager() : iv_pagesAvail(0)
+uint64_t PageManager::queryAvail()
+{
+ return Singleton<PageManager>::instance()._queryAvail();
+}
+
+PageManager::PageManager() : iv_pagesAvail(0), iv_pagesTotal(0)
{
// Determine first page of un-allocated memory.
uint64_t addr = (uint64_t) VFS_LAST_ADDRESS;
@@ -55,6 +60,7 @@ PageManager::PageManager() : iv_pagesAvail(0)
page_t* page = (page_t*)((void*) addr);
size_t length = (MEMLEN - addr) / PAGESIZE;
+ iv_pagesTotal = length;
// Update statistics.
__sync_add_and_fetch(&iv_pagesAvail, length);
@@ -156,3 +162,4 @@ void PageManager::push_bucket(page_t* p, size_t n)
if (n >= BUCKETS) return;
first_page[n].push(p);
}
+
diff --git a/src/kernel/ptmgr.C b/src/kernel/ptmgr.C
index 0feffb7e6..92d211888 100644
--- a/src/kernel/ptmgr.C
+++ b/src/kernel/ptmgr.C
@@ -251,6 +251,10 @@ void PageTableManager::printPT( void )
Singleton<PageTableManager>::instance()._printPT();
}
+void PageTableManager::flush( void )
+{
+ Singleton<PageTableManager>::instance()._flush();
+}
/********************
Private/Protected Methods
@@ -1102,3 +1106,21 @@ void PageTableManager::pushUsageStats( PageTableEntry* i_pte )
uint64_t va = getVirtAddrFromPTE(i_pte);
SegmentManager::updateRefCount( va, stats );
}
+
+void PageTableManager::_flush( void )
+{
+ if( ivTABLE )
+ {
+ return;
+ }
+
+ PageTableEntry* pte = (PageTableEntry*)getAddress();
+ uint64_t num_ptes = getSize() / sizeof(PageTableEntry);
+ for (uint64_t i = 0; i < num_ptes; ++i)
+ {
+ updateLRU( pte );
+ pushUsageStats ( pte );
+ ++pte;
+ }
+}
+
diff --git a/src/kernel/scheduler.C b/src/kernel/scheduler.C
index 7b57792a5..28f70debd 100644
--- a/src/kernel/scheduler.C
+++ b/src/kernel/scheduler.C
@@ -77,6 +77,8 @@ void Scheduler::setNextRunnable()
task_t* t = NULL;
cpu_t* cpu = CpuManager::getCurrentCPU();
+ CpuManager::executePeriodics(cpu);
+
// Check for ready task in local run-queue, if it exists.
if (NULL != cpu->scheduler_extra)
{
diff --git a/src/kernel/segmentmgr.C b/src/kernel/segmentmgr.C
index 890bd1812..0b4370cfa 100644
--- a/src/kernel/segmentmgr.C
+++ b/src/kernel/segmentmgr.C
@@ -55,6 +55,11 @@ void SegmentManager::updateRefCount( uint64_t i_vaddr,
Singleton<SegmentManager>::instance()._updateRefCount(i_vaddr,i_stats);
}
+void SegmentManager::castOutPages(uint64_t i_type)
+{
+ Singleton<SegmentManager>::instance()._castOutPages(i_type);
+}
+
bool SegmentManager::_handlePageFault(task_t* i_task, uint64_t i_addr)
{
size_t segId = getSegmentIdFromAddress(i_addr);
@@ -122,7 +127,7 @@ void SegmentManager::_updateRefCount( uint64_t i_vaddr,
PageTableManager::UsageStats_t i_stats )
{
// Get segment ID from effective address.
- size_t segId = i_vaddr >> SLBE_s;
+ size_t segId = getSegmentIdFromAddress(i_vaddr);
// Call contained segment object to update the reference count
if ((segId < MAX_SEGMENTS) && (NULL != iv_segments[segId]))
@@ -130,3 +135,14 @@ void SegmentManager::_updateRefCount( uint64_t i_vaddr,
iv_segments[segId]->updateRefCount( i_vaddr, i_stats );
}
}
+
+void SegmentManager::_castOutPages(uint64_t i_type)
+{
+ for (size_t i = 0; i < MAX_SEGMENTS; i++)
+ {
+ if (NULL != iv_segments[i])
+ {
+ iv_segments[i]->castOutPages(i_type);
+ }
+ }
+}
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index 4009ff380..b2026944e 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -72,6 +72,16 @@ uint64_t VmmManager::findPhysicalAddress(uint64_t i_vaddr)
return Singleton<VmmManager>::instance()._findPhysicalAddress(i_vaddr);
}
+void VmmManager::castOutPages(VmmManager::castout_t i_ct)
+{
+ Singleton<VmmManager>::instance()._castOutPages(i_ct);
+}
+
+void VmmManager::flushPageTable( void )
+{
+ Singleton<VmmManager>::instance()._flushPageTable();
+}
+
/**
* STATIC
* @brief DEPRECATED
@@ -184,3 +194,21 @@ int VmmManager::_mmSetPermission(void* i_va, uint64_t i_size, PAGE_PERMISSIONS i
return rc;
}
+
+void VmmManager::_castOutPages(VmmManager::castout_t i_ct)
+{
+ lock.lock();
+
+ SegmentManager::castOutPages((uint64_t)i_ct);
+
+ lock.unlock();
+}
+
+void VmmManager::_flushPageTable( void )
+{
+ lock.lock();
+
+ PageTableManager::flush();
+
+ lock.unlock();
+}
OpenPOWER on IntegriCloud