path: root/src/kernel/vmmmgr.C
author     Patrick Williams <iawillia@us.ibm.com>          2011-07-08 19:33:40 -0500
committer  A. Patrick Williams III <iawillia@us.ibm.com>   2011-07-20 14:58:43 -0500
commit     471f09f1a9bcc46fc385fa8aca776cb682075c0b (patch)
tree       e0a4969825799dcc4c28a71975cb68439f507390 /src/kernel/vmmmgr.C
parent     3ecf7085ccc55eb4f815a62f47ea09f55bb6688e (diff)
VMM Improvements.
- Segment Manager
- Base / Device Segments
- Block for Base image.

Change-Id: Ic0c058e5c5b210ec1c48d30f6ed9f9837d74a3c8
Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/193
Tested-by: Jenkins Server
Reviewed-by: MATTHEW S. BARTH <msbarth@us.ibm.com>
Diffstat (limited to 'src/kernel/vmmmgr.C')
-rw-r--r--  src/kernel/vmmmgr.C  193
1 file changed, 21 insertions, 172 deletions
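
For orientation before the diff: the commit moves SLB setup, MMIO mapping, and page-fault handling out of VmmManager and behind a SegmentManager with per-segment objects. Below is a minimal sketch of the assumed interface, for illustration only; the class names and the initSLB / handlePageFault / mmioMap / mmioUnmap entry points appear in the diff, while the registry, slot numbering, and every other signature here are assumptions.

// Hedged sketch, not the real kernel headers: only the names visible in the
// diff below are real; the registry and its limits are assumptions.
#include <stdint.h>
#include <stddef.h>

struct task_t;                                   // defined elsewhere in the kernel

class Segment                                    // assumed common interface
{
    public:
        virtual ~Segment() {}
        virtual void initSLBEntry() const = 0;   // program this segment's SLB entry
        virtual bool handlePageFault(task_t* i_task, uint64_t i_addr) = 0;
};

class SegmentManager
{
    public:
        static void initSLB();                   // walk the registry, slbmte each segment
        static bool handlePageFault(task_t* i_task, uint64_t i_addr);
        static void addSegment(Segment* i_seg, size_t i_slot);  // assumed registration API
    private:
        enum { MAX_SEGMENTS = 8 };               // assumed registry size
        static Segment* cv_segments[MAX_SEGMENTS];
};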
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index ba1191d90..98e5a0410 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -1,8 +1,12 @@
+#include <limits.h>
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
#include <arch/ppc.H>
#include <kernel/ptmgr.H>
+#include <kernel/segmentmgr.H>
+#include <kernel/devicesegment.H>
+#include <kernel/basesegment.H>
extern void* data_load_address;
@@ -13,10 +17,13 @@ VmmManager::VmmManager() : lock()
void VmmManager::init()
{
printk("Starting VMM...\n");
-
+
VmmManager& v = Singleton<VmmManager>::instance();
- v.initSLB();
+ BaseSegment::init();
+ DeviceSegment::init();
+ SegmentManager::initSLB();
+
v.initPTEs();
v.initSDR1();
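
Building on the interface sketched after the diffstat, the three new calls above presumably boil down to each segment registering itself before the SLB is programmed. A hedged sketch follows; only the init() entry points appear in the diff, the registration call and slot assignments are assumptions.

// Hedged sketch of the assumed registration behind BaseSegment::init() and
// DeviceSegment::init(); slot numbers and addSegment() are illustrative only.
void BaseSegment::init()
{
    static BaseSegment l_instance;               // assumed static lifetime
    SegmentManager::addSegment(&l_instance, 0);  // assumed: slot 0 = base (linear) space
}

void DeviceSegment::init()
{
    static DeviceSegment l_instance;             // assumed static lifetime
    SegmentManager::addSegment(&l_instance, 1);  // assumed: slot 1 = MMIO device space
}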
@@ -26,46 +33,24 @@ void VmmManager::init()
void VmmManager::init_slb()
{
VmmManager& v = Singleton<VmmManager>::instance();
- v.initSLB();
+ SegmentManager::initSLB();
+
v.initSDR1();
}
-bool VmmManager::pteMiss(task_t* t)
+bool VmmManager::pteMiss(task_t* t, uint64_t effAddr)
{
- return Singleton<VmmManager>::instance()._pteMiss(t);
+ return Singleton<VmmManager>::instance()._pteMiss(t, effAddr);
}
void* VmmManager::mmioMap(void* ra, size_t pages)
{
- return Singleton<VmmManager>::instance()._mmioMap(ra,pages);
+ return DeviceSegment::mmioMap(ra, pages);
}
int VmmManager::mmioUnmap(void* ea, size_t pages)
{
- return Singleton<VmmManager>::instance()._mmioUnmap(ea,pages);
-}
-
-void VmmManager::initSLB()
-{
- register uint64_t slbRS, slbRB;
-
- // ESID = 0, V = 1, Index = 1.
- slbRB = 0x0000000008000001;
-
- // B = 01 (1TB), VSID = 0, Ks = 0, Kp = 1, NLCLP = 0
- slbRS = 0x4000000000000400;
-
- asm volatile("slbia" ::: "memory");
- asm volatile("isync" ::: "memory");
- asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
-
- // ESID = 2TB, V = 1, Index = 3
- slbRB = 0x0000020008000003;
- // B = 01 (1TB), VSID = 2TB, Ks = 0, Kp = 1, NLCLP = 0
- slbRS = 0x4000020000000400;
-
- asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
- asm volatile("isync" ::: "memory");
+ return DeviceSegment::mmioUnmap(ea, pages);
}
void VmmManager::initPTEs()
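
The removed initSLB above hard-coded two 1TB SLB entries via slbmte. For reference, a hedged sketch of the equivalent per-entry encoding the SegmentManager would presumably perform; the bit layouts follow the comments in the removed code, but the helper itself is an assumption.

// Hedged sketch: build and install the slbmte operands for one 1TB segment,
// using the field layout documented in the removed initSLB (valid bit
// 0x08000000 in RB; B = 01 for a 1TB segment and Kp = 0x400 in RS). Valid for
// the 1TB-aligned ESIDs used above; the helper name is an assumption. The
// removed code issued slbia/isync once before installing the first entry.
static void installSlbEntry(uint64_t i_esid, uint64_t i_vsid, uint64_t i_index)
{
    uint64_t slbRB = i_esid | 0x0000000008000000ull | i_index;   // ESID | V | index
    uint64_t slbRS = 0x4000000000000000ull | i_vsid | 0x400ull;  // B=01 | VSID | Kp

    asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
    asm volatile("isync" ::: "memory");
}

// Reproducing the two removed entries:
//   installSlbEntry(0, 0, 1);                                 // ESID 0,   VSID 0,   index 1
//   installSlbEntry(0x20000000000ull, 0x20000000000ull, 3);   // ESID 2TB, VSID 2TB, index 3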
@@ -73,21 +58,8 @@ void VmmManager::initPTEs()
// Initialize and invalidate the page table
PageTableManager::init();
- // Set up linear map for every 4K page
- for(size_t i = 0; i < (FULL_MEM_SIZE / PAGESIZE); i++)
- {
- ACCESS_TYPES access = NORMAL_ACCESS;
- if (0 == i)
- {
- access = NO_USER_ACCESS;
- }
- else if (((uint64_t)&data_load_address) > (i * PAGESIZE))
- {
- access = READ_O_ACCESS;
- }
-
- PageTableManager::addEntry( i*PAGESIZE, i, access );
- }
+ // No need to pre-populate PTE entries here; the PTE-miss page fault
+ // handler will add them as needed.
}
void VmmManager::initSDR1()
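
With the eager linear-map loop removed from initPTEs, the same access policy presumably moves into the base segment's fault path and runs one page at a time. A hedged sketch that reuses the policy from the removed loop; PAGESIZE, ACCESS_TYPES, data_load_address, and PageTableManager::addEntry come from the code in this diff, while the wrapper function itself is an assumption.

// Hedged sketch: lazy PTE insertion for the identity-mapped base region,
// with the access decisions copied from the removed initPTEs loop.
static bool mapBasePage(uint64_t i_effAddr)
{
    size_t l_page = i_effAddr / PAGESIZE;

    ACCESS_TYPES l_access = NORMAL_ACCESS;
    if (0 == l_page)
    {
        l_access = NO_USER_ACCESS;               // page 0: kernel-only (NULL protection)
    }
    else if (((uint64_t)&data_load_address) > (l_page * PAGESIZE))
    {
        l_access = READ_O_ACCESS;                // read-only below the data load address
    }

    PageTableManager::addEntry(l_page * PAGESIZE, l_page, l_access);
    return true;                                 // fault resolved
}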
@@ -97,138 +69,15 @@ void VmmManager::initSDR1()
asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}
-bool VmmManager::_pteMiss(task_t* t)
+bool VmmManager::_pteMiss(task_t* t, uint64_t effAddr)
{
lock.lock();
- uint64_t effAddr = getDAR();
- uint64_t effPid = effAddr / FULL_MEM_SIZE;
-
-
- if (effPid <= LinearSpace)
- {
- lock.unlock();
- return false; // Should not get this exception in Linear space
- // because it is all mapped in all the time.
- }
-
- // Check for exception in MMIO vs Dynamic Stack space.
- if (effPid <= MMIOSpace)
- {
- // Do MMIO mapping.
- uint64_t effAddrPage = (effAddr - FULL_MEM_SIZE) / PAGESIZE;
-
- // Check for valid entry in MMIO map.
- uint64_t mmioMapEntry = mmioMapT[effAddrPage];
- if (0 == mmioMapEntry)
- {
- lock.unlock();
- return false;
- }
-
- uint64_t mmioMapPage = mmioMapEntry / PAGESIZE;
-
- // Update PTE.
- PageTableManager::addEntry( effAddr, mmioMapPage, CI_ACCESS );
-
- lock.unlock();
- return true;
- }
- else
- {
- // TODO: Do dynamic stack mapping.
- lock.unlock();
- return false;
- }
-}
+ bool rc = SegmentManager::handlePageFault(t, effAddr);
-void* VmmManager::_mmioMap(void* ra, size_t pages)
-{
- lock.lock();
-
- ssize_t match = -1;
- uint64_t _ra = (uint64_t) ra;
-
- // Search for memory already mapped in.
- for (size_t i = 0; i < MMIO_T_ENTRIES; i++)
- {
- if ((mmioMapT[i] & ~(PAGESIZE - 1)) == _ra)
- {
- if (i + pages < MMIO_T_ENTRIES)
- {
- bool matched = true;
- for (size_t j = 1; j < pages; j++)
- {
- if ((mmioMapT[i+j] & ~(PAGESIZE - 1)) !=
- (_ra + (j*PAGESIZE)))
- {
- matched = false;
- break;
- }
- }
- if (matched)
- {
- match = i;
- break;
- }
- }
- }
- }
-
- // Found region already mapped in.
- if (-1 != match)
- {
- // Increment ref counts.
- for (size_t i = 0; i < pages; i++)
- {
- mmioMapT[match + i]++;
- }
- // Return calculated effective address.
- lock.unlock();
- return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
- }
-
- // Search for empty region in map.
- for (size_t i = 0; i < MMIO_T_ENTRIES; i++)
- {
- if (0 == mmioMapT[i])
- {
- bool matched = true;
- for (size_t j = 1; j < pages; j++)
- {
- if (0 != mmioMapT[i+j])
- {
- matched = false;
- break;
- }
- }
- if (matched)
- {
- match = i;
- break;
- }
- }
- }
-
- // Found region to use for map.
- if (-1 != match)
- {
- for (size_t i = 0; i < pages; i++)
- {
- mmioMapT[match + i] = _ra + 1; // RA + ref count of 1.
- }
-
- lock.unlock();
- return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
- }
-
- // No entry found and no space for more, return NULL.
lock.unlock();
- return NULL;
-}
-int VmmManager::_mmioUnmap(void* ea, size_t pages)
-{
- return -1;
+ return rc;
}
+
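
The removed _pteMiss distinguished the linear, MMIO, and stack regions by dividing the effective address by FULL_MEM_SIZE; SegmentManager::handlePageFault presumably performs an equivalent dispatch across registered segments. A hedged sketch using the registry assumed in the earlier interface sketch; only the function name and its (task, effAddr) signature appear in the diff.

// Hedged sketch of the dispatch the removed _pteMiss used to do inline;
// cv_segments and MAX_SEGMENTS are the assumed registry sketched earlier.
bool SegmentManager::handlePageFault(task_t* i_task, uint64_t i_effAddr)
{
    size_t l_idx = i_effAddr / FULL_MEM_SIZE;    // region index, as in the removed code
    if ((l_idx >= MAX_SEGMENTS) || (NULL == cv_segments[l_idx]))
    {
        return false;                            // unowned region: genuine fault
    }
    return cv_segments[l_idx]->handlePageFault(i_task, i_effAddr);
}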