path: root/src/kernel/vmmmgr.C
author    Patrick Williams <iawillia@us.ibm.com>  2010-07-08 17:42:40 -0500
committer Patrick Williams <iawillia@us.ibm.com>  2010-07-08 17:42:40 -0500
commit    204c3d248426c2ba0b332c05994d67a80f49f958 (patch)
tree      d0bbf0ca1b17349458cc938fd08846fa1c74e2ff /src/kernel/vmmmgr.C
parent    2c2101232adf2b134cf408f05f00a21dc5b8b0f3 (diff)
MMIO mapping support and syscalls.
Diffstat (limited to 'src/kernel/vmmmgr.C')
-rw-r--r--  src/kernel/vmmmgr.C  186
1 file changed, 182 insertions(+), 4 deletions(-)
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index de7029483..71fb33523 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -1,8 +1,11 @@
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
+#include <kernel/ppcarch.H>
-VmmManager::VmmManager()
+extern void* data_load_address;
+
+VmmManager::VmmManager() : lock()
{
}
@@ -26,6 +29,21 @@ void VmmManager::init_slb()
v.initSDR1();
}
+bool VmmManager::pteMiss(task_t* t)
+{
+ return Singleton<VmmManager>::instance()._pteMiss(t);
+}
+
+void* VmmManager::mmioMap(void* ra, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioMap(ra,pages);
+}
+
+int VmmManager::mmioUnmap(void* ea, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioUnmap(ea,pages);
+}
+
void VmmManager::initSLB()
{
register uint64_t slbRS, slbRB;
@@ -50,10 +68,19 @@ void VmmManager::initPTEs()
// Set up linear map.
for(int i = 0; i < (FULL_MEM_SIZE / PAGESIZE); i++)
{
- pte_t& pte = getPte(i, 0);
+ ACCESS_TYPES access = NORMAL_ACCESS;
+ if (0 == i)
+ {
+ access = NO_USER_ACCESS;
+ }
+ else if (((uint64_t)&data_load_address) > (i * PAGESIZE))
+ {
+ access = READ_O_ACCESS;
+ }
+ volatile pte_t& pte = getPte(i, 0);
defaultPte(pte);
setTid(LinearSpace, pte);
- setAccess( (0 == i) ? NO_USER_ACCESS : NORMAL_ACCESS, pte);
+ setAccess(access, pte);
setPage(i, pte);
setValid(true, pte);
}
@@ -66,5 +93,156 @@ void VmmManager::initSDR1()
asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}
+VmmManager::pte_t* VmmManager::page_table
+ = (VmmManager::pte_t*) HTABORG;
+
+bool VmmManager::_pteMiss(task_t* t)
+{
+ lock.lock();
+
+ uint64_t effAddr = ppc_getDAR();
+ uint64_t effPid = effAddr / FULL_MEM_SIZE;
+
+
+ if (effPid == LinearSpace)
+ {
+ lock.unlock();
+ return false; // Should not get this exception in Linear space
+ // because it is all mapped in all the time.
+ }
+
+ // Check for exception in MMIO vs Dynamic Stack space.
+ if (effPid <= MMIOSpace)
+ {
+ // Do MMIO mapping.
+ uint64_t effAddrPage = (effAddr - FULL_MEM_SIZE) / PAGESIZE;
+
+ // Check for valid entry in MMIO map.
+ uint64_t mmioMapEntry = mmioMapT[effAddrPage];
+ if (0 == mmioMapEntry)
+ {
+ lock.unlock();
+ return false;
+ }
+
+ uint64_t mmioMapPage = mmioMapEntry / PAGESIZE;
+
+ // Update PTE.
+ volatile pte_t& pte = getPte(effAddrPage, 1);
+ if ((getTid(pte) == effPid) &&
+ (getPage(pte) == mmioMapPage) &&
+ (isValid(pte)))
+ {
+ // Already present, maybe another thread.
+ lock.unlock();
+ return true;
+ }
+ if (isValid(pte)) // Invalidate if already valid.
+ setValid(false, pte);
+ defaultPte(pte);
+ setTid(effPid, pte);
+ setPage(mmioMapPage, pte);
+ setAccess(CI_ACCESS, pte);
+ setValid(true, pte);
+
+ lock.unlock();
+ return true;
+ }
+ else
+ {
+ // TODO: Do dynamic stack mapping.
+ lock.unlock();
+ return false;
+ }
+}
+
+void* VmmManager::_mmioMap(void* ra, size_t pages)
+{
+ lock.lock();
+
+ ssize_t match = -1;
+ uint64_t _ra = (uint64_t) ra;
+
+ // Search for memory already mapped in.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if ((mmioMapT[i] & ~(PAGESIZE - 1)) == _ra)
+ {
+ if (i + pages < MMIO_T_ENTRIES)
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if ((mmioMapT[i+j] & ~(PAGESIZE - 1)) !=
+ (_ra + (j*PAGESIZE)))
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+ }
+
+ // Found region already mapped in.
+ if (-1 != match)
+ {
+ // Increment ref counts.
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i]++;
+ }
+ // Return calculated effective address.
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // Search for empty region in map.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if (0 == mmioMapT[i])
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if (0 != mmioMapT[i+j])
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+
+ // Found region to use for map.
+ if (-1 != match)
+ {
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i] = _ra + 1; // RA + ref count of 1.
+ }
+
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // No entry found and no space for more, return NULL.
+ lock.unlock();
+ return NULL;
+}
+
+int VmmManager::_mmioUnmap(void* ea, size_t pages)
+{
+ return -1;
+}
-VmmManager::pte_t* VmmManager::page_table = (VmmManager::pte_t*) HTABORG;
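
For readers of this change, the sketch below illustrates how kernel code might drive the new interface: mmioMap() reserves consecutive mmioMapT entries (each entry packs the page-aligned real address together with a reference count in the low page-offset bits, which is why lookups mask with ~(PAGESIZE - 1)) and returns an effective address above FULL_MEM_SIZE; the cache-inhibited PTE is then installed lazily by _pteMiss() on the first access. This is only a sketch against the static VmmManager entry points added here; the device real address, page count, and printk calls are illustrative assumptions, not part of this commit.

// Usage sketch (not part of this commit).  The real address and page
// count are made-up examples; printk is assumed from kernel/console.H.
#include <kernel/vmmmgr.H>
#include <kernel/console.H>

void mmio_example()
{
    void*  ra    = (void*) 0x0800000000ull;   // hypothetical device real address
    size_t pages = 2;

    // Reserve MMIO map entries; returns an EA above FULL_MEM_SIZE,
    // or NULL if no free run of entries is available.
    void* ea = VmmManager::mmioMap(ra, pages);
    if (NULL == ea)
    {
        printk("mmioMap failed\n");
        return;
    }

    // First access faults; _pteMiss() installs a CI_ACCESS PTE for it.
    volatile uint64_t* reg = (volatile uint64_t*) ea;
    printk("reg[0] = %lx\n", reg[0]);

    // Unmapping is a stub in this commit (_mmioUnmap returns -1);
    // the call only shows the intended interface.
    VmmManager::mmioUnmap(ea, pages);
}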