summaryrefslogtreecommitdiffstats
path: root/src/kernel
diff options
context:
space:
mode:
authorPatrick Williams <iawillia@us.ibm.com>2010-07-08 17:42:40 -0500
committerPatrick Williams <iawillia@us.ibm.com>2010-07-08 17:42:40 -0500
commit204c3d248426c2ba0b332c05994d67a80f49f958 (patch)
treed0bbf0ca1b17349458cc938fd08846fa1c74e2ff /src/kernel
parent2c2101232adf2b134cf408f05f00a21dc5b8b0f3 (diff)
downloadtalos-hostboot-204c3d248426c2ba0b332c05994d67a80f49f958.tar.gz
talos-hostboot-204c3d248426c2ba0b332c05994d67a80f49f958.zip
MMIO mapping support and syscalls.
Diffstat (limited to 'src/kernel')
-rw-r--r--src/kernel/exception.C57
-rw-r--r--src/kernel/start.S10
-rw-r--r--src/kernel/syscall.C21
-rw-r--r--src/kernel/vmmmgr.C186
4 files changed, 265 insertions, 9 deletions
diff --git a/src/kernel/exception.C b/src/kernel/exception.C
index 98d36c35c..040284d72 100644
--- a/src/kernel/exception.C
+++ b/src/kernel/exception.C
@@ -3,6 +3,7 @@
#include <kernel/task.H>
#include <kernel/taskmgr.H>
#include <kernel/ppcarch.H>
+#include <kernel/vmmmgr.H>
namespace Systemcalls { void TaskEnd(task_t*); }
namespace ExceptionHandles
@@ -33,6 +34,62 @@ void kernel_execute_prog_ex()
}
}
// Bits of the DSISR this handler knows how to service.  Currently only the
// PTE-miss condition (bit 0x40000000) is handled, so the mask and the
// PTEMISS value are intentionally identical; the mask exists so more
// handled conditions can be OR'd in later without changing the switch.
const uint64_t EXCEPTION_DSISR_MASK = 0x0000000040000000;
const uint64_t EXCEPTION_DSISR_PTEMISS = 0x0000000040000000;
+
+extern "C"
+void kernel_execute_data_storage()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ uint64_t exception = ppc_getDSISR() & EXCEPTION_DSISR_MASK;
+
+ bool handled = false;
+ switch(exception)
+ {
+ case EXCEPTION_DSISR_PTEMISS:
+ handled = VmmManager::pteMiss(t);
+ break;
+ }
+ if (!handled)
+ {
+ printk("Data Storage exception on %d: %llx, %llx\n",
+ t->tid, ppc_getDAR(), ppc_getDSISR());
+ Systemcalls::TaskEnd(t);
+ }
+}
+
+extern "C"
+void kernel_execute_data_segment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Data Segment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_inst_storage()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Inst Storage exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_inst_segment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Inst Segment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_alignment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Alignment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
namespace ExceptionHandles
{
bool HvEmulation(task_t* t)
diff --git a/src/kernel/start.S b/src/kernel/start.S
index 6df27281c..3f7c2a5f8 100644
--- a/src/kernel/start.S
+++ b/src/kernel/start.S
@@ -102,12 +102,12 @@ hbi_pre_phyp_breakpoint:
b _start
UNIMPL_INTERRUPT(machine_check, 0x200)
-UNIMPL_INTERRUPT(data_storage, 0x300)
-UNIMPL_INTERRUPT(data_segment, 0x380)
-UNIMPL_INTERRUPT(inst_storage, 0x400)
-UNIMPL_INTERRUPT(inst_segment, 0x480)
+STD_INTERRUPT(data_storage, 0x300)
+STD_INTERRUPT(data_segment, 0x380)
+STD_INTERRUPT(inst_storage, 0x400)
+STD_INTERRUPT(inst_segment, 0x480)
UNIMPL_INTERRUPT(external, 0x500)
-UNIMPL_INTERRUPT(alignment, 0x600)
+STD_INTERRUPT(alignment, 0x600)
STD_INTERRUPT(prog_ex, 0x700)
UNIMPL_INTERRUPT(fp_unavail, 0x800)
STD_INTERRUPT(decrementer, 0x900)
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index b969745ca..7ba14e9b8 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -40,6 +40,8 @@ namespace Systemcalls
void MsgSendRecv(task_t*);
void MsgRespond(task_t*);
void MsgWait(task_t*);
+ void MmioMap(task_t*);
+ void MmioUnmap(task_t*);
syscall syscalls[] =
{
@@ -62,6 +64,9 @@ namespace Systemcalls
&MsgSendRecv,
&MsgRespond,
&MsgWait,
+
+ &MmioMap,
+ &MmioUnmap,
};
};
@@ -321,6 +326,22 @@ namespace Systemcalls
}
mq->lock.unlock();
}
+
+ void MmioMap(task_t* t)
+ {
+ void* ra = (void*)TASK_GETARG0(t);
+ size_t pages = TASK_GETARG1(t);
+
+ TASK_SETRTN(t, (uint64_t) VmmManager::mmioMap(ra,pages));
+ }
+
+ void MmioUnmap(task_t* t)
+ {
+ void* ea = (void*)TASK_GETARG0(t);
+ size_t pages = TASK_GETARG1(t);
+
+ TASK_SETRTN(t, VmmManager::mmioUnmap(ea,pages));
+ }
};
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index de7029483..71fb33523 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -1,8 +1,11 @@
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
+#include <kernel/ppcarch.H>
-VmmManager::VmmManager()
+extern void* data_load_address;
+
// Construct the VMM manager.  Explicitly default-initializes the lock
// member that serializes the PTE-miss and MMIO-map paths.
VmmManager::VmmManager() : lock()
{
}
@@ -26,6 +29,21 @@ void VmmManager::init_slb()
v.initSDR1();
}
+bool VmmManager::pteMiss(task_t* t)
+{
+ return Singleton<VmmManager>::instance()._pteMiss(t);
+}
+
+void* VmmManager::mmioMap(void* ra, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioMap(ra,pages);
+}
+
+int VmmManager::mmioUnmap(void* ea, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioUnmap(ea,pages);
+}
+
void VmmManager::initSLB()
{
register uint64_t slbRS, slbRB;
@@ -50,10 +68,19 @@ void VmmManager::initPTEs()
// Set up linear map.
for(int i = 0; i < (FULL_MEM_SIZE / PAGESIZE); i++)
{
- pte_t& pte = getPte(i, 0);
+ ACCESS_TYPES access = NORMAL_ACCESS;
+ if (0 == i)
+ {
+ access = NO_USER_ACCESS;
+ }
+ else if (((uint64_t)&data_load_address) > (i * PAGESIZE))
+ {
+ access = READ_O_ACCESS;
+ }
+ volatile pte_t& pte = getPte(i, 0);
defaultPte(pte);
setTid(LinearSpace, pte);
- setAccess( (0 == i) ? NO_USER_ACCESS : NORMAL_ACCESS, pte);
+ setAccess(access, pte);
setPage(i, pte);
setValid(true, pte);
}
@@ -66,5 +93,156 @@ void VmmManager::initSDR1()
asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}
// Page table storage lives at the fixed HTABORG address (the same value
// programmed into SDR1 by initSDR1()).
VmmManager::pte_t* VmmManager::page_table
    = (VmmManager::pte_t*) HTABORG;
+
// Handle a page-table-entry miss for task t.
// Returns true if a PTE was installed (or was already present) and the
// access can be retried; false if the fault cannot be serviced and the
// caller should treat it as fatal.
// NOTE(review): the address space is selected by dividing the effective
// address by FULL_MEM_SIZE, i.e. each "space" (Linear, MMIO, ...) is a
// FULL_MEM_SIZE-sized window of effective addresses.
bool VmmManager::_pteMiss(task_t* t)
{
    lock.lock();

    uint64_t effAddr = ppc_getDAR();
    uint64_t effPid = effAddr / FULL_MEM_SIZE;


    if (effPid == LinearSpace)
    {
        lock.unlock();
        return false; // Should not get this exception in Linear space
                      // because it is all mapped in all the time.
    }

    // Check for exception in MMIO vs Dynamic Stack space.
    if (effPid <= MMIOSpace)
    {
        // Do MMIO mapping.
        uint64_t effAddrPage = (effAddr - FULL_MEM_SIZE) / PAGESIZE;

        // Check for valid entry in MMIO map.
        // A zero entry means mmioMap() never populated this page.
        uint64_t mmioMapEntry = mmioMapT[effAddrPage];
        if (0 == mmioMapEntry)
        {
            lock.unlock();
            return false;
        }

        // Entry packs the real address with a ref count in the low
        // (page-offset) bits, so dividing by PAGESIZE yields the real page.
        uint64_t mmioMapPage = mmioMapEntry / PAGESIZE;

        // Update PTE.
        volatile pte_t& pte = getPte(effAddrPage, 1);
        if ((getTid(pte) == effPid) &&
            (getPage(pte) == mmioMapPage) &&
            (isValid(pte)))
        {
            // Already present, maybe another thread.
            lock.unlock();
            return true;
        }
        if (isValid(pte)) // Invalidate if already valid.
            setValid(false, pte);
        defaultPte(pte);
        setTid(effPid, pte);
        setPage(mmioMapPage, pte);
        setAccess(CI_ACCESS, pte);  // MMIO must be cache-inhibited.
        setValid(true, pte);

        lock.unlock();
        return true;
    }
    else
    {
        // TODO: Do dynamic stack mapping.
        lock.unlock();
        return false;
    }
}
+
+void* VmmManager::_mmioMap(void* ra, size_t pages)
+{
+ lock.lock();
+
+ ssize_t match = -1;
+ uint64_t _ra = (uint64_t) ra;
+
+ // Search for memory already mapped in.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if ((mmioMapT[i] & ~(PAGESIZE - 1) == _ra))
+ {
+ if (i + pages < MMIO_T_ENTRIES)
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if ((mmioMapT[i+j] & ~(PAGESIZE - 1) !=
+ (_ra + (j*PAGESIZE))))
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+ }
+
+ // Found region already mapped in.
+ if (-1 != match)
+ {
+ // Increment ref counts.
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i]++;
+ }
+ // Return calculated effective address.
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // Search for empty region in map.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if (0 == mmioMapT[i])
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if (0 != mmioMapT[i+j])
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+
+ // Found region to use for map.
+ if (-1 != match)
+ {
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i] = _ra + 1; // RA + ref count of 1.
+ }
+
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // No entry found and no space for more, return NULL.
+ lock.unlock();
+ return NULL;
+}
+
// Unmap an MMIO effective-address range previously created by _mmioMap().
// TODO: Not yet implemented -- mappings are never released and their
// reference counts are never decremented; always fails with -1.
int VmmManager::_mmioUnmap(void* ea, size_t pages)
{
    return -1;
}
-VmmManager::pte_t* VmmManager::page_table = (VmmManager::pte_t*) HTABORG;
OpenPOWER on IntegriCloud