author    Patrick Williams <iawillia@us.ibm.com>    2010-07-08 17:42:40 -0500
committer Patrick Williams <iawillia@us.ibm.com>    2010-07-08 17:42:40 -0500
commit    204c3d248426c2ba0b332c05994d67a80f49f958 (patch)
tree      d0bbf0ca1b17349458cc938fd08846fa1c74e2ff
parent    2c2101232adf2b134cf408f05f00a21dc5b8b0f3 (diff)
MMIO mapping support and syscalls.
-rw-r--r--  src/include/kernel/ppcarch.H  |  18
-rw-r--r--  src/include/kernel/syscalls.H |   3
-rw-r--r--  src/include/kernel/vmmmgr.H   |  40
-rw-r--r--  src/include/sys/mmio.h        |  18
-rw-r--r--  src/kernel/exception.C        |  57
-rw-r--r--  src/kernel/start.S            |  10
-rw-r--r--  src/kernel/syscall.C          |  21
-rw-r--r--  src/kernel/vmmmgr.C           | 186
-rw-r--r--  src/lib/makefile              |   1
-rw-r--r--  src/lib/syscall_mmio.C        |  14
-rw-r--r--  src/sys/init/init_main.C      |   6
11 files changed, 354 insertions, 20 deletions
diff --git a/src/include/kernel/ppcarch.H b/src/include/kernel/ppcarch.H
index 71d4516d4..91ee138a2 100644
--- a/src/include/kernel/ppcarch.H
+++ b/src/include/kernel/ppcarch.H
@@ -58,4 +58,22 @@ inline void ppc_setMSR(uint64_t _msr)
asm volatile("mtmsr %0; isync" :: "r" (msr));
}
+__attribute__((always_inline))
+inline uint64_t ppc_getDSISR()
+{
+ register uint64_t dsisr = 0;
+ asm volatile("mfspr %0, 18" : "=r" (dsisr));
+ return dsisr;
+}
+
+__attribute__((always_inline))
+inline uint64_t ppc_getDAR()
+{
+ register uint64_t dar = 0;
+ asm volatile("mfspr %0, 19" : "=r" (dar));
+ return dar;
+}
+
+
+
#endif
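The two new accessors read the data-fault SPRs by number: SPR 18 is the DSISR and SPR 19 the DAR on Power ISA, and the data storage handler added below uses them to decode faults. As a sketch only, the same reads can be written with the extended mnemonics, assuming the assembler accepts them:

    // Sketch, not part of the patch: symbolic-mnemonic equivalents of the
    // mfspr reads above (requires assembler support for mfdsisr/mfdar).
    inline uint64_t ppc_getDSISR_sketch()
    {
        uint64_t dsisr;
        asm volatile("mfdsisr %0" : "=r" (dsisr)); // Same as mfspr %0, 18.
        return dsisr;
    }
    inline uint64_t ppc_getDAR_sketch()
    {
        uint64_t dar;
        asm volatile("mfdar %0" : "=r" (dar));     // Same as mfspr %0, 19.
        return dar;
    }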
diff --git a/src/include/kernel/syscalls.H b/src/include/kernel/syscalls.H
index 9a0672d63..a33a0cef7 100644
--- a/src/include/kernel/syscalls.H
+++ b/src/include/kernel/syscalls.H
@@ -25,6 +25,9 @@ namespace Systemcalls
MSG_RESPOND,
MSG_WAIT,
+ MMIO_MAP,
+ MMIO_UNMAP,
+
SYSCALL_MAX
};
};
diff --git a/src/include/kernel/vmmmgr.H b/src/include/kernel/vmmmgr.H
index e55595ed5..a8bb5ae3f 100644
--- a/src/include/kernel/vmmmgr.H
+++ b/src/include/kernel/vmmmgr.H
@@ -2,6 +2,7 @@
#define __KERNEL_VMMMGR_H
#include <kernel/types.h>
+#include <kernel/spinlock.H>
class VmmManager
{
@@ -26,6 +27,7 @@ class VmmManager
enum ACCESS_TYPES
{
NO_USER_ACCESS,
+ READ_O_ACCESS,
NORMAL_ACCESS,
CI_ACCESS,
};
@@ -37,32 +39,49 @@ class VmmManager
FirstPid,
};
+ enum MMIO_SPACE_INFO
+ {
+ MMIO_T_ENTRIES =
+ FULL_MEM_SIZE * (MMIOSpace - LinearSpace) / PAGESIZE,
+ };
+
static void init();
static void init_slb();
+ static bool pteMiss(task_t*);
+
+ static void* mmioMap(void*, size_t);
+ static int mmioUnmap(void*, size_t);
protected:
VmmManager();
~VmmManager() {};
private:
+ Spinlock lock;
+ uint64_t mmioMapT[MMIO_T_ENTRIES];
+
void initSLB();
void initPTEs();
void initSDR1();
+ bool _pteMiss(task_t*);
+ void* _mmioMap(void*, size_t);
+ int _mmioUnmap(void*, size_t);
+
static pte_t* page_table;
- inline pte_t& getPte(uint64_t pteg, uint64_t idx)
+ inline volatile pte_t& getPte(uint64_t pteg, uint64_t idx)
{
return page_table[pteg * PTEG_SIZE + idx];
}
- inline void defaultPte(pte_t& pte)
+ inline void defaultPte(volatile pte_t& pte)
{
pte.a = 0x4000000000000000; // B = 01 (1TB).
pte.b = 0x0;
}
- inline void setValid(bool valid, pte_t& pte)
+ inline void setValid(bool valid, volatile pte_t& pte)
{
// Adding a page requires EIEIO to ensure update of PTE prior
// to making valid and PTESYNC afterwards.
@@ -78,39 +97,40 @@ class VmmManager
asm volatile("ptesync" ::: "memory");
}
- inline bool isValid(pte_t& pte)
+ inline bool isValid(volatile pte_t& pte)
{
return ((pte.a & 0x01) == 0x01);
}
- inline void setTid(tid_t tid, pte_t& pte)
+ inline void setTid(tid_t tid, volatile pte_t& pte)
{
pte.a &= 0xC00000000000007F;
pte.a |= ((uint64_t) tid) << 7;
}
- inline tid_t getTid(pte_t& pte)
+ inline tid_t getTid(volatile pte_t& pte)
{
return (tid_t) ((pte.a & 0xC00000000000007F) >> 7);
}
- inline void setAccess(ACCESS_TYPES t, pte_t& pte)
+ inline void setAccess(ACCESS_TYPES t, volatile pte_t& pte)
{
uint64_t pteMask = ~0x800000000000007B;
pte.b &= pteMask;
pte.b |= (NO_USER_ACCESS == t ? 0x0000000000000010 :
+ (READ_O_ACCESS == t ? 0x0000000000000011 :
(NORMAL_ACCESS == t ? 0x0000000000000012 :
(CI_ACCESS == t ? 0x000000000000002A :
- 0x0)));
+ 0x0))));
}
- inline void setPage(uint64_t page, pte_t& pte)
+ inline void setPage(uint64_t page, volatile pte_t& pte)
{
pte.b &= ~0x0FFFFFFFFFFFF000;
pte.b |= page << 12;
}
- inline uint64_t getPage(pte_t& pte)
+ inline uint64_t getPage(volatile pte_t& pte)
{
return (pte.b & 0x0FFFFFFFFFFFF000) >> 12;
}
diff --git a/src/include/sys/mmio.h b/src/include/sys/mmio.h
new file mode 100644
index 000000000..6a0e7a02b
--- /dev/null
+++ b/src/include/sys/mmio.h
@@ -0,0 +1,18 @@
+#ifndef __SYS_MMIO_H
+#define __SYS_MMIO_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+void* mmio_map(void* ra, size_t pages);
+int mmio_unmap(void* ea, size_t pages);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
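The new <sys/mmio.h> header gives user tasks a two-call interface: mmio_map() returns an effective address in the MMIO segment backed by the given real address for the requested number of pages, and mmio_unmap() releases the mapping. A minimal usage sketch follows; the 0x800000000 real address is only an example, mirroring the test in init_main.C at the end of this patch, and note that mmio_unmap() is still a stub that returns -1:

    #include <sys/mmio.h>
    #include <stdint.h>

    void mmio_example()
    {
        // Map one page of device space. The PTE itself is installed lazily:
        // the first access faults and _pteMiss installs a cache-inhibited
        // (CI_ACCESS) translation from the mmioMapT table.
        volatile uint64_t* regs =
            (volatile uint64_t*) mmio_map((void*)0x800000000, 1);
        if (regs)
        {
            uint64_t value = regs[0];   // First read takes the PTE-miss path.
            (void) value;
            mmio_unmap((void*)regs, 1); // Stub in this patch; returns -1.
        }
    }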
diff --git a/src/kernel/exception.C b/src/kernel/exception.C
index 98d36c35c..040284d72 100644
--- a/src/kernel/exception.C
+++ b/src/kernel/exception.C
@@ -3,6 +3,7 @@
#include <kernel/task.H>
#include <kernel/taskmgr.H>
#include <kernel/ppcarch.H>
+#include <kernel/vmmmgr.H>
namespace Systemcalls { void TaskEnd(task_t*); }
namespace ExceptionHandles
@@ -33,6 +34,62 @@ void kernel_execute_prog_ex()
}
}
+const uint64_t EXCEPTION_DSISR_MASK = 0x0000000040000000;
+const uint64_t EXCEPTION_DSISR_PTEMISS = 0x0000000040000000;
+
+extern "C"
+void kernel_execute_data_storage()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ uint64_t exception = ppc_getDSISR() & EXCEPTION_DSISR_MASK;
+
+ bool handled = false;
+ switch(exception)
+ {
+ case EXCEPTION_DSISR_PTEMISS:
+ handled = VmmManager::pteMiss(t);
+ break;
+ }
+ if (!handled)
+ {
+ printk("Data Storage exception on %d: %llx, %llx\n",
+ t->tid, ppc_getDAR(), ppc_getDSISR());
+ Systemcalls::TaskEnd(t);
+ }
+}
+
+extern "C"
+void kernel_execute_data_segment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Data Segment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_inst_storage()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Inst Storage exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_inst_segment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Inst Segment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
+extern "C"
+void kernel_execute_alignment()
+{
+ task_t* t = TaskManager::getCurrentTask();
+ printk("Alignment exception, killing task %d\n", t->tid);
+ Systemcalls::TaskEnd(t);
+}
+
namespace ExceptionHandles
{
bool HvEmulation(task_t* t)
diff --git a/src/kernel/start.S b/src/kernel/start.S
index 6df27281c..3f7c2a5f8 100644
--- a/src/kernel/start.S
+++ b/src/kernel/start.S
@@ -102,12 +102,12 @@ hbi_pre_phyp_breakpoint:
b _start
UNIMPL_INTERRUPT(machine_check, 0x200)
-UNIMPL_INTERRUPT(data_storage, 0x300)
-UNIMPL_INTERRUPT(data_segment, 0x380)
-UNIMPL_INTERRUPT(inst_storage, 0x400)
-UNIMPL_INTERRUPT(inst_segment, 0x480)
+STD_INTERRUPT(data_storage, 0x300)
+STD_INTERRUPT(data_segment, 0x380)
+STD_INTERRUPT(inst_storage, 0x400)
+STD_INTERRUPT(inst_segment, 0x480)
UNIMPL_INTERRUPT(external, 0x500)
-UNIMPL_INTERRUPT(alignment, 0x600)
+STD_INTERRUPT(alignment, 0x600)
STD_INTERRUPT(prog_ex, 0x700)
UNIMPL_INTERRUPT(fp_unavail, 0x800)
STD_INTERRUPT(decrementer, 0x900)
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index b969745ca..7ba14e9b8 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -40,6 +40,8 @@ namespace Systemcalls
void MsgSendRecv(task_t*);
void MsgRespond(task_t*);
void MsgWait(task_t*);
+ void MmioMap(task_t*);
+ void MmioUnmap(task_t*);
syscall syscalls[] =
{
@@ -62,6 +64,9 @@ namespace Systemcalls
&MsgSendRecv,
&MsgRespond,
&MsgWait,
+
+ &MmioMap,
+ &MmioUnmap,
};
};
@@ -321,6 +326,22 @@ namespace Systemcalls
}
mq->lock.unlock();
}
+
+ void MmioMap(task_t* t)
+ {
+ void* ra = (void*)TASK_GETARG0(t);
+ size_t pages = TASK_GETARG1(t);
+
+ TASK_SETRTN(t, (uint64_t) VmmManager::mmioMap(ra,pages));
+ }
+
+ void MmioUnmap(task_t* t)
+ {
+ void* ea = (void*)TASK_GETARG0(t);
+ size_t pages = TASK_GETARG1(t);
+
+ TASK_SETRTN(t, VmmManager::mmioUnmap(ea,pages));
+ }
};
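The dispatch table above is positionally coupled to the Syscalls enum in syscalls.H: MMIO_MAP and MMIO_UNMAP must be appended to both in the same order, just before SYSCALL_MAX. A hypothetical guard along these lines (not part of this patch, and assuming a compiler with C++0x static_assert) would catch a mismatch at build time:

    // Illustrative only: one syscalls[] slot per Syscalls enum value.
    static_assert(sizeof(syscalls) / sizeof(syscalls[0]) == SYSCALL_MAX,
                  "syscalls[] table out of sync with Syscalls enum");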
diff --git a/src/kernel/vmmmgr.C b/src/kernel/vmmmgr.C
index de7029483..71fb33523 100644
--- a/src/kernel/vmmmgr.C
+++ b/src/kernel/vmmmgr.C
@@ -1,8 +1,11 @@
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
+#include <kernel/ppcarch.H>
-VmmManager::VmmManager()
+extern void* data_load_address;
+
+VmmManager::VmmManager() : lock()
{
}
@@ -26,6 +29,21 @@ void VmmManager::init_slb()
v.initSDR1();
}
+bool VmmManager::pteMiss(task_t* t)
+{
+ return Singleton<VmmManager>::instance()._pteMiss(t);
+}
+
+void* VmmManager::mmioMap(void* ra, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioMap(ra,pages);
+}
+
+int VmmManager::mmioUnmap(void* ea, size_t pages)
+{
+ return Singleton<VmmManager>::instance()._mmioUnmap(ea,pages);
+}
+
void VmmManager::initSLB()
{
register uint64_t slbRS, slbRB;
@@ -50,10 +68,19 @@ void VmmManager::initPTEs()
// Set up linear map.
for(int i = 0; i < (FULL_MEM_SIZE / PAGESIZE); i++)
{
- pte_t& pte = getPte(i, 0);
+ ACCESS_TYPES access = NORMAL_ACCESS;
+ if (0 == i)
+ {
+ access = NO_USER_ACCESS;
+ }
+ else if (((uint64_t)&data_load_address) > (i * PAGESIZE))
+ {
+ access = READ_O_ACCESS;
+ }
+ volatile pte_t& pte = getPte(i, 0);
defaultPte(pte);
setTid(LinearSpace, pte);
- setAccess( (0 == i) ? NO_USER_ACCESS : NORMAL_ACCESS, pte);
+ setAccess(access, pte);
setPage(i, pte);
setValid(true, pte);
}
@@ -66,5 +93,156 @@ void VmmManager::initSDR1()
asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}
+VmmManager::pte_t* VmmManager::page_table
+ = (VmmManager::pte_t*) HTABORG;
+
+bool VmmManager::_pteMiss(task_t* t)
+{
+ lock.lock();
+
+ uint64_t effAddr = ppc_getDAR();
+ uint64_t effPid = effAddr / FULL_MEM_SIZE;
+
+
+ if (effPid == LinearSpace)
+ {
+ lock.unlock();
+ return false; // Should not get this exception in Linear space
+ // because it is always fully mapped.
+ }
+
+ // Check for exception in MMIO vs Dynamic Stack space.
+ if (effPid <= MMIOSpace)
+ {
+ // Do MMIO mapping.
+ uint64_t effAddrPage = (effAddr - FULL_MEM_SIZE) / PAGESIZE;
+
+ // Check for valid entry in MMIO map.
+ uint64_t mmioMapEntry = mmioMapT[effAddrPage];
+ if (0 == mmioMapEntry)
+ {
+ lock.unlock();
+ return false;
+ }
+
+ uint64_t mmioMapPage = mmioMapEntry / PAGESIZE;
+
+ // Update PTE.
+ volatile pte_t& pte = getPte(effAddrPage, 1);
+ if ((getTid(pte) == effPid) &&
+ (getPage(pte) == mmioMapPage) &&
+ (isValid(pte)))
+ {
+ // PTE already present; another thread may have mapped it first.
+ lock.unlock();
+ return true;
+ }
+ if (isValid(pte)) // Invalidate if already valid.
+ setValid(false, pte);
+ defaultPte(pte);
+ setTid(effPid, pte);
+ setPage(mmioMapPage, pte);
+ setAccess(CI_ACCESS, pte);
+ setValid(true, pte);
+
+ lock.unlock();
+ return true;
+ }
+ else
+ {
+ // TODO: Do dynamic stack mapping.
+ lock.unlock();
+ return false;
+ }
+}
+
+void* VmmManager::_mmioMap(void* ra, size_t pages)
+{
+ lock.lock();
+
+ ssize_t match = -1;
+ uint64_t _ra = (uint64_t) ra;
+
+ // Search for memory already mapped in.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if ((mmioMapT[i] & ~(PAGESIZE - 1)) == _ra)
+ {
+ if (i + pages <= MMIO_T_ENTRIES)
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if ((mmioMapT[i+j] & ~(PAGESIZE - 1)) !=
+ (_ra + (j*PAGESIZE)))
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+ }
+
+ // Found region already mapped in.
+ if (-1 != match)
+ {
+ // Increment ref counts.
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i]++;
+ }
+ // Return calculated effective address.
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // Search for empty region in map.
+ for (int i = 0; i < MMIO_T_ENTRIES; i++)
+ {
+ if ((0 == mmioMapT[i]) && (i + pages <= MMIO_T_ENTRIES))
+ {
+ bool matched = true;
+ for (int j = 1; j < pages; j++)
+ {
+ if (0 != mmioMapT[i+j])
+ {
+ matched = false;
+ break;
+ }
+ }
+ if (matched)
+ {
+ match = i;
+ break;
+ }
+ }
+ }
+
+ // Found region to use for map.
+ if (-1 != match)
+ {
+ for (int i = 0; i < pages; i++)
+ {
+ mmioMapT[match + i] = _ra + 1; // RA + ref count of 1.
+ }
+
+ lock.unlock();
+ return (void*)(FULL_MEM_SIZE + (match * PAGESIZE));
+ }
+
+ // No existing mapping and no free region large enough; return NULL.
+ lock.unlock();
+ return NULL;
+}
+
+int VmmManager::_mmioUnmap(void* ea, size_t pages)
+{
+ return -1; // TODO: unmapping not yet implemented.
+}
-VmmManager::pte_t* VmmManager::page_table = (VmmManager::pte_t*) HTABORG;
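Each mmioMapT slot packs two fields into a single uint64_t: the page-aligned real address in the upper bits and a reference count in the low bits, which are free because RAs are PAGESIZE-aligned. That is why _mmioMap stores _ra + 1 for a new mapping, increments a slot to take another reference, and masks with ~(PAGESIZE - 1) to recover the RA, while _pteMiss divides the entry by PAGESIZE to get the real page number. A sketch of the encoding with hypothetical helper names, assuming PAGESIZE is a power of two:

    // Not in the patch; illustrates the mmioMapT entry layout.
    inline uint64_t entryRa(uint64_t entry)   { return entry & ~(PAGESIZE - 1); }
    inline uint64_t entryRefs(uint64_t entry) { return entry &  (PAGESIZE - 1); }
    inline uint64_t makeEntry(uint64_t ra)    { return ra + 1; } // RA + 1 ref.

A zero entry marks a free slot, which is why makeEntry starts the reference count at one.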
diff --git a/src/lib/makefile b/src/lib/makefile
index febe3f27f..e1e0f1e50 100644
--- a/src/lib/makefile
+++ b/src/lib/makefile
@@ -3,6 +3,7 @@ include ../../config.mk
OBJS = string.o stdlib.o
OBJS += syscall_stub.o syscall_task.o syscall_mutex.o syscall_msg.o
+OBJS += syscall_mmio.o
OBJECTS = $(addprefix ${OBJDIR}/, ${OBJS})
all: ${OBJECTS}
diff --git a/src/lib/syscall_mmio.C b/src/lib/syscall_mmio.C
new file mode 100644
index 000000000..f2dc8ccf8
--- /dev/null
+++ b/src/lib/syscall_mmio.C
@@ -0,0 +1,14 @@
+#include <sys/syscall.h>
+#include <sys/mmio.h>
+
+using namespace Systemcalls;
+
+void* mmio_map(void* ra, size_t pages)
+{
+ return _syscall2(MMIO_MAP, ra, (void*)pages);
+}
+
+int mmio_unmap(void* ea, size_t pages)
+{
+ return (int64_t) _syscall2(MMIO_UNMAP, ea, (void*)pages);
+}
diff --git a/src/sys/init/init_main.C b/src/sys/init/init_main.C
index 91c30bee4..4be986ba7 100644
--- a/src/sys/init/init_main.C
+++ b/src/sys/init/init_main.C
@@ -3,6 +3,7 @@
#include <sys/task.h>
#include <sys/mutex.h>
#include <sys/msg.h>
+#include <sys/mmio.h>
mutex_t global_mutex;
@@ -24,7 +25,10 @@ void init_main(void* unused)
printk("Bringing up VFS...");
task_create(&vfs_main, NULL);
task_yield(); // TODO... add a barrier to ensure VFS is fully up.
-
+
+ uint64_t* mmio_addr = (uint64_t*) mmio_map((void*)0x800000000, 1);
+ printk("MMIO Access %llx\n", *mmio_addr);
+
global_mutex = mutex_create();
msg_q_t msgq = msg_q_create();