path: root/src/kernel/vmmmgr.C
#include <limits.h>
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
#include <arch/ppc.H>
#include <kernel/ptmgr.H>
#include <kernel/segmentmgr.H>
#include <kernel/devicesegment.H>
#include <kernel/basesegment.H>

extern void* data_load_address;

VmmManager::VmmManager() : lock()
{
}

void VmmManager::init()
{
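    // Bring up virtual memory: initialize the base and device segments,
    // install the SLB entries, set up the hardware page table, and point
    // SDR1 at it.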
    printk("Starting VMM...\n");

    VmmManager& v = Singleton<VmmManager>::instance();

    BaseSegment::init();
    DeviceSegment::init();
    SegmentManager::initSLB();

    v.initPTEs();
    v.initSDR1();

    printk("...done.\n");
}

void VmmManager::init_slb()
{
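    // Re-install the SLB entries and SDR1 for the calling hardware thread,
    // presumably when an additional thread or core is brought online.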
    VmmManager& v = Singleton<VmmManager>::instance();
    SegmentManager::initSLB();

    v.initSDR1();
}

bool VmmManager::pteMiss(task_t* t, uint64_t effAddr)
{
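    // Static entry point for the PTE-miss page-fault path; delegate to the
    // singleton, which serializes the fault under the VMM lock.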
    return Singleton<VmmManager>::instance()._pteMiss(t, effAddr);
}

/**
 * STATIC
 * @brief DEPRECATED
 */
void* VmmManager::mmioMap(void* ra, size_t pages)
{
    return DeviceSegment::mmioMap(ra, pages);
}

/**
 * STATIC
 * @brief DEPRECATED
 */
int VmmManager::mmioUnmap(void* ea, size_t pages)
{
    return DeviceSegment::mmioUnmap(ea, pages);
}

/**
 * STATIC
 * @brief A facade to map a device into the device segment (2TB)
 */
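// Usage sketch, assuming a caller holding a device real address i_ra and a
// valid SEG_DATA_SIZES value i_size (both names are placeholders for
// illustration, not part of this interface):
//   void* ea = VmmManager::devMap(i_ra, i_size);
//   if (ea != NULL) { /* access the device through ea ... */ }
//   VmmManager::devUnmap(ea);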
void* VmmManager::devMap(void* ra, SEG_DATA_SIZES i_devDataSize)
{
    return DeviceSegment::devMap(ra, i_devDataSize);
}

/**
 * STATIC
 * @brief A facade to unmap a device from the device segment (2TB)
 */
int VmmManager::devUnmap(void* ea)
{
    return DeviceSegment::devUnmap(ea);
}

void VmmManager::initPTEs()
{
    // Initialize and invalidate the page table
    PageTableManager::init();

    // There is no need to add PTE entries here because the PTE-miss page
    // fault handler will add them as needed.
}

void VmmManager::initSDR1()
{
    // SDR1: HTABORG = page table origin, HTABSIZE = 0 (11-bit index, 256KB table)
    register uint64_t sdr1 = (uint64_t)HTABORG;
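    // Write SDR1 so the MMU translation hardware can locate the page table.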
    asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}

bool VmmManager::_pteMiss(task_t* t, uint64_t effAddr)
{
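    // Serialize page-fault handling and let the segment manager route the
    // faulting effective address to the segment that owns it.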
    lock.lock();

    bool rc = SegmentManager::handlePageFault(t, effAddr);

    lock.unlock();

    return rc;
}

int VmmManager::mmAllocBlock(MessageQueue* i_mq, void* i_va, uint64_t i_size)
{
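    // Facade: block allocation is handled by the base segment; i_mq is
    // presumably the message queue that services faults on the new block.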
    return BaseSegment::mmAllocBlock(i_mq, i_va, i_size);
}

Spinlock* VmmManager::getLock()
{
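    // Expose the VMM spinlock so other kernel code can serialize against
    // page-fault handling.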
    return &Singleton<VmmManager>::instance().lock;
}
