//  IBM_PROLOG_BEGIN_TAG
//  This is an automatically generated prolog.
//
//  $Source: src/include/kernel/vmmmgr.H $
//
//  IBM CONFIDENTIAL
//
//  COPYRIGHT International Business Machines Corp. 2010 - 2011
//
//  p1
//
//  Object Code Only (OCO) source materials
//  Licensed Internal Code Source Materials
//  IBM HostBoot Licensed Internal Code
//
//  The source code for this program is not published or other-
//  wise divested of its trade secrets, irrespective of what has
//  been deposited with the U.S. Copyright Office.
//
//  Origin: 30
//
//  IBM_PROLOG_END
#ifndef __KERNEL_VMMMGR_H
#define __KERNEL_VMMMGR_H

#include <limits.h>
#include <sys/mmio.h>
#include <sys/mm.h>
#include <kernel/types.h>
#include <kernel/spinlock.H>

class MessageQueue;

class VmmManager
{
    public:
        /**
         * Constants used throughout the virtual memory management classes
         */
        enum VMM_CONSTS
        {
            FULL_MEM_SIZE = 4*MEGABYTE,

            // put the Page Table at the end of our memory space
            PTSIZE = (1 << 18),
            HTABORG = (FULL_MEM_SIZE - PTSIZE),
        };
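
        // Worked values for the constants above (a reference sketch, assuming
        // MEGABYTE is 1 MB, i.e. 0x100000, as provided by the includes):
        //   FULL_MEM_SIZE = 4 * 0x100000        = 0x400000 (4 MB)
        //   PTSIZE        = 1 << 18             = 0x040000 (256 KB)
        //   HTABORG       = 0x400000 - 0x40000  = 0x3C0000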

        enum ACCESS_TYPES
        {
            NO_USER_ACCESS,
            READ_O_ACCESS,
            NORMAL_ACCESS,
            CI_ACCESS,
            RO_EXE_ACCESS,
        };

        enum castout_t
        {
            NORMAL,
            CRITICAL,
        };

        /**
         * Kernel mapped page removal operations
         *
         * RELEASE : Writes dirty & write-tracked pages out to a storage device
         *           and removes other pages
         * FLUSH   : Only writes dirty & write-tracked pages out to a storage
         *           device
         * EVICT   : (Kernel) Writes dirty & write-tracked pages out to a
         *           storage device and removes other pages
         */
        enum PAGE_REMOVAL_OPS
        {
            RELEASE = 0,
            FLUSH = 1,
            EVICT = 2,
        };

        static void init();
        static void init_slb();

        /**
         * @brief Responsible for handling PTE misses.
         *
         * @param[in] t - Task causing the page fault.
         * @param[in] effAddr - Effective address accessed to cause fault.
         *
         * @return true - PTE miss was successfully handled.
         *
         * If the PTE miss is not successfully handled, the exception
         * handler should collect debug information and kill the task.
         */
        static bool pteMiss(task_t* t, uint64_t effAddr);
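
        // Illustrative call pattern only (not part of the interface); the
        // task pointer and fault address are hypothetical values supplied by
        // the exception handler:
        //
        //   if (!VmmManager::pteMiss(l_task, l_faultAddr))
        //   {
        //       // collect debug information and kill the task, per the
        //       // contract described above
        //   }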

        // Map / unmap a real address range for memory-mapped I/O.
        static void* mmioMap(void*, size_t);
        static int mmioUnmap(void*, size_t);

        /**
         * @brief Map a device into the device segment (2TB)
         * @param ra[in] - Void pointer to real address to be mapped in
         * @param i_devDataSize[in] - Size of device segment block
         * @return void* - Pointer to beginning virtual address, NULL otherwise
         */
        static void* devMap(void* ra, SEG_DATA_SIZES i_devDataSize);

        /**
         * @brief Unmap a device from the device segment (2TB)
         * @param ea[in] - Void pointer to effective address
         * @return int - 0 for successful unmap, non-zero otherwise
         */
        static int devUnmap(void* ea);
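
        // Usage sketch for devMap/devUnmap (illustrative only; 'l_ra' is a
        // hypothetical real address and the SEG_DATA_SIZES value would come
        // from sys/mmio.h):
        //
        //   void* l_ea = VmmManager::devMap(l_ra, /* SEG_DATA_SIZES value */);
        //   if (l_ea != NULL)
        //   {
        //       // ... access the device through l_ea ...
        //       int rc = VmmManager::devUnmap(l_ea);
        //   }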

        /**
         * @brief Allocates a block of virtual memory of the given size
         * @param i_mq[in] - Message queue to be associated with the block
         * @param i_va[in] - Page aligned base virtual address of the block
         *                   to be allocated
         * @param i_size[in] - Requested virtual memory size of the block
         * @return int - 0 for successful block allocation, non-zero otherwise
         */
        static int mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size);
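
        // Usage sketch (illustrative only; the queue, address, and size are
        // hypothetical):
        //
        //   MessageQueue* l_mq = /* queue that services this block */;
        //   void* l_va = reinterpret_cast<void*>(0x100000000ull); // page aligned
        //   int rc = VmmManager::mmAllocBlock(l_mq, l_va, 64*1024);
        //   // rc == 0 on success, non-zero otherwise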

        /**
         * @brief Find the physical address bound to the given virtual address
         * @param[in] i_vaddr The address
         * @return the physical address or -EFAULT @see errno.h
         */
        static uint64_t findPhysicalAddress(uint64_t i_vaddr);
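
        // Usage sketch (illustrative only; 'l_vaddr' is hypothetical):
        //
        //   uint64_t l_phys = VmmManager::findPhysicalAddress(l_vaddr);
        //   if (l_phys == static_cast<uint64_t>(-EFAULT))
        //   {
        //       // no physical page is currently bound to l_vaddr
        //   }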

        /**
         * @brief Cast out older physical memory pages
         * @param[in] i_ct - Castout constraint (NORMAL or CRITICAL)
         */
        static void castOutPages(castout_t i_ct);

        /**
         * @brief Flush the page table and update the shadow page info
         */
        static void flushPageTable(void);

        /**
         * @brief Remove pages by a specified operation of the given size
         * @param[in] i_op - Page removal operation to perform
         * @param[in] i_vaddr - Virtual address associated to page(s)
         * @param[in] i_size - Size of memory to perform page removal on
         * @param[in] i_task - OPTIONAL: Task requesting the page removal.
         * @return int - 0 for successful page removal, non-zero otherwise
         *
         * The given virtual address will be rounded down to the nearest page
         * boundary, and the given size will be rounded up to the nearest
         * page-size multiple.
         *
         * When a task is given, it will be deferred until the removal of all
         * requested pages has completed.
         */
        static int mmRemovePages(VmmManager::PAGE_REMOVAL_OPS i_op,
                                 void* i_vaddr, uint64_t i_size,
                                 task_t* i_task = NULL);
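
        // Usage sketch (illustrative only; 'l_vaddr' and 'l_size' are
        // hypothetical):
        //
        //   // Write dirty & write-tracked pages in the range back to their
        //   // storage device without removing the rest (FLUSH semantics).
        //   int rc = VmmManager::mmRemovePages(VmmManager::FLUSH,
        //                                      l_vaddr, l_size);
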
        /**
         * @brief Sets the permissions for a given page or range of pages
         * @param i_va[in] - Virtual address of the first page whose permission
         *                   is to be updated
         * @param i_size[in] - Size of the memory range whose permissions are
         *                     updated; if i_size is 0, only the single page at
         *                     i_va is updated
         * @param i_access_type[in] - Access type to apply to the page(s)
         * @return int - 0 for successful permission update, non-zero otherwise
         *
         * The given virtual address will be rounded down to the nearest page
         * boundary, and the given size will be rounded up to the nearest
         * page-size multiple.
         */
        static int mmSetPermission(void* i_va,uint64_t i_size, uint64_t i_access_type);
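
        // Usage sketch (illustrative only; 'l_vaddr' is hypothetical, a 4 KB
        // page size is assumed, and READ_O_ACCESS is one of the ACCESS_TYPES
        // defined above):
        //
        //   // Mark a three-page range read-only.
        //   int rc = VmmManager::mmSetPermission(l_vaddr, 3*4096,
        //                                        VmmManager::READ_O_ACCESS);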

    protected:
        VmmManager();
        ~VmmManager() {};

        /** @brief Get spinlock for memory subsystem.
         *  This is useful for passing to a deferred user-space message
         *  handler so that the subsystem code is SMP-safe when the message
         *  response is obtained.
         */
        static Spinlock* getLock();
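
        // Usage sketch (illustrative only, from code with access to this
        // protected interface; assumes Spinlock exposes lock()/unlock() as in
        // kernel/spinlock.H):
        //
        //   Spinlock* l_lock = VmmManager::getLock();
        //   l_lock->lock();
        //   // ... handle the deferred user-space message response ...
        //   l_lock->unlock();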

    private:
        Spinlock lock;

        void initPTEs();
        void initSDR1();

        bool _pteMiss(task_t*, uint64_t);

        /** See findPhysicalAddress */
        uint64_t _findPhysicalAddress(uint64_t i_vaddr);

        /** See mmSetPermission */
        int _mmSetPermission(void* i_va,uint64_t i_size, uint64_t i_access_type);

        /** See castOutPages */
        void _castOutPages(castout_t i_ct);

        /** See flushPageTable */
        void _flushPageTable( void );

        /** See mmAllocBlock */
        int _mmAllocBlock(MessageQueue* i_mq,void* i_va,uint64_t i_size);

        /** See mmRemovePages */
        int _mmRemovePages(VmmManager::PAGE_REMOVAL_OPS i_op,void* i_vaddr,
                           uint64_t i_size,task_t* i_task);

    public:
        friend class Block;

};

#endif