/* IBM_PROLOG_BEGIN_TAG                                                   */
/* This is an automatically generated prolog.                             */
/*                                                                        */
/* $Source: src/kernel/vmmmgr.C $                                         */
/*                                                                        */
/* IBM CONFIDENTIAL                                                       */
/*                                                                        */
/* COPYRIGHT International Business Machines Corp. 2010,2013              */
/*                                                                        */
/* p1                                                                     */
/*                                                                        */
/* Object Code Only (OCO) source materials                                */
/* Licensed Internal Code Source Materials                                */
/* IBM HostBoot Licensed Internal Code                                    */
/*                                                                        */
/* The source code for this program is not published or otherwise        */
/* divested of its trade secrets, irrespective of what has been           */
/* deposited with the U.S. Copyright Office.                              */
/*                                                                        */
/* Origin: 30                                                             */
/*                                                                        */
/* IBM_PROLOG_END_TAG                                                     */
#include <util/singleton.H>
#include <kernel/vmmmgr.H>
#include <kernel/console.H>
#include <kernel/ptmgr.H>
#include <kernel/segmentmgr.H>
#include <kernel/devicesegment.H>
#include <kernel/basesegment.H>
#include <kernel/stacksegment.H>
#include <arch/ppc.H>
#include <errno.h>

extern void* data_load_address;

VmmManager::VmmManager() : lock()
{
    printk("HRMOR = %lX\n", getHRMOR());
}

void VmmManager::init()
{
    VmmManager& v = Singleton<VmmManager>::instance();

    BaseSegment::init();
    StackSegment::init();
    for (size_t i = SegmentManager::MMIO_FIRST_SEGMENT_ID;
         i < SegmentManager::MMIO_LAST_SEGMENT_ID;
         ++i)
    {
        new DeviceSegment(i); // Self-registers with SegmentManager.
    }
    SegmentManager::initSLB();

    v.initPTEs();
    v.initSDR1(); /*no effect*/ // BEAM Fix.
}

void VmmManager::init_slb()
{
    VmmManager& v = Singleton<VmmManager>::instance();
    SegmentManager::initSLB();
    v.initSDR1(); /*no effect*/ // BEAM Fix.
}

bool VmmManager::pteMiss(task_t* t, uint64_t effAddr, bool store)
{
    return Singleton<VmmManager>::instance()._pteMiss(t, effAddr, store);
}

uint64_t VmmManager::findPhysicalAddress(uint64_t i_vaddr)
{
    return Singleton<VmmManager>::instance()._findPhysicalAddress(i_vaddr);
}

void VmmManager::castOutPages(VmmManager::castout_t i_ct)
{
    Singleton<VmmManager>::instance()._castOutPages(i_ct);
}

void VmmManager::flushPageTable( void )
{
    Singleton<VmmManager>::instance()._flushPageTable();
}

void* VmmManager::devMap(void* ra, uint64_t i_devDataSize, bool i_nonCI)
{
    return Singleton<VmmManager>::instance()._devMap(ra, i_devDataSize, i_nonCI);
}

int VmmManager::devUnmap(void* ea)
{
    return Singleton<VmmManager>::instance()._devUnmap(ea);
}

void VmmManager::initPTEs()
{
    // Initialize and invalidate the page table.
    PageTableManager::init();

    // There is no need to add PTE entries here; the PTE-miss page fault
    // handler will add them as needed.
}
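// A worked example of the SDR1 value the next routine programs. The numbers
// are illustrative assumptions (HTABORG_OFFSET comes from vmmmgr.H and the
// HRMOR from the hardware, not from this file):
//   HTABORG() = HTABORG_OFFSET + HRMOR, e.g. 0x40000 + 0x8000000 = 0x8040000
//   SDR1      = HTABORG()   (HTABSIZE bits left 0 => minimum 256KB table,
//                            i.e. 2^11 PTEGs of 128 bytes each)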
void VmmManager::initSDR1()
{
    // HTABORG, HTABSIZE = 0 (11 bits, 256k table)
    register uint64_t sdr1 = HTABORG();
    asm volatile("mtsdr1 %0" :: "r"(sdr1) : "memory");
}

bool VmmManager::_pteMiss(task_t* t, uint64_t effAddr, bool store)
{
    lock.lock();
    bool rc = SegmentManager::handlePageFault(t, effAddr, store);
    lock.unlock();
    return rc;
}

int VmmManager::mmAllocBlock(MessageQueue* i_mq, void* i_va, uint64_t i_size)
{
    return Singleton<VmmManager>::instance()._mmAllocBlock(i_mq, i_va, i_size);
}

int VmmManager::_mmAllocBlock(MessageQueue* i_mq, void* i_va, uint64_t i_size)
{
    lock.lock();
    int rc = BaseSegment::mmAllocBlock(i_mq, i_va, i_size);
    lock.unlock();
    return rc;
}

Spinlock* VmmManager::getLock()
{
    return &Singleton<VmmManager>::instance().lock;
}

uint64_t VmmManager::_findPhysicalAddress(uint64_t i_vaddr)
{
    uint64_t paddr = 0;
    lock.lock();
    paddr = SegmentManager::findPhysicalAddress(i_vaddr);
    lock.unlock();
    return paddr;
}

int VmmManager::mmRemovePages(VmmManager::PAGE_REMOVAL_OPS i_op, void* i_vaddr,
                              uint64_t i_size, task_t* i_task)
{
    return Singleton<VmmManager>::instance()._mmRemovePages(i_op, i_vaddr,
                                                            i_size, i_task);
}

int VmmManager::_mmRemovePages(VmmManager::PAGE_REMOVAL_OPS i_op, void* i_vaddr,
                               uint64_t i_size, task_t* i_task)
{
    lock.lock();
    int rc = BaseSegment::mmRemovePages(i_op, i_vaddr, i_size, i_task);
    lock.unlock();
    return rc;
}

int VmmManager::mmSetPermission(void* i_va, uint64_t i_size,
                                uint64_t i_access_type)
{
    return Singleton<VmmManager>::instance()._mmSetPermission(i_va, i_size,
                                                              i_access_type);
}

int VmmManager::_mmSetPermission(void* i_va, uint64_t i_size,
                                 uint64_t i_access_type)
{
    int rc = 1;
    lock.lock();
    rc = BaseSegment::mmSetPermission(i_va, i_size, i_access_type);
    lock.unlock();
    return rc;
}

void VmmManager::_castOutPages(VmmManager::castout_t i_ct)
{
    lock.lock();
    SegmentManager::castOutPages((uint64_t)i_ct);
    lock.unlock();
}

void VmmManager::_flushPageTable( void )
{
    lock.lock();
    PageTableManager::flush();
    lock.unlock();
}

int VmmManager::mmExtend(void)
{
    return Singleton<VmmManager>::instance()._mmExtend();
}

int VmmManager::_mmExtend(void)
{
    lock.lock();
    int rc = BaseSegment::mmExtend();
    lock.unlock();
    return rc;
}

void* VmmManager::_devMap(void* ra, uint64_t i_devDataSize, bool i_nonCI)
{
    void* ea = NULL;
    lock.lock();
    ea = SegmentManager::devMap(ra, i_devDataSize, i_nonCI);
    lock.unlock();
    return ea;
}

int VmmManager::_devUnmap(void* ea)
{
    int rc = 0;
    lock.lock();
    rc = SegmentManager::devUnmap(ea);
    lock.unlock();
    return rc;
}

uint64_t VmmManager::HTABORG()
{
    return ((uint32_t)HTABORG_OFFSET + getHRMOR());
}

uint64_t VmmManager::findKernelAddress(uint64_t i_vaddr)
{
    // In hypervisor mode the HRMOR is automatically ORed onto the address,
    // so we need to tell the hardware to ignore it.
    uint64_t phys = VmmManager::findPhysicalAddress(i_vaddr);
    if (static_cast<uint64_t>(-EFAULT) != phys)
    {
        phys |= FORCE_PHYS_ADDR;
    }
    return phys;
}

int VmmManager::mmLinearMap(void* i_paddr, uint64_t i_size)
{
    return Singleton<VmmManager>::instance()._mmLinearMap(i_paddr, i_size);
}

int VmmManager::_mmLinearMap(void* i_paddr, uint64_t i_size)
{
    lock.lock();
    int rc = BaseSegment::mmLinearMap(i_paddr, i_size);
    lock.unlock();
    return rc;
}
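// Usage sketch (illustrative only; the queue, address, and permission value
// below are hypothetical, not defined in this file). Every public entry
// point above is a static wrapper that resolves the VmmManager singleton and
// serializes on its spinlock, so callers may invoke them concurrently:
//
//   MessageQueue* mq = ...;                           // hypothetical queue
//   int rc = VmmManager::mmAllocBlock(mq, va, size);  // 0 expected on success
//   if (0 == rc)
//   {
//       rc = VmmManager::mmSetPermission(va, size, WRITABLE);
//   }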