/* IBM_PROLOG_BEGIN_TAG                                                   */
/* This is an automatically generated prolog.                             */
/*                                                                        */
/* $Source: src/kernel/cpumgr.C $                                         */
/*                                                                        */
/* IBM CONFIDENTIAL                                                       */
/*                                                                        */
/* COPYRIGHT International Business Machines Corp. 2010,2013              */
/*                                                                        */
/* p1                                                                     */
/*                                                                        */
/* Object Code Only (OCO) source materials                                */
/* Licensed Internal Code Source Materials                                */
/* IBM HostBoot Licensed Internal Code                                    */
/*                                                                        */
/* The source code for this program is not published or otherwise         */
/* divested of its trade secrets, irrespective of what has been           */
/* deposited with the U.S. Copyright Office.                              */
/*                                                                        */
/* Origin: 30                                                             */
/*                                                                        */
/* IBM_PROLOG_END_TAG                                                     */

#include <assert.h>
#include <errno.h>
#include <string.h>
#include <arch/ppc.H>
#include <sys/sync.h>
#include <util/singleton.H>
#include <kernel/cpumgr.H>
#include <kernel/cpu.H>
#include <kernel/cpuid.H>
#include <kernel/task.H>
#include <kernel/taskmgr.H>
#include <kernel/scheduler.H>
#include <kernel/timemgr.H>
#include <kernel/pagemgr.H>
#include <kernel/heapmgr.H>
#include <kernel/vmmmgr.H>
#include <kernel/ptmgr.H>
#include <kernel/misc.H>
#include <kernel/deferred.H>
#include <kernel/intmsghandler.H>
#include <kernel/console.H>
#include <kernel/terminate.H>
#include <kernel/intedbg.H>

cpu_t** CpuManager::cv_cpus[KERNEL_MAX_SUPPORTED_NODES];
bool CpuManager::cv_shutdown_requested = false;
uint64_t CpuManager::cv_shutdown_status = 0;
size_t CpuManager::cv_cpuSeq = 0;
bool CpuManager::cv_forcedMemPeriodic = false;
InteractiveDebug CpuManager::cv_interactive_debug;

CpuManager::CpuManager() : iv_lastStartTimebase(0)
{
    for (int i = 0; i < KERNEL_MAX_SUPPORTED_NODES; i++)
    {
        cv_cpus[i] = NULL;
    }

    memset(&cv_interactive_debug, '\0', sizeof(cv_interactive_debug));
}

cpu_t* CpuManager::getMasterCPU()
{
    for (int i = 0; i < KERNEL_MAX_SUPPORTED_NODES; i++)
    {
        if (NULL == cv_cpus[i])
        {
            continue;
        }

        for (int j = 0; j < KERNEL_MAX_SUPPORTED_CPUS_PER_NODE; j++)
        {
            if ((cv_cpus[i][j] != NULL) && (cv_cpus[i][j]->master))
            {
                return cv_cpus[i][j];
            }
        }
    }

    return NULL;
}

void CpuManager::init()
{
    // For the initial boot we only want to set up CPU objects for the
    // threads on this core.  Otherwise we waste memory with kernel / idle
    // task stacks.
    //
    // As long as the CPU object pointer is NULL, the start.S code won't
    // enter the kernel, so we skip initializing all the other CPUs for now.

    // Determine the number of threads on this core.
    size_t threads = getThreadCount();

    // Set up the CPU structure for this node.
    cv_cpus[getPIR() / KERNEL_MAX_SUPPORTED_CPUS_PER_NODE] =
        new cpu_t*[KERNEL_MAX_SUPPORTED_CPUS_PER_NODE]();

    // Create CPU objects starting at thread 0 of this core.
    size_t baseCpu = getCpuId() & ~(threads - 1);
    for (size_t i = 0; i < threads; i++)
    {
        Singleton<CpuManager>::instance().startCPU(i + baseCpu);
    }
}

void CpuManager::init_slave_smp(cpu_t* cpu)
{
    Singleton<CpuManager>::instance().startSlaveCPU(cpu);
}

void CpuManager::requestShutdown(uint64_t i_status)
{
    cv_shutdown_status = i_status;
    __sync_synchronize();
    cv_shutdown_requested = true;

    // If the shutdown was not requested with a good shutdown status, we
    // know we are shutting down due to an error.  We need to figure out
    // whether the value provided is a PLID or a reason code and write it
    // appropriately.  Hostboot PLIDs always start with 0x9 (32-bit).
    static const uint64_t PLID_MASK = 0x0000000090000000;

    if (i_status != SHUTDOWN_STATUS_GOOD)
    {
        if ((i_status & 0x00000000F0000000) == PLID_MASK)
        {
            termWritePlid(TI_SHUTDOWN, i_status);
        }
        else
        {
            termWriteSRC(TI_SHUTDOWN, i_status, 0);
        }
        printk("TI initiated on all threads (shutdown)\n");
    }
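
    // For example (values are illustrative only): a status such as
    // 0x90000123 carries 0x9 in the top nibble of its low word and is
    // recorded as a PLID, while a status such as 0x00A40005 fails the
    // mask test and is recorded as an SRC reason code.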

    class ExecuteShutdown : public DeferredWork
    {
      public:
        void masterPreWork()
        {
            // The stats can be retrieved from global variables as needed.
            // This can be uncommented for debug if desired.  masterPreWork()
            // only runs on the master CPU, so no extra check is needed here.
#ifdef __MEMSTATS__
            HeapManager::stats();
#endif
        }

        void activeMainWork()
        {
            KernelMisc::shutdown();
        }

        void nonactiveMainWork()
        {
            // Something wasn't synchronized correctly if we got here.
            // CPUs should not be coming online while we are trying to
            // execute a shutdown.
            kassert(false);
        }
    };

    DeferredQueue::insert(new ExecuteShutdown());
}

void CpuManager::startCPU(ssize_t i)
{
    // Save away the current timebase for TB synchronization.
    iv_lastStartTimebase = getTB();

    bool currentCPU = false;
    if (i < 0)
    {
        i = getCpuId();
        currentCPU = true;
    }
    else if (getCpuId() == (uint64_t)i)
    {
        currentCPU = true;
    }

    size_t nodeId = i / KERNEL_MAX_SUPPORTED_CPUS_PER_NODE;
    size_t cpuId = i % KERNEL_MAX_SUPPORTED_CPUS_PER_NODE;

    // Initialize node structure.
    if (NULL == cv_cpus[nodeId])
    {
        cv_cpus[nodeId] = new cpu_t*[KERNEL_MAX_SUPPORTED_CPUS_PER_NODE]();
    }

    // Initialize CPU structure.
    if (NULL == cv_cpus[nodeId][cpuId])
    {
        printk("Starting CPU %ld...", i);
        cpu_t* cpu = cv_cpus[nodeId][cpuId] = new cpu_t();

        // Initialize CPU.
        cpu->cpu = i;
        cpu->master = currentCPU;
        cpu->scheduler = &Singleton<Scheduler>::instance();
        cpu->scheduler_extra = NULL;

        // Allocate a kernel stack and point the stack pointer near the
        // top of the allocation, leaving eight doublewords of headroom.
        const size_t kernel_page_count = 4;
        const size_t kernel_page_offset =
            kernel_page_count * PAGESIZE - 8 * sizeof(uint64_t);
        cpu->kernel_stack_bottom = PageManager::allocatePage(kernel_page_count);
        cpu->kernel_stack = reinterpret_cast<void*>(
            reinterpret_cast<uint64_t>(cpu->kernel_stack_bottom) +
            kernel_page_offset);

        cpu->xscom_mutex = (mutex_t)MUTEX_INITIALIZER;

        // Create idle task.
        cpu->idle_task = TaskManager::createIdleTask();
        cpu->idle_task->cpu = cpu;
        cpu->periodic_count = 0;

        // Call TimeManager setup for a CPU.
        TimeManager::init_cpu(cpu);

        printk("done\n");
    }

    if (currentCPU)
    {
        setDEC(TimeManager::getTimeSliceCount());
        activateCPU(getCpu(i));
    }

    return;
}

void CpuManager::startSlaveCPU(cpu_t* cpu)
{
    // Activate CPU.
    activateCPU(cpu);

    // Sync timebase with master.
    while (getTB() < iv_lastStartTimebase)
    {
        class SyncTimebase : public DeferredWork
        {
          public:
            void masterPreWork()
            {
                iv_timebase = getTB();
            }

            void activeMainWork()
            {
                if (getTB() < iv_timebase)
                {
                    setTB(iv_timebase);
                }
            }

          private:
            uint64_t iv_timebase;
        };

        SyncTimebase* deferred = new SyncTimebase();
        DeferredQueue::insert(deferred);
        DeferredQueue::execute();
    }

    // Update decrementer.
    setDEC(TimeManager::getTimeSliceCount());

    return;
}

void CpuManager::activateCPU(cpu_t* i_cpu)
{
    // Set active.
    i_cpu->active = true;
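
    // cv_cpuSeq packs two counters into a single word: the low 32 bits
    // hold the number of currently active CPUs and the high 32 bits count
    // every activate/deactivate event.  Activation adds 1 + (1ull << 32)
    // and deactivation adds (1ull << 32) - 1, so every transition yields a
    // unique sequence value even when the active count returns to a
    // previous level.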
    // Update sequence ID.
    do
    {
        uint64_t old_seq = cv_cpuSeq;
        i_cpu->cpu_start_seqid = old_seq + 1 + (1ull << 32);
        if (__sync_bool_compare_and_swap(&cv_cpuSeq, old_seq,
                                         i_cpu->cpu_start_seqid))
        {
            break;
        }
    } while (1);

    // Keep only the event count (high word) as this CPU's start sequence ID.
    i_cpu->cpu_start_seqid >>= 32;

    // Verify / set SPRs.
    uint64_t msr = getMSR();
    msr |= 0x1000;  // TODO: RTC: 51148 - Simics workaround for SW170137.
    kassert(WAKEUP_MSR_VALUE == msr);
    setLPCR(WAKEUP_LPCR_VALUE);
    setRPR(WAKEUP_RPR_VALUE);
}

void CpuManager::deactivateCPU(cpu_t* i_cpu)
{
    // Set inactive.
    i_cpu->active = false;

    // Update sequence ID.
    do
    {
        uint64_t old_seq = cv_cpuSeq;
        uint64_t new_seq = old_seq - 1 + (1ull << 32);
        if (__sync_bool_compare_and_swap(&cv_cpuSeq, old_seq, new_seq))
        {
            break;
        }
    } while (1);
}

void CpuManager::executePeriodics(cpu_t* i_cpu)
{
    if (i_cpu->master)
    {
        if (cv_interactive_debug.isReady())
        {
            cv_interactive_debug.startDebugTask();
        }

        bool forceMemoryPeriodic =
            __sync_fetch_and_and(&cv_forcedMemPeriodic, false);

        ++(i_cpu->periodic_count);

        if ((0 == (i_cpu->periodic_count % CPU_PERIODIC_CHECK_MEMORY)) ||
            (forceMemoryPeriodic))
        {
            uint64_t pcntAvail = PageManager::queryAvail();
            if ((pcntAvail < PageManager::LOWMEM_NORM_LIMIT) ||
                (forceMemoryPeriodic))
            {
                VmmManager::flushPageTable();
                ++(i_cpu->periodic_count);  // Prevent another flush below.

                if (pcntAvail < PageManager::LOWMEM_CRIT_LIMIT)
                {
                    VmmManager::castOutPages(VmmManager::CRITICAL);
                }
                else
                {
                    VmmManager::castOutPages(VmmManager::NORMAL);
                }
            }
        }

        if (0 == (i_cpu->periodic_count % CPU_PERIODIC_FLUSH_PAGETABLE))
        {
            VmmManager::flushPageTable();
        }

        if ((0 == (i_cpu->periodic_count % CPU_PERIODIC_DEFRAG)) ||
            (forceMemoryPeriodic))
        {
            class MemoryCoalesce : public DeferredWork
            {
              public:
                void masterPreWork()
                {
                    setThreadPriorityVeryHigh();
                    HeapManager::coalesce();
                    PageManager::coalesce();
                    setThreadPriorityHigh();
                }
            };

            DeferredQueue::insert(new MemoryCoalesce());
        }
    }

    DeferredQueue::execute();
}

int CpuManager::startCore(uint64_t pir, uint64_t i_threads)
{
    size_t threads = getThreadCount();
    pir = pir & ~(threads - 1);

    if (pir >= (KERNEL_MAX_SUPPORTED_NODES *
                KERNEL_MAX_SUPPORTED_CPUS_PER_NODE))
    {
        return -ENXIO;
    }

    for (size_t i = 0; i < threads; i++)
    {
        // Only start the threads we were told to start.
        if (i_threads & (0x8000000000000000ull >> i))
        {
            Singleton<CpuManager>::instance().startCPU(pir + i);
        }
    }

    __sync_synchronize();

    InterruptMsgHdlr::addCpuCore(pir);

    return 0;
}

size_t CpuManager::getThreadCount()
{
    size_t threads = 0;

    switch (CpuID::getCpuType())
    {
        case CORE_POWER8_VENICE:
        case CORE_POWER8_MURANO:
            threads = 8;
            break;

        case CORE_UNKNOWN:
        default:
            kassert(false);
            break;
    }

    return threads;
}

void CpuManager::forceMemoryPeriodic()
{
    cv_forcedMemPeriodic = true;
}

void CpuManager::critAssert(uint64_t i_failAddr)
{
    // Create an SRC and call terminate immediate.
    termWriteSRC(TI_CRIT_ASSERT, RC_SHUTDOWN, i_failAddr);

    class ExecuteCritAssert : public DeferredWork
    {
      public:
        void masterPreWork()
        {
            // Print status to the console.
            printk("TI initiated on all threads (crit_assert)\n");
        }

        void activeMainWork()
        {
            // Call the function to perform the TI.
            terminateExecuteTI();
        }

        void nonactiveMainWork()
        {
            // Something wasn't synchronized correctly if we got here.
            // CPUs should not be coming online while trying to execute
            // a shutdown, but terminate anyway.
            terminateExecuteTI();
        }
    };

    DeferredQueue::insert(new ExecuteCritAssert());

    // Force execution of the deferred queue.
    DeferredQueue::execute();
}
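
// Usage sketch for startCore() (illustrative values only): i_threads is a
// left-justified mask in which the most-significant bit selects thread 0
// of the target core.  For example, starting threads 0 and 1 of the core
// containing PIR 0x20 would be:
//
//     CpuManager::startCore(0x20, 0xC000000000000000ull);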