/* IBM_PROLOG_BEGIN_TAG */ /* This is an automatically generated prolog. */ /* */ /* $Source: src/usr/intr/intrrp.C $ */ /* */ /* OpenPOWER HostBoot Project */ /* */ /* Contributors Listed Below - COPYRIGHT 2011,2018 */ /* [+] International Business Machines Corp. */ /* */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ /* implied. See the License for the specific language governing */ /* permissions and limitations under the License. */ /* */ /* IBM_PROLOG_END_TAG */ /** * @file intrrp.C * @brief Interrupt Resource Provider */ #include "intrrp.H" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define INTR_TRACE_NAME INTR_COMP_NAME using namespace INTR; using namespace TARGETING; trace_desc_t * g_trac_intr = NULL; TRAC_INIT(&g_trac_intr, INTR_TRACE_NAME, 16*KILOBYTE, TRACE::BUFFER_SLOW); /** * setup _start and handle barrier */ TASK_ENTRY_MACRO( IntrRp::init ); void IntrRp::init( errlHndl_t &io_errlHndl_t ) { errlHndl_t err = NULL; err = Singleton::instance()._init(); // pass task error back to parent io_errlHndl_t = err ; } errlHndl_t IntrRp::resetIntpForMpipl() { errlHndl_t err = NULL; do{ TARGETING::TargetHandleList l_funcProcs; TARGETING::Target* masterProc = NULL; TARGETING::targetService().masterProcChipTargetHandle( masterProc ); getAllChips(l_funcProcs, TYPE_PROC); //Need to make sure we have all of the functional procs in iv_chipList for(const auto & l_procChip : l_funcProcs) { //make sure it is not the master, as master has already been added if (l_procChip != masterProc) { intr_hdlr_t* l_procIntrHdlr = new intr_hdlr_t(l_procChip); TRACFCOMP(g_trac_intr, "IntrRp::resetIntpForMpipl() Adding slave proc %lx to list of chips.", get_huid(l_procChip)); iv_chipList.push_back(l_procIntrHdlr); // Set the common Interrupt BAR Scom Registers for the proc err = setCommonInterruptBARs(l_procIntrHdlr); if (err) { TRACFCOMP(g_trac_intr, "IntrRp::resetIntpForMpipl() Setting common interrupt Bars on proc %lx.", get_huid(l_procChip)); break; } } } //Break if there was an error in the previous for-loop if(err) { break; } TRACFCOMP(g_trac_intr, "IntrRp::resetIntpForMpipl() Masking all interrupt sources."); //Mask any future interrupts to avoid receiving anymore while in the process // of resetting the rest of the Interrupt Logic err = maskAllInterruptSources(); if (err) { TRACFCOMP(g_trac_intr, "IntrRp::resetIntpForMpipl() Error while masking all interrupt sources."); break; } // Clear out the PC registers that did not get properly cleared during // the SBE steps of MPIPL clearIntPcRegs(); //Reset PSIHB Interrupt Space TRACFCOMP(g_trac_intr, "Reset PSIHB Interrupt Space"); //First reset INTRP logic for slave procs for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { if (*targ_itr != iv_masterHdlr) { PSIHB_SW_INTERFACES_t * this_psihb_ptr = (*targ_itr)->psiHbBaseAddr; this_psihb_ptr->icr = PSI_BRIDGE_INTP_STATUS_CTL_RESET; // TODO RTC: 
172905 due to a DD1 workaround we have to do a SW reset
            // The SW is causing a recoverable fir when we attempt to pull
            // the thread context. The workaround earlier in the SBE's
            // MPIPL steps is clearing out the phys_thread_enable regs
            // so we can't pull thread context. All the SW reset is doing
            // is enabling LSI interrupts (we do later) and clearing the
            // phys_thread_enable regs which are already 0
            // Still need to determine if we can skip this in DD2
            //resetIntUnit(*targ_itr);

            //Turn off VPC error when in LSI mode
            err = disableVPCPullErr(*targ_itr);
            if (err)
            {
                TRACFCOMP(g_trac_intr, "Error masking VPC Pull Lsi Err");
                break;
            }
        }
    }

    //Then reset master proc INTRP logic
    PSIHB_SW_INTERFACES_t * this_psihb_ptr = iv_masterHdlr->psiHbBaseAddr;
    this_psihb_ptr->icr = PSI_BRIDGE_INTP_STATUS_CTL_RESET;
    TRACFCOMP(g_trac_intr, "Reset PSIHB INTR Complete");

    // TODO RTC: 172905 Still need to determine if we can skip this in DD2
    // (same as above)
    //Reset XIVE Interrupt unit
    //resetIntUnit(iv_masterHdlr);

    //Turn off VPC error when in LSI mode
    err = disableVPCPullErr(iv_masterHdlr);
    if (err)
    {
        TRACFCOMP(g_trac_intr, "Error masking VPC Pull Lsi Err");
        break;
    }

    //Hostboot routes all interrupts to the master proc. This gets set up
    // during istep 8 of a normal IPL, but later the Hypervisor will reset
    // the routing. During an MPIPL, istep 8 won't get run, so we need to
    // set the routing up now.
    for(ChipList_t::iterator targ_itr = iv_chipList.begin();
        targ_itr != iv_chipList.end(); ++targ_itr)
    {
        if (*targ_itr != iv_masterHdlr)
        {
            TRACDCOMP(g_trac_intr, "IntrRp::resetIntpForMpipl() Setting up Slave Proc Interrupt Routing for proc %lx",
                      get_huid((*targ_itr)->proc));
            enableSlaveProcInterruptRouting(*targ_itr);
        }
    }

    //Clear out the mask list because the pq state buffer gets cleared after
    //resetting the XIVE Interrupt unit
    iv_maskList.clear();

    }while(0);

    return err;
}

errlHndl_t setHbModeOnP3PCReg()
{
    errlHndl_t l_err = nullptr;
    do{
        TARGETING::TargetHandleList l_funcProcs;
        getAllChips(l_funcProcs, TYPE_PROC);

        uint64_t HOSTBOOT_MODE_MASK = 0x8000000000000000ull >>
                                      P9N2_PU_INT_PC_GLOBAL_CFG_HOSTBOOT_MODE;
        uint64_t scom_data = 0;
        size_t DATA_SIZE = sizeof(scom_data);

        //Need to set this bit on all functional processors
        for(const auto & l_procChip : l_funcProcs)
        {
            l_err = deviceRead(l_procChip, &scom_data, DATA_SIZE,
                               DEVICE_SCOM_ADDRESS(PU_INT_PC_GLOBAL_CFG));
            if( l_err)
            {
                break;
            }

            scom_data |= HOSTBOOT_MODE_MASK;

            l_err = deviceWrite(l_procChip, &scom_data, DATA_SIZE,
                                DEVICE_SCOM_ADDRESS(PU_INT_PC_GLOBAL_CFG));
            if( l_err)
            {
                break;
            }
        }
    }while(0);

    return l_err;
}

errlHndl_t IntrRp::_init()
{
    errlHndl_t l_err = nullptr;

    // get the PIR
    // Whichever cpu core this is running on is the MASTER cpu
    // Make master thread 0
    uint32_t cpuid = task_getcpuid();
    iv_masterCpu = cpuid;
    iv_masterCpu.threadId = 0;

    TRACFCOMP(g_trac_intr,"IntrRp::_init() Master cpu group[%d], "
              "chip[%d], core[%d], thread[%d]",
              iv_masterCpu.groupId, iv_masterCpu.chipId,
              iv_masterCpu.coreId, iv_masterCpu.threadId);

    // Do the initialization steps on the master proc chip
    // The other proc chips will be setup at a later point
    TARGETING::Target* procTarget = NULL;
    TARGETING::targetService().masterProcChipTargetHandle( procTarget );

    intr_hdlr_t* l_procIntrHdlr = new intr_hdlr_t(procTarget);
    iv_masterHdlr = l_procIntrHdlr;
    iv_chipList.push_back(l_procIntrHdlr);

    // Set up the IPC message Data area
    TARGETING::Target * sys = NULL;
    TARGETING::targetService().getTopLevelTarget( sys );
    assert(sys != NULL);
    uint64_t hrmor_base = sys->getAttr();
    KernelIpc::ipc_data_area.pir =
iv_masterCpu.word; KernelIpc::ipc_data_area.hrmor_base = hrmor_base; KernelIpc::ipc_data_area.msg_queue_id = IPC_DATA_AREA_CLEAR; do { // Set the Interrupt BAR Scom Registers specific to the master l_err = setMasterInterruptBARs(procTarget); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error setting Master Proc Interrupt BARs."); break; } // Set the common Interrupt BAR Scom Registers for the master l_err = setCommonInterruptBARs(iv_masterHdlr); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error setting Common Proc Interrupt BARs."); break; } l_err = setHbModeOnP3PCReg(); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error setting Hostboot Mode bit on in P3PC registers"); break; } uint8_t is_mpipl = 0; TARGETING::Target * sys = NULL; TARGETING::targetService().getTopLevelTarget(sys); if(sys && sys->tryGetAttr(is_mpipl) && is_mpipl) { TRACFCOMP(g_trac_intr,"Reset interrupt service for MPIPL"); l_err = resetIntpForMpipl(); if(l_err) { TRACFCOMP(g_trac_intr,"Failed to reset interrupt service for MPIPL"); break; } } //Disable Incoming PSI Interrupts TRACDCOMP(g_trac_intr, "IntrRp::_init() Disabling PSI Interrupts"); uint64_t l_disablePsiIntr = PSI_BRIDGE_INTP_STATUS_CTL_DISABLE_PSI; uint64_t size = sizeof(l_disablePsiIntr); l_err = deviceWrite(procTarget, &l_disablePsiIntr, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_INTP_STATUS_CTL_CLR_SCOM_ADDR)); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error disabling PSI Interrupts."); break; } // Check if we need to run the MPIPL path if(is_mpipl) { // In MPIPL we enable Interrupt before masking sources -- while the // system is in this state interupts can get stuck, need to let any // interrupts have time to present themselves before we mask things TRACFCOMP(g_trac_intr, "IntrRp::_init() Enabling PSIHB Interrupts"); //Enable PSIHB Interrupts l_err = enableInterrupts(l_procIntrHdlr); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error enabling Interrupts"); break; } TRACFCOMP(g_trac_intr, "IntrRp::_init() Masking Interrupts"); //Mask off all interrupt sources - these will be enabled as SW entities // register for specific interrupts via the appropriate message queue l_err = maskAllInterruptSources(); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error masking all interrupt sources."); break; } enableLsiInterrupts(); } else { TRACFCOMP(g_trac_intr, "IntrRp::_init() Masking Interrupts"); //Mask off all interrupt sources - these will be enabled as SW entities // register for specific interrupts via the appropriate message queue l_err = maskAllInterruptSources(); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error masking all interrupt sources."); break; } enableLsiInterrupts(); TRACFCOMP(g_trac_intr, "IntrRp::_init() Enabling PSIHB Interrupts"); //Enable PSIHB Interrupts l_err = enableInterrupts(l_procIntrHdlr); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::_init() Error enabling Interrupts"); break; } } // Create the kernel msg queue for external interrupts iv_msgQ = msg_q_create(); msg_intr_q_register(iv_msgQ, procTarget->getAttr()); // Create a task to handle the messages task_create(IntrRp::msg_handler, NULL); // Register event to be called on shutdown INITSERVICE::registerShutdownEvent(iv_msgQ, MSG_INTR_SHUTDOWN, INITSERVICE::INTR_PRIORITY); //The INTRP itself will monitor/handle PSU Interrupts // so unmask those interrupts l_err = unmaskInterruptSource(LSI_PSU, l_procIntrHdlr); //Set value for enabled threads uint64_t l_en_threads = get_enabled_threads(); TRACFCOMP(g_trac_intr, "IntrRp::_init() Threads 
enabled:" " %lx", l_en_threads); } while(0); return l_err; } void IntrRp::enableLsiInterrupts() { TRACDCOMP(g_trac_intr, "IntrRp:: enableLsiInterrupts() enter"); //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); uint64_t * l_lsiEoi = iv_masterHdlr->xiveIcBarAddr; l_lsiEoi += XIVE_IC_LSI_EOI_OFFSET; l_lsiEoi += (0xC00 / sizeof(uint64_t)); volatile uint64_t l_eoiRead = *l_lsiEoi; TRACFCOMP(g_trac_intr, "IntrRp:: enableLsiInterrupts() read 0x%lx from pointer %p", l_eoiRead, l_lsiEoi); //MMIO Complete, rest of code can run on any thread task_affinity_unpin(); TRACDCOMP(g_trac_intr, "IntrRp:: enableLsiInterrupts() exit"); } /** * Clear INT_PC registers that didn't get cleared by the HW reset * during the SBE steps of the MPIPL */ void IntrRp::clearIntPcRegs() { TRACDCOMP(g_trac_intr, "IntrRp:: clearIntPcRegs() enter"); //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); uint64_t * l_vsdTableAddr = iv_masterHdlr->xiveIcBarAddr + XIVE_IC_PC_VSD_TABLE_ADDR_OFFSET; *l_vsdTableAddr = 0x0000000000000000; uint64_t * l_vsdTableData = iv_masterHdlr->xiveIcBarAddr + XIVE_IC_PC_VSD_TABLE_DATA_OFFSET; *l_vsdTableData = 0x0000000000000000; uint64_t * l_blockModeAddr = iv_masterHdlr->xiveIcBarAddr + XIVE_IC_PC_VPD_BLOCK_MODE_OFFSET; *l_blockModeAddr = 0x0000000000000000; //MMIO Complete, rest of code can run on any thread task_affinity_unpin(); TRACDCOMP(g_trac_intr, "IntrRp:: clearIntPcRegs() exit"); } void IntrRp::acknowledgeInterrupt() { //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); //A uint16 store from the Acknowledge Hypervisor Interrupt // offset in the Thread Management BAR space signals // the interrupt is acknowledged volatile uint16_t * l_ack_int_ptr = (uint16_t *)iv_xiveTmBar1Address; l_ack_int_ptr += ACK_HYPERVISOR_INT_REG_OFFSET; eieio(); uint16_t l_ackRead = *l_ack_int_ptr; //MMIO Complete, rest of code can run on any thread task_affinity_unpin(); TRACFCOMP(g_trac_intr, "IntrRp::acknowledgeInterrupt(), read result: %16x", l_ackRead); } void IntrRp::disablePsiInterrupts(intr_hdlr_t* i_proc) { //Disable Incoming PSI Interrupts PSIHB_SW_INTERFACES_t * l_psihb_ptr = i_proc->psiHbBaseAddr; //Clear bit to disable PSI CEC interrupts l_psihb_ptr->psihbcr = (l_psihb_ptr->psihbcr & ~PSI_BRIDGE_INTP_STATUS_CTL_DISABLE_PSI); } errlHndl_t IntrRp::resetIntUnit(intr_hdlr_t* i_proc) { errlHndl_t l_err = NULL; uint64_t l_barValue = XIVE_RESET_POWERBUS_QUIESCE_ENABLE; uint64_t size = sizeof(l_barValue); uint32_t l_addr = XIVE_RESET_INT_CQ_RST_CTL_SCOM_ADDR; TARGETING::Target* procTarget = i_proc->proc; do { //Anything greater than DD10 should do the HW-based reset bool l_doHwReset = true; PVR_t l_pvr( mmio_pvr_read() & 0xFFFFFFFF ); if( l_pvr.isNimbusDD1() ) { l_doHwReset = false; } if (l_doHwReset) { //Disable the PSI CEC interrupts disablePsiInterrupts(i_proc); //Use HW-based XIVE Reset //First quiesce the power bus TRACDCOMP(g_trac_intr, "IntrRp::resetIntUnit() - " "Quiesce the PowerBus Interface"); l_err = deviceWrite(procTarget, &l_barValue, size, 
DEVICE_SCOM_ADDRESS(l_addr)); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::resetIntUnit() - " "Error Quiescing the PowerBus"); break; } //A short amount of time is needed to let the powerbus quiesce // before the next step in the reset can occur, so do a short // polling loop for the indicator the power bus has been quiesced uint64_t l_quiesceTimeout = XIVE_RESET_POWERBUS_QUIESCE_TIMEOUT; uint64_t l_timeWaited = 0; uint64_t reg = 0x0; do { if (l_timeWaited >= l_quiesceTimeout) { TRACFCOMP(g_trac_intr, "IntrRp::resetIntUnit() - Timeout " "waiting for PowerBus to quiesce"); // @errorlog tag // @errortype ERRL_SEV_UNRECOVERABLE // @moduleid INTR::MOD_INTRRP_RESETINTUNIT // @reasoncode INTR::RC_XIVE_PBUS_QUIESCE_TIMEOUT // @userdata1 XIVE Powerbus Scom Register Address // @userdata2 XIVE Powerbus Scom Register Data // // @devdesc Timeout waiting for Powerbus to Quiesce // l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_UNRECOVERABLE, // severity INTR::MOD_INTRRP_RESETINTUNIT, // moduleid INTR::RC_XIVE_PBUS_QUIESCE_TIMEOUT, // reason code l_addr, reg ); break; } uint64_t scom_len = sizeof(reg); //Read the powerbus state l_err = deviceRead( procTarget, ®, scom_len, DEVICE_SCOM_ADDRESS(l_addr)); if (l_err) { //Logging below this loop break; } if (reg & POWERBUS_STATE_QUIESCE) { //Powerbus Quiesced break; } else { nanosleep(0,XIVE_RESET_POWERBUS_QUIESCE_TIMEOUT / 10); l_timeWaited += XIVE_RESET_POWERBUS_QUIESCE_TIMEOUT / 10; } } while(1); if (l_err) { TRACFCOMP(g_trac_intr, "Error getting Powerbus state"); break; } TRACDCOMP(g_trac_intr, "Reset XIVE INT unit"); l_barValue = XIVE_RESET_UNIT_ENABLE; l_err = deviceWrite(procTarget, &l_barValue, size, DEVICE_SCOM_ADDRESS(l_addr)); if (l_err) { TRACFCOMP(g_trac_intr, "Error resetting XIVE INT unit"); break; } //Additional settings for fused mode //Needed because the HW XIVE reset clears too much HW state if (is_fused_mode()) { //Do a Read-Modify-Write on INT Thread Context Register //setting the FUSED_CORE_EN bit as the 'modify' part uint64_t l_int_tctxt_reg = 0x0; l_err = deviceRead(procTarget, &l_int_tctxt_reg, size, DEVICE_SCOM_ADDRESS(PU_INT_TCTXT_CFG)); if (l_err) { TRACFCOMP(g_trac_intr, "Error reading the INT_TCTXT_CFG(%lx) scom register", PU_INT_TCTXT_CFG); break; } l_int_tctxt_reg |= INT_TCTXT_CFG_FUSE_CORE_EN; l_err = deviceWrite(procTarget, &l_int_tctxt_reg, size, DEVICE_SCOM_ADDRESS(PU_INT_TCTXT_CFG)); if (l_err) { TRACFCOMP(g_trac_intr, "Error writing %lx the INT_TCTXT_CFG(%lx) scom register", l_int_tctxt_reg, PU_INT_TCTXT_CFG ); break; } } } else { //Do SW Based XIVE Reset if (i_proc == iv_masterHdlr) { l_err = disableInterrupts(i_proc); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::resetIntUnit() Error disabling interrupts"); break; } } } } while (0); if (l_err) { TRACFCOMP(g_trac_intr, "Error: Interrupt Engine not reset successfully"); } return l_err; } errlHndl_t IntrRp::disableInterrupts(intr_hdlr_t *i_proc) { errlHndl_t l_err = NULL; do { //Disable PSI CEC interrupts disablePsiInterrupts(i_proc); //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); //Pull thread context to register - View Section 4.4.4.15 of the // XIVE spec. Doing a 1b MMIO read will clear the cams VT bit. 
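            // (The load value is only traced below; the side effect of the
            // load is what matters - it clears the VT bit that
            // enableInterrupts() set to turn on LSI mode, so LSI interrupts
            // stop being presented to this thread.)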
            volatile uint8_t * l_pull_thread_ptr =
                                      (uint8_t *)iv_xiveTmBar1Address;
            l_pull_thread_ptr += PULL_THREAD_CONTEXT_OFFSET;
            eieio();
            uint8_t l_ackRead = *l_pull_thread_ptr;
            TRACFCOMP(g_trac_intr, "IntrRp::disableInterrupts(),"
                      " pull thread context read result: %8x", l_ackRead);
            eieio();
            sync();
            TRACFCOMP(g_trac_intr, INFO_MRK"LSI Mode inactive (cams_vt)");

            //MMIO Complete, rest of code can run on any thread
            task_affinity_unpin();

            // Unset Physical Thread Enable register in the PC space for the
            // master core - Simply reset both regs.
            uint64_t * l_ic_ptr = i_proc->xiveIcBarAddr;
            l_ic_ptr += XIVE_IC_BAR_INT_PC_MMIO_REG_OFFSET;
            volatile XIVE_IC_THREAD_CONTEXT_t * l_xive_ic_ptr =
               reinterpret_cast<volatile XIVE_IC_THREAD_CONTEXT_t *>(l_ic_ptr);

            TRACFCOMP(g_trac_intr, INFO_MRK"IntrRp::disableInterrupts() "
                      " Reset phys_thread_enable0_reg: 0x%016lx", 0x0);
            l_xive_ic_ptr->phys_thread_enable0_set = 0x0;

            TRACFCOMP(g_trac_intr, INFO_MRK"IntrRp::disableInterrupts() "
                      " Reset phys_thread_enable1_reg: 0x%016lx", 0x0);
            l_xive_ic_ptr->phys_thread_enable1_set = 0x0;

    } while (0);

    return l_err;
}

errlHndl_t IntrRp::enableInterrupts(intr_hdlr_t *i_proc)
{
    errlHndl_t err = NULL;
    PSIHB_SW_INTERFACES_t * l_psihb_ptr = i_proc->psiHbBaseAddr;

    do {
        //Set bit to route interrupts to CEC instead of FSP
        l_psihb_ptr->psihbcr =
                 (l_psihb_ptr->psihbcr | PSI_BRIDGE_ENABLE_CEC_INTERRUPT);

        //Set bit to enable PSIHB Interrupts
        l_psihb_ptr->icr =
                 (l_psihb_ptr->icr | PSI_BRIDGE_INTP_STATUS_CTL_ENABLE);

        // This XIVE register supports both Normal and Fused core, but Normal
        // Core mode can be safely assumed and the proper bits will be set.
        //
        //Set Physical Thread Enable register in the PC space for the master
        // core
        PIR_t l_masterPir(task_getcpuid());
        uint64_t l_masterCoreID = l_masterPir.coreId;
        uint64_t l_masterThreadID = l_masterPir.threadId;

        uint64_t * l_ic_ptr = i_proc->xiveIcBarAddr;
        l_ic_ptr += XIVE_IC_BAR_INT_PC_MMIO_REG_OFFSET;
        volatile XIVE_IC_THREAD_CONTEXT_t * l_xive_ic_ptr =
            reinterpret_cast<volatile XIVE_IC_THREAD_CONTEXT_t *>(l_ic_ptr);

        TRACFCOMP(g_trac_intr, INFO_MRK"IntrRp::enableInterrupts() "
                  "Set Physical Thread Enable for master core: %lx, "
                  "master thread: %lx ", l_masterCoreID, l_masterThreadID);

        //Normal Cores 0-15 are handled in thread enable0 reg
        if (l_masterCoreID < 16)
        {
            uint64_t l_enable = (XIVE_IC_THREAD0_ENABLE >>
                                 ((4*l_masterCoreID)+l_masterThreadID));
            TRACFCOMP(g_trac_intr, INFO_MRK"IntrRp::enableInterrupts() "
                      " Set phys_thread_enable0_reg: 0x%016lx", l_enable);
            l_xive_ic_ptr->phys_thread_enable0_set = l_enable;
        }
        else //Normal Cores 16-23 are handled in thread enable1 reg
        {
            //Shift offset as a second register is used for cores 16-23
            // so core 16 in reg 1 is equivalent to core 0 in reg0
            l_masterCoreID = l_masterCoreID - 16;
            uint64_t l_enable = (XIVE_IC_THREAD0_ENABLE >>
                                 ((4*l_masterCoreID)+l_masterThreadID));
            TRACFCOMP(g_trac_intr, INFO_MRK"IntrRp::enableInterrupts() "
                      " Set phys_thread_enable1_reg: 0x%016lx", l_enable);
            l_xive_ic_ptr->phys_thread_enable1_set = l_enable;
        }
        eieio();

        //Set bit to configure LSI mode for HB cec interrupts
        volatile XIVE_IVPE_THREAD_CONTEXT_t * this_ivpe_ptr =
            reinterpret_cast<volatile XIVE_IVPE_THREAD_CONTEXT_t *>
                                                      (iv_xiveTmBar1Address);
        this_ivpe_ptr->cams_vt = XIVE_IVPE_QW3_LSI_ENABLE;
        eieio();
        TRACFCOMP(g_trac_intr, INFO_MRK"LSI Mode active (cams_vt)");

    } while (0);

    //TODO RTC 150260 - Determine if any error checking can be done above,
    // if so create/return errorlogs. If not, change the function return type.
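    // Illustrative note on the thread-enable bit math above: if the master
    // PIR decoded to core 18, thread 0, the core falls in the enable1
    // register, the adjusted core ID is 18 - 16 = 2, and the value written is
    // XIVE_IC_THREAD0_ENABLE >> ((4*2) + 0), i.e. the core 2 / thread 0 bit
    // of phys_thread_enable1_set.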
return err; } void IntrRp::enableSlaveProcInterruptRouting(intr_hdlr_t *i_proc) { PSIHB_SW_INTERFACES_t * l_psihb_ptr = i_proc->psiHbBaseAddr; //Route LSI Trigger Page to Master Proc Chip by setting the // ESB Notification Address register on the PSIHB uint64_t l_baseAddr = iv_masterHdlr->proc->getAttr(); TRACFCOMP(g_trac_intr, INFO_MRK"Routing LSI Trigger page to Master Proc" " chip by setting esb notification address to:%lx", l_baseAddr + XIVE_IC_ESB_LSI_TRIGGER_PAGE_OFFSET); //Set notify to base address, then set valid bit uint64_t l_notifyValue = l_baseAddr + XIVE_IC_ESB_LSI_TRIGGER_PAGE_OFFSET; l_psihb_ptr->esbnotifyaddr = l_notifyValue; l_psihb_ptr->esbnotifyaddr = l_notifyValue + PSI_BRIDGE_ESB_NOTIFY_VALID; //Enable Interrupt routing to trigger page written above by setting // the Interrupt Control Register to all 0's l_psihb_ptr->icr = PSI_BRIDGE_ENABLE_LSI_INTR_REMOTE; } /** * Helper function to start the messge handler */ void* IntrRp::msg_handler(void * unused) { Singleton::instance().msgHandler(); return NULL; } void IntrRp::msgHandler() { TRACDCOMP(g_trac_intr, ENTER_MRK"IntrRp::msgHandler()"); while(1) { msg_t* msg = msg_wait(iv_msgQ); // wait for interrupt msg switch(msg->type) { //Both cases require the same functinality, EXTERN comes from // the kernel. COALESCE comes from userspace as the final step of // the EOI path involves a read, if that returns 1 it signifies a // new interrupt is already pending. So the EOI path will send a // new COALESCE message to trigger the handling. case MSG_INTR_COALESCE: case MSG_INTR_EXTERN: { //ext_intr_t type = NO_INTERRUPT; //Keep a list of all pending interrupts and which proc the // interrupt condition was seen on std::vector< std::pair > l_pendingIntr; uint32_t ackResponse = static_cast(msg->data[0]>>32); //Check if LSI-Based Interrupt if ((ackResponse & LSI_INTERRUPT) == LSI_INTERRUPT) { TRACFCOMP(g_trac_intr, "IntrRp::msgHandler() " "- LSI Interrupt Detected"); //An external interrupt comes from two paths // 1) kernel space - synchronous - response needed // 2) User space (coalesce interrupt) - asynchronous // - no response needed, just free message if (msg_is_async(msg)) { msg_free(msg); } else { // Acknowlege msg msg->data[1] = 0; msg_respond(iv_msgQ, msg); } //Read Interrupt Condition(s) and route to appropriate //interrupt handlers handleExternalInterrupt(); } } break; case MSG_INTR_CPU_WAKEUP: { uint64_t l_xirr_pir = msg->data[0]; uint64_t l_data0 = (l_xirr_pir & 0xFFFFFFFF); PIR_t l_pir = static_cast(l_data0); PIR_t l_core_pir = l_pir; l_core_pir.threadId = 0; if (iv_ipisPending.count(l_core_pir)) { TRACFCOMP(g_trac_intr,INFO_MRK "IntrRp::msgHandler Doorbell wakeup received" " for %d", l_pir.word); IPI_Info_t& ipiInfo = iv_ipisPending[l_core_pir]; ipiInfo.first &= ~(0x8000000000000000 >> l_pir.threadId); if (0 == ipiInfo.first) { msg_t* ipiMsg = ipiInfo.second; iv_ipisPending.erase(l_core_pir); ipiMsg->data[1] = 0; msg_respond(iv_msgQ, ipiMsg); } else { TRACFCOMP(g_trac_intr,INFO_MRK "IPI still pending for %x", ipiInfo.first); } } } break; case MSG_INTR_IPC: { errlHndl_t l_err = NULL; uint64_t l_xirr_pir = msg->data[0]; TRACFCOMP(g_trac_intr,INFO_MRK "IntrRp::msgHandler Doorbell IPC msg received" " for %x", l_xirr_pir); // Now handle any IPC messages // If something is registered for the IPC msg Registry_t::iterator r = iv_registry.find(ISN_INTERPROC); if(r != iv_registry.end() && (KernelIpc::ipc_data_area.msg_queue_id != IPC_DATA_AREA_CLEAR) && (KernelIpc::ipc_data_area.msg_queue_id != IPC_DATA_AREA_LOCKED)) { msg_q_t msgQ 
= r->second.msgQ; msg_t * rmsg = msg_allocate(); rmsg->type = r->second.msgType; rmsg->data[0] = ISN_INTERPROC; rmsg->data[1] = l_xirr_pir; rmsg->extra_data = NULL; int rc = msg_sendrecv_noblk(msgQ, rmsg, iv_msgQ); if(rc) { TRACFCOMP(g_trac_intr,ERR_MRK "IPC message received, but could " "not send message to the registered " "handler. Ignoring it. rc = %d", rc); } } else if(KernelIpc::ipc_data_area.msg_queue_id == IPC_DATA_AREA_CLEAR || KernelIpc::ipc_data_area.msg_queue_id == IPC_DATA_AREA_LOCKED) { TRACFCOMP(g_trac_intr,ERR_MRK "IPC message received but data area is in" " an invalid state. msg_queue_id = 0x%lx", KernelIpc::ipc_data_area.msg_queue_id); /*@ errorlog tag * @errortype ERRL_SEV_PREDICTIVE * @moduleid INTR::MOD_INTRRP_IPC * @reasoncode INTR::RC_IPC_DATA_INVALID * @userdata1 IPC Data Area MSG Queue ID * @userdata2 PIR * @devdesc Error encountered routing IPC * message */ l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_PREDICTIVE, // severity INTR::MOD_INTRRP_IPC, // moduleid INTR::RC_IPC_DATA_INVALID, // reason code KernelIpc::ipc_data_area.msg_queue_id, l_xirr_pir ); } else { TRACFCOMP(g_trac_intr,ERR_MRK "IPC Message received type but nothing registered " "to handle it. Ignoring it."); } // Always acknowlege msg to kernel // kernel expects rc in data[1] // rc of 0 means a successful return msg->data[1] = 0; msg_respond(iv_msgQ, msg); if (l_err) { l_err->collectTrace(INTR_COMP_NAME, 256); errlCommit(l_err, INTR_COMP_ID); } } break; case MSG_INTR_EOI: { // Use standrard EOI (End of Interrupt) sequence if(msg->data[0] != ISN_INTERPROC) { uint64_t intSource = msg->data[0]; PIR_t l_pir = msg->data[1]; //The physical HW EOI is issued as the defect is // discovered. At this point we just need to remove // the pending interrupt obj and unmasking the // interrupt source to properly handle new ints completeInterruptProcessing(intSource, l_pir); } msg_free(msg); } break; case MSG_INTR_REGISTER_MSGQ: { msg_q_t l_msgQ = reinterpret_cast(msg->data[0]); uint64_t l_type = msg->data[1]; LSIvalue_t l_intr_type = static_cast (l_type & LSI_SOURCE_MASK); errlHndl_t err = registerInterruptXISR(l_msgQ, l_type >> 32, l_intr_type); if (err) { TRACFCOMP(g_trac_intr, "IntrRp::msgHandler MSG_INTR_REGISTER_MSGQ error " "registering handler for interrupt type: %lx", l_intr_type); } else { //Enable (aka unmask) Interrupts for the source being // registered for for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { err = unmaskInterruptSource(l_intr_type, *targ_itr); if (err) { TRACFCOMP(g_trac_intr, "IntrRp::msgHandler MSG_INTR_REGISTER_MSGQ " "error unmasking interrupt type: %lx", l_intr_type); errlCommit(err, INTR_COMP_ID); break; } } } msg->data[1] = reinterpret_cast(err); msg_respond(iv_msgQ,msg); } break; case MSG_INTR_UNREGISTER_MSGQ: { TRACFCOMP(g_trac_intr, "INTR remove registration of interrupt type = 0x%lx", msg->data[0]); ext_intr_t l_type = static_cast(msg->data[0]); ext_intr_t l_intr_type = static_cast (l_type & LSI_SOURCE_MASK); if (l_type != ISN_INTERPROC) { // Mask the interrupt source prior to unregistering errlHndl_t err = maskInterruptSource(l_intr_type); if(err) { TRACFCOMP(g_trac_intr, "IntrRp::msgHandler MSG_INTR_UNREGISTER_MSGQ" " error masking interrupt type: %lx", l_intr_type); errlCommit(err,INTR_COMP_ID); } } // Unregister for this source and return rc in response msg_q_t msgQ = unregisterInterruptXISR(l_type); msg->data[1] = reinterpret_cast(msgQ); msg_respond(iv_msgQ, msg); } break; case MSG_INTR_ENABLE: { errlHndl_t err = 
NULL; for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { err = enableInterrupts(*targ_itr); if (err) { break; } } msg->data[1] = reinterpret_cast(err); msg_respond(iv_msgQ,msg); } break; case MSG_INTR_DISABLE: { errlHndl_t err = disableInterrupts(iv_masterHdlr); msg->data[1] = reinterpret_cast(err); msg_respond(iv_msgQ,msg); } break; case MSG_INTR_ENABLE_PSI_INTR: { TRACFCOMP(g_trac_intr, "MSG_INTR_ENABLE_PSI_INTR received"); TARGETING::Target * target = reinterpret_cast(msg->data[0]); errlHndl_t err = enableSlaveProcInterrupts(target); msg->data[1] = reinterpret_cast(err); msg_respond(iv_msgQ,msg); } break; // Called when a new cpu becomes active other than the master // Expect a call for each new core case MSG_INTR_ADD_CPU: { //Get the base PIR sent from the kernel PIR_t pir = msg->data[1]; //No need to care about thread ID as that will be gathered // below pir.threadId = 0; //Push back base core PIR for later use iv_cpuList.push_back(pir); TRACFCOMP(g_trac_intr,"Add CPU group[%d], chip[%d]," "core[%d], thread[%d]", pir.groupId, pir.chipId, pir.coreId, pir.threadId); //Get threads to be enabled so they will be monitored uint64_t en_threads = get_enabled_threads(); iv_ipisPending[pir] = IPI_Info_t(en_threads, msg); //Create handleCpuTimeout task - this task will monitor // for wakeup messages from each individual expected // thread to be sent. task_create(handleCpuTimeout, reinterpret_cast(pir.word)); TRACFCOMP(g_trac_intr, "handleCpuTimeout task started" " responding to kernel message"); } break; case MSG_INTR_ADD_CPU_TIMEOUT: { PIR_t pir = msg->data[0]; TRACDCOMP("IntrRp::msgHandler() CPU Timeout Message " "received for: %x", pir.word); size_t count = msg->data[1]; if(iv_ipisPending.count(pir)) { if (count < CPU_WAKEUP_INTERVAL_COUNT) { TRACFCOMP(g_trac_intr, INFO_MRK "Cpu wakeup pending on %x", pir.word); // Tell child thread to retry. msg->data[1] = EAGAIN; } else // Timed out. { TRACFCOMP(g_trac_intr, ERR_MRK "Cpu wakeup timeout on %x", pir.word); // Tell child thread to exit. msg->data[1] = 0; // Get saved thread info. IPI_Info_t& ipiInfo = iv_ipisPending[pir]; msg_t* ipiMsg = ipiInfo.second; iv_ipisPending.erase(pir); // Respond to waiting thread with ETIME. ipiMsg->data[1] = -ETIME; msg_respond(iv_msgQ, ipiMsg); } } else // Ended successfully. { TRACFCOMP(g_trac_intr, INFO_MRK "Cpu wakeup completed on %x", pir.word); // Tell child thread to exit. 
msg->data[1] = 0; } msg_respond(iv_msgQ, msg); } break; case MSG_INTR_SHUTDOWN: { TRACFCOMP(g_trac_intr,"Shutdown event received"); shutDown(msg->data[0]); msg_respond(iv_msgQ, msg); } break; case MSG_INTR_ADD_HBNODE: // node info for mpipl { #ifdef CONFIG_MPIPL_ENABLED //TODO RTC 134431 errlHndl_t err = addHbNodeToMpiplSyncArea(msg->data[0]); if(err) { errlCommit(err,INTR_COMP_ID); } #endif msg_free(msg); // async message } break; case MSG_INTR_DRAIN_QUEUE: { //The purpose of this message is allow the //intrp to drain its message queue of pending EOIs //just respond msg_respond(iv_msgQ,msg); } break; default: msg->data[1] = -EINVAL; msg_respond(iv_msgQ, msg); } } } errlHndl_t IntrRp::sendXiveEOI(uint64_t& i_intSource, PIR_t& i_pir) { errlHndl_t l_err = NULL; do { //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); //LSI ESB Internal to the IVPE of the Master Proc volatile uint64_t * l_lsiEoi = iv_masterHdlr->xiveIcBarAddr; l_lsiEoi += XIVE_IC_LSI_EOI_OFFSET; uint64_t l_intPending = *l_lsiEoi; //MMIO Complete, rest of code can run on any thread task_affinity_unpin(); //If an interrupt is pending, HB userspace will send a message to // trigger the handling of a 'new' interrupt. In this situation the // interrupt will not be triggered via the kernel. if (l_intPending == 1) { TRACFCOMP(g_trac_intr, "IntrRp::Need to acknowledge interrupt\n"); //First acknowledge the interrupt so it won't be re-presented acknowledgeInterrupt(); uint64_t l_data0 = LSI_INTERRUPT << 32; if (iv_msgQ) { msg_t * int_msg = msg_allocate(); int_msg->type = MSG_INTR_COALESCE; int_msg->data[0] = reinterpret_cast(l_data0); int send_rc = msg_send(iv_msgQ, int_msg); if (send_rc != 0) { TRACFCOMP(g_trac_intr, ERR_MRK"IntrRp::sendEOI error " "sending coalesce message"); /*@ errorlog tag * @errortype ERRL_SEV_UNRECOVERABLE * @moduleid INTR::MOD_INTRRP_XIVE_SENDEOI * @reasoncode INTR::RC_MESSAGE_SEND_ERROR * @userdata1 RC from msg_send command * @devdesc Error encountered sending coalesce * message to INTRP */ l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_UNRECOVERABLE, // severity INTR::MOD_INTRRP_XIVE_SENDEOI, // moduleid INTR::RC_MESSAGE_SEND_ERROR, // reason code send_rc, 0 ); break; } } } } while(0); return l_err; } errlHndl_t IntrRp::sendEOI(uint64_t& i_intSource, PIR_t& i_pir) { intr_hdlr_t* l_proc = NULL; errlHndl_t l_err = NULL; //Find target handle for Proc to send EOI to for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { uint64_t l_groupId = (*targ_itr)->proc->getAttr (); uint64_t l_chipId = (*targ_itr)->proc->getAttr (); //Core + Thread IDs not important so use 0's PIR_t l_pir = PIR_t(l_groupId, l_chipId, 0, 0); if (l_pir == i_pir) { l_proc = *targ_itr; break; } } do { //The XIVE HW is expecting these MMIO accesses to come from the // core/thread they were setup (master core, thread 0) // These functions will ensure this code executes there task_affinity_pin(); task_affinity_migrate_to_master(); //Send an EOI to the Power bus using the PSIHB ESB Space //This is done with a read to the page specific to the interrupt source. 
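        // (The per-source pages live in the PSIHB ESB space mapped at
        // psiHbEsbBaseAddr by setPsiHbEsbBAR() in setCommonInterruptBARs().)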
//Each interrupt source gets one page uint64_t * l_psiHbPowerBusEoiAddr = l_proc->psiHbEsbBaseAddr + ((i_intSource)*PAGE_SIZE)/sizeof(uint64_t); uint64_t eoiRead = *l_psiHbPowerBusEoiAddr; //MMIO Complete, rest of code can run on any thread task_affinity_unpin(); if (eoiRead != 0) { TRACFCOMP(g_trac_intr, ERR_MRK"IntrRp::sendEOI error sending EOI" " to PSIHB ESB. EOI load returned: %x", eoiRead); /*@ errorlog tag * @errortype ERRL_SEV_UNRECOVERABLE * @moduleid INTR::MOD_INTRRP_SENDEOI * @reasoncode INTR::RC_PSIHB_ESB_EOI_FAIL * @userdata1 Value read from EOI load * @userdata2 Interrupt Source to issue EOI to * @devdesc Unexpected RC from issuing PSIHB EOI store */ l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_UNRECOVERABLE, // severity INTR::MOD_INTRRP_SENDEOI, // moduleid INTR::RC_PSIHB_ESB_EOI_FAIL, // reason code eoiRead, // read value i_intSource // interrupt source number ); break; } TRACDCOMP(g_trac_intr, "IntrRp::sendEOI read response: %lx", eoiRead); l_err = sendXiveEOI(i_intSource, i_pir); } while(0); return l_err; } void IntrRp::routeInterrupt(intr_hdlr_t* i_proc, ext_intr_t i_type, PIR_t& i_pir) { //Search if anyone is subscribed to the given // interrupt source Registry_t::iterator r = iv_registry.find(i_type); if(r != iv_registry.end() && i_type != ISN_INTERPROC) { msg_q_t msgQ = r->second.msgQ; msg_t * rmsg = msg_allocate(); rmsg->type = r->second.msgType; rmsg->data[0] = i_type; // interrupt type rmsg->data[1] = i_pir.word; rmsg->extra_data = NULL; int rc = msg_sendrecv_noblk(msgQ,rmsg, iv_msgQ); if(rc) { TRACFCOMP(g_trac_intr,ERR_MRK "External Interrupt received type = %d, " "but could not send message to registered" " handler. Ignoring it. rc = %d", (uint32_t) i_type, rc); } } else if (i_type == LSI_PSU) { handlePsuInterrupt(i_type, i_proc, i_pir); } else // no queue registered for this interrupt type { // Throw it away for now. TRACFCOMP(g_trac_intr,ERR_MRK "External Interrupt received type = %d, but " "nothing registered to handle it. 
Ignoring it.", (uint32_t)i_type); } return; } void IntrRp::handleExternalInterrupt() { //Read LSI Interrupt Status register from each enabled // proc chip to see which caused the interrupt for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { uint64_t lsiIntStatus = (*targ_itr)->psiHbBaseAddr->lsiintstatus; TRACFCOMP(g_trac_intr, "IntrRp::msgHandler() lsiIntStatus 0x%016lx", lsiIntStatus); //Loop through each bit, and add any pending // interrupts to list for later handling for (uint8_t i=0; i < LSI_LAST_SOURCE; i++) { uint64_t lsiIntMask = 0x8000000000000000 >> i; if (lsiIntMask & lsiIntStatus) { TRACDCOMP(g_trac_intr,"IntrRp::msgHandler()" " Interrupt Type: %d found", i); //Get PIR value for the proc with the // interrupt condition uint64_t l_groupId = (*targ_itr)->proc->getAttr(); uint64_t l_chipId = (*targ_itr)->proc->getAttr(); //Core + Thread IDs not important so use 0's PIR_t l_pir = PIR_t(l_groupId, l_chipId, 0, 0); //Make object to search pending interrupt // list for std::pair l_intr = std::make_pair( l_pir, static_cast(i)); //See if an interrupt with from Proc with // the same PIR + interrupt source are // still being processed auto l_found = std::find_if( iv_pendingIntr.begin(), iv_pendingIntr.end(), [&l_intr](auto k)->bool { return ((k.first == l_intr.first) && (k.second == l_intr.second)); }); if (l_found != iv_pendingIntr.end()) { TRACFCOMP(g_trac_intr, "IntrRp::msgHandler() Pending" " Interrupt already found for pir: 0x%lx," " interrupt type: %d, Ignoring", l_pir, static_cast(i)); } else { //New pending interrupt for source type TRACFCOMP(g_trac_intr, "IntrRp::msgHandler() External " "Interrupt found for pir: 0x%lx,interrupt type: %d", l_pir, static_cast(i)); //Add to list of interrupts in flight iv_pendingIntr.push_back(l_intr); uint64_t intSource = l_intr.second; //Mask off current interrupt source maskInterruptSource(intSource, *targ_itr); //Send EOI so other interrupt sources other than the one // masked previously can be presented sendXiveEOI(intSource, l_pir); //Call function to route the interrupt //to the appropriate handler routeInterrupt((*targ_itr), static_cast(i), l_pir); } } } } } errlHndl_t IntrRp::maskAllInterruptSources() { errlHndl_t l_err = NULL; for (uint8_t i = 0; i < LSI_LAST_SOURCE; i++) { TRACDCOMP(g_trac_intr, "MaskInterruptSource: %d", i); l_err = maskInterruptSource(i); if (l_err) { TRACFCOMP(g_trac_intr, "Error Masking Interrupt source: %x", i); break; } } TRACDCOMP(g_trac_intr, EXIT_MRK"MaskAllInterruptSources"); return l_err; } errlHndl_t IntrRp::maskInterruptSource(uint8_t i_intr_source, intr_hdlr_t *i_chip) { errlHndl_t l_err = NULL; uint64_t * l_psiHbEsbptr = i_chip->psiHbEsbBaseAddr; l_psiHbEsbptr += (((i_intr_source*PAGE_SIZE)+PSI_BRIDGE_ESB_OFF_OFFSET) /sizeof(uint64_t)); //MMIO Read to this address transitions the ESB to the off state volatile uint64_t l_maskRead = *l_psiHbEsbptr; eieio(); //Perform 2nd read to verify in OFF state using query offset l_psiHbEsbptr = i_chip->psiHbEsbBaseAddr + (((i_intr_source*PAGE_SIZE)+PSI_BRIDGE_ESB_QUERY_OFFSET) /sizeof(uint64_t)); l_maskRead = *l_psiHbEsbptr; if (l_maskRead != ESB_STATE_OFF) { TRACFCOMP(g_trac_intr, "Error masking interrupt source: %x." 
" ESB state is: %lx.", i_intr_source, l_maskRead); l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTRRP_MASKINTERRUPT, // moduleid INTR::RC_XIVE_ESB_WRONG_STATE, // reason code i_intr_source, l_maskRead ); } return l_err; } errlHndl_t IntrRp::maskInterruptSource(uint8_t l_intr_source) { bool l_masked = false; errlHndl_t l_err = NULL; for(MaskList_t::iterator mask_itr = iv_maskList.begin(); mask_itr != iv_maskList.end(); ++mask_itr) { if ((*mask_itr) == l_intr_source) { TRACFCOMP(g_trac_intr, "IntrRp::maskInterruptSource()" " Interrupt source: %x already masked - ignoring", l_intr_source); l_masked = true; } } if(l_masked == false) { iv_maskList.push_back(l_intr_source); for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { l_err = maskInterruptSource(l_intr_source, *targ_itr); if (l_err) { break; } } } return l_err; } errlHndl_t IntrRp::unmaskInterruptSource(uint8_t l_intr_source, intr_hdlr_t *i_proc, bool i_force_unmask) { bool l_masked = false; errlHndl_t l_err = NULL; for(MaskList_t::iterator mask_itr = iv_maskList.begin(); mask_itr != iv_maskList.end(); ++mask_itr) { if ((*mask_itr) == l_intr_source) { TRACFCOMP(g_trac_intr, "IntrRp::unmaskInterruptSource()" " Interrupt source: %x masked - will unmask", l_intr_source); l_masked = true; iv_maskList.erase(mask_itr); break; } } if (l_masked == true || i_force_unmask == true) { for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { uint64_t * l_psiHbEsbptr = (*targ_itr)->psiHbEsbBaseAddr; l_psiHbEsbptr += (((l_intr_source*PAGE_SIZE)+PSI_BRIDGE_ESB_RESET_OFFSET) /sizeof(uint64_t)); //MMIO Read to this address transitions the ESB to the RESET state volatile uint64_t l_unmaskRead = *l_psiHbEsbptr; eieio(); //Perform 2nd read to verify in RESET state using query offset l_psiHbEsbptr = (*targ_itr)->psiHbEsbBaseAddr + (((l_intr_source*PAGE_SIZE)+PSI_BRIDGE_ESB_QUERY_OFFSET) /sizeof(uint64_t)); l_unmaskRead = *l_psiHbEsbptr; if (l_unmaskRead == ESB_STATE_OFF) { TRACFCOMP(g_trac_intr, "Error unmasking interrupt source: %x." 
" ESB state is: %lx.", l_intr_source, l_unmaskRead); /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTRRP_UNMASKINTERRUPT * @reasoncode INTR::RC_XIVE_ESB_WRONG_STATE * @userdata1 Interrupt Source Number * @userdata12 MMIO Read Value for unmasking * @devdesc Error unmasking interrupt source */ l_err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTRRP_UNMASKINTERRUPT, // moduleid INTR::RC_XIVE_ESB_WRONG_STATE, // reason code l_intr_source, l_unmaskRead ); break; } } } return l_err; } errlHndl_t IntrRp::setMasterInterruptBARs(TARGETING::Target * i_target, bool i_enable) { errlHndl_t l_err = NULL; do { l_err = setXiveIvpeTmBAR1(i_target, i_enable); if (l_err) { TRACFCOMP(g_trac_intr, "Error setting XIVE TM BAR1"); break; } } while (0); return l_err; } errlHndl_t IntrRp::setCommonInterruptBARs(intr_hdlr_t * i_proc, bool i_enable) { errlHndl_t l_err = NULL; do { l_err = setFspBAR(i_proc, i_enable); if(l_err) { TRACFCOMP(g_trac_intr, "Error setting FSP BAR"); break; } l_err = setPsiHbBAR(i_proc, i_enable); if (l_err) { TRACFCOMP(g_trac_intr, "Error setting PSIHB BAR"); break; } //Turn off VPC error when in LSI mode l_err = disableVPCPullErr(i_proc); if (l_err) { TRACFCOMP(g_trac_intr, "Error masking VPC Pull Lsi Err"); break; } l_err = setPsiHbEsbBAR(i_proc, i_enable); if (l_err) { TRACFCOMP(g_trac_intr, "Error setting PSIHB ESB BAR"); break; } l_err = setXiveIcBAR(i_proc, i_enable); if (l_err) { TRACFCOMP(g_trac_intr, "Error setting XIVE IC BAR"); break; } } while (0); return l_err; } errlHndl_t IntrRp::handlePsuInterrupt(ext_intr_t i_type, intr_hdlr_t* i_proc, PIR_t& i_pir) { //TODO FIXME RTC 149698 // Long term will leverage mask register to avoid // polling loop below errlHndl_t l_err = NULL; TARGETING::Target* procTarget = i_proc->proc; do { size_t scom_len = 8; uint64_t l_reg = 0x0; l_err = deviceRead(procTarget, &l_reg, scom_len, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_PSU_DOORBELL_REG)); if (l_err) { break; } TRACDCOMP( g_trac_intr, "%.8X = %.16llX", PSI_BRIDGE_PSU_DOORBELL_REG, l_reg ); //If the interrupt is driven by the doorbell, yield // to give the driver a chance to take care of it if( l_reg & PSI_BRIDGE_PSU_HOST_DOORBELL ) { nanosleep(0,10000); task_yield(); } //Clear the PSU Scom Reg Interrupt Status register // but ignore the bit that the PSU driver uses // to avoid a race condition uint64_t l_andVal = PSI_BRIDGE_PSU_HOST_DOORBELL; uint64_t size = sizeof(l_andVal); l_err = deviceWrite(procTarget, &l_andVal, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_PSU_DOORBELL_ANDREG)); if (l_err) { TRACFCOMP(g_trac_intr, "Error clearing scom - %x", PSI_BRIDGE_PSU_DOORBELL_ANDREG); break; } //Interrupt Processing is complete - re-enable // this interrupt source uint64_t intSource = i_type; TRACFCOMP(g_trac_intr, "handlePsuInterrupt - Calling completeInterruptProcessing"); completeInterruptProcessing(intSource, i_pir); } while(0); return l_err; } void IntrRp::completeInterruptProcessing(uint64_t& i_intSource, PIR_t& i_pir) { intr_hdlr_t* l_proc = NULL; errlHndl_t l_err = NULL; //Find target handle for Proc to remove pending interrupt for for (ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { uint64_t l_groupId = (*targ_itr)->proc->getAttr (); uint64_t l_chipId = (*targ_itr)->proc->getAttr (); //Core + Thread IDs not important so use 0's PIR_t l_pir = PIR_t(l_groupId, l_chipId, 0, 0); if (l_pir == i_pir) { l_proc = *targ_itr; break; } } do { //Check if we found a matching proc handler for the 
interrupt to remove // This is needed so the iNTRRP will honor new interrupts from this // source if (l_proc == NULL) { TRACFCOMP(g_trac_intr, ERR_MRK"IntrRp::completeInterruptProcessing:" " couldn't find proc handler that matches pir: 0x%lx", i_pir); break; } else { //Make object to search pending interrupt // list for std::pair l_intr = std::make_pair( i_pir, static_cast(i_intSource)); //See if an interrupt with from Proc with // the same PIR + interrupt source are // still being processed auto l_found = std::find_if( iv_pendingIntr.begin(), iv_pendingIntr.end(), [&l_intr](auto k)->bool { return (k.first == l_intr.first) && (k.second == l_intr.second); }); //Remove Interrupt source from pending interrupt list if (l_found == iv_pendingIntr.end()) { TRACFCOMP(g_trac_intr,ERR_MRK"IntrRp::completeInterruptHandling()" " Pending Interrupt NOT found for pir:" " 0x%lx, interrupt type: %d. Ignoring.", i_pir, static_cast(i_intSource)); } else { TRACFCOMP(g_trac_intr, "IntrRp::completeInterruptProcessing()" " Removing pending interrupt for pir: 0x%lx," "interrupt type: %d", i_pir, static_cast(i_intSource)); iv_pendingIntr.erase(l_found); } //Enable this interrupt source again l_err = unmaskInterruptSource(i_intSource, l_proc, true); if (l_err) { TRACFCOMP(g_trac_intr, "IntrRp::completeInterruptProcessing " "error unmasking interrupt type: %lx", i_intSource); errlCommit(l_err, INTR_COMP_ID); } //Send final EOI to enable interrupts for this source again sendEOI(i_intSource, i_pir); } } while(0); return; } errlHndl_t IntrRp::getNxIRSN(TARGETING::Target * i_target, uint32_t& o_irsn, uint32_t& o_num) { errlHndl_t err = NULL; size_t scom_len = sizeof(uint64_t); uint64_t reg = 0x0; do{ err = deviceRead ( i_target, ®, scom_len, DEVICE_SCOM_ADDRESS(NX_BUID_SCOM_ADDR)); if(err) { break; } //only calc IRSN if downstream interrupts are enabled o_irsn = 0; if(reg &(1ull << (63-NX_BUID_ENABLE))) //reg has NX_BUID_ENABLE set { uint32_t l_mask = ((static_cast(reg >> NX_IRSN_MASK_SHIFT) & NX_IRSN_MASK_MASK) | NX_IRSN_UPPER_MASK); o_irsn = ((static_cast(reg >> NX_IRSN_COMP_SHIFT) & IRSN_COMP_MASK) & l_mask); //To get the number of interrupts, we need to "count" the 0 bits //cheat by extending mask to FFF8 + mask, then invert and add 1 o_num = (~((~IRSN_COMP_MASK) | l_mask)) +1; } }while(0); TRACFCOMP(g_trac_intr,"NX_ISRN: 0x%x, num: 0x%x",o_irsn, o_num); return err; } //---------------------------------------------------------------------------- errlHndl_t IntrRp::registerInterruptXISR(msg_q_t i_msgQ, uint32_t i_msg_type, ext_intr_t i_xisr) { errlHndl_t err = NULL; Registry_t::iterator r = iv_registry.find(i_xisr); if(r == iv_registry.end()) { TRACFCOMP(g_trac_intr,"INTR::register intr type 0x%x", i_xisr); iv_registry[i_xisr] = intr_response_t(i_msgQ,i_msg_type); } else { if(r->second.msgQ != i_msgQ) { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTRRP_REGISTERINTERRUPT * @reasoncode INTR::RC_ALREADY_REGISTERED * @userdata1 XISR * @userdata2 0 * * @devdesc Interrupt type already registered * */ err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTRRP_REGISTERINTERRUPT, // moduleid INTR::RC_ALREADY_REGISTERED, // reason code i_xisr, 0 ); } } return err; } msg_q_t IntrRp::unregisterInterruptXISR(ext_intr_t i_xisr) { msg_q_t msgQ = NULL; Registry_t::iterator r = iv_registry.find(i_xisr); if(r != iv_registry.end()) { TRACFCOMP(g_trac_intr,INFO_MRK "Removing interrupt listener: %lx", i_xisr); msgQ = r->second.msgQ; iv_registry.erase(r); } return msgQ; } 
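//----------------------------------------------------------------------------
// Illustrative sketch only (not called by the resource provider): shows how
// the ESB side-effect MMIO addresses used in maskInterruptSource() and
// unmaskInterruptSource() are formed. The helper name is hypothetical; the
// offsets and PAGE_SIZE are the constants already used above. Each LSI
// source owns one ESB page, and a load from the OFF, RESET, or QUERY offset
// within that page masks, unmasks, or queries the source's state machine.
static inline volatile uint64_t * esbSideEffectAddr(uint64_t * i_esbBase,
                                                    uint8_t i_source,
                                                    uint64_t i_pageOffset)
{
    // e.g. i_pageOffset = PSI_BRIDGE_ESB_OFF_OFFSET to mask,
    //      PSI_BRIDGE_ESB_RESET_OFFSET to unmask,
    //      PSI_BRIDGE_ESB_QUERY_OFFSET to read back the ESB state
    return i_esbBase + (((i_source * PAGE_SIZE) + i_pageOffset)
                        / sizeof(uint64_t));
}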
void IntrRp::shutDown(uint64_t i_status) { msg_t * rmsg = msg_allocate(); errlHndl_t l_err = NULL; TRACFCOMP(g_trac_intr, "IntrRp::shutDown - Sending shutdown message" " to registered handlers"); // Call everyone and say shutting down! for(Registry_t::iterator r = iv_registry.begin(); r != iv_registry.end(); ++r) { msg_q_t msgQ = r->second.msgQ; rmsg->type = r->second.msgType; rmsg->data[0] = SHUT_DOWN; rmsg->data[1] = i_status; rmsg->extra_data = NULL; int rc = msg_sendrecv(msgQ,rmsg); if(rc) { TRACFCOMP(g_trac_intr,ERR_MRK "Could not send message to registered handler to Shut" " down. Ignoring it. rc = %d", rc); } } msg_free(rmsg); //Mask any future interrupts to avoid receiving anymore while in the process // of resetting the rest of the Interrupt Logic l_err = maskAllInterruptSources(); if (l_err) { delete l_err; //errl comp already shutdown. Log error and continue l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error masking all interrupt sources."); } //Reset PSIHB Interrupt Space TRACFCOMP(g_trac_intr, "Reset PSIHB Interrupt Space"); //First reset INTRP logic for slave procs for(ChipList_t::iterator targ_itr = iv_chipList.begin(); targ_itr != iv_chipList.end(); ++targ_itr) { if (*targ_itr != iv_masterHdlr) { PSIHB_SW_INTERFACES_t * this_psihb_ptr = (*targ_itr)->psiHbBaseAddr; this_psihb_ptr->icr = PSI_BRIDGE_INTP_STATUS_CTL_RESET; resetIntUnit(*targ_itr); //Enable VPC Pull Err regardles of XIVE HW Reset settings l_err = enableVPCPullErr((*targ_itr)->proc); if (l_err) { delete l_err; l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error re-enabling VPC Pull Err"); } //Disable common interrupt BARs l_err = setCommonInterruptBARs(*targ_itr, false); if (l_err) { delete l_err; //errl cmp already shutdown. Log error + continue l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error disabling Common Interrupt BARs"); } } } //Then reset master proc INTRP logic PSIHB_SW_INTERFACES_t * this_psihb_ptr = iv_masterHdlr->psiHbBaseAddr; this_psihb_ptr->icr = PSI_BRIDGE_INTP_STATUS_CTL_RESET; TRACFCOMP(g_trac_intr, "Reset PSIHB INTR Complete"); //Reset XIVE Interrupt unit resetIntUnit(iv_masterHdlr); //Enable VPC Pull Err regardles of XIVE HW Reset settings l_err = enableVPCPullErr(iv_masterHdlr->proc); if (l_err) { delete l_err; l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error re-enabling VPC Pull Err"); } //Disable common interrupt BARs for master proc l_err = setCommonInterruptBARs(iv_masterHdlr, false); if (l_err) { delete l_err; //errl cmp already shutdown. Log error + continue l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error disabling Common" " Interrupt BARs for master proc"); } //Disable master interrupt BARs l_err = setMasterInterruptBARs(iv_masterHdlr->proc, false); if (l_err) { delete l_err; //errl cmp already shutdown. 
Log error + continue l_err = nullptr; TRACFCOMP(g_trac_intr, "IntrRp::shutDown() Error disabling Master Interrupt BARs"); } #ifdef CONFIG_ENABLE_P9_IPI size_t threads = cpu_thread_count(); uint64_t en_threads = get_enabled_threads(); for(CpuList_t::iterator pir_itr = iv_cpuList.begin(); pir_itr != iv_cpuList.end(); ++pir_itr) { PIR_t pir = *pir_itr; for(size_t thread = 0; thread < threads; ++thread) { // Skip threads that were never started if( !(en_threads & (0x8000000000000000>>thread)) ) { TRACDCOMP(g_trac_intr,"IntrRp::shutDown: Skipping thread %d",thread); continue; } pir.threadId = thread; //wh_p9 disableInterruptPresenter(pir); } } #endif TRACFCOMP(g_trac_intr,INFO_MRK"INTR is shutdown"); } //---------------------------------------------------------------------------- #ifdef CONFIG_MPIPL_ENABLED errlHndl_t IntrRp::hw_disableRouting(TARGETING::Target * i_proc, INTR_ROUTING_t i_rx_tx) { errlHndl_t err = NULL; do { size_t scom_len = sizeof(uint64_t); // PSI PSIHB_ISRN_REG_t reg; err = deviceRead ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(PSIHB_ISRN_REG_t::PSIHB_STATUS_CTL_REG) ); if(err) { break; } switch(i_rx_tx) { case INTR_UPSTREAM: reg.uie = 0; //upstream interrupt enable = 0 (disable) break; case INTR_DOWNSTREAM: reg.die = 0; //downstream interrupt enable = 0 (disable) break; } scom_len = sizeof(uint64_t); err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(PSIHB_ISRN_REG_t::PSIHB_STATUS_CTL_REG) ); if(err) { break; } for(size_t i = 0; i < sizeof(cv_PE_BAR_SCOM_LIST)/sizeof(cv_PE_BAR_SCOM_LIST[0]); ++i) { uint64_t reg = 0; scom_len = sizeof(uint64_t); err = deviceRead ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(cv_PE_BAR_SCOM_LIST[i]) ); if(err) { break; } switch(i_rx_tx) { case INTR_UPSTREAM: // reset bit PE_IRSN_UPSTREAM reg &= ~((1ull << (63-PE_IRSN_UPSTREAM))); break; case INTR_DOWNSTREAM: // reset bit PE_IRSN_DOWNSTREAM reg &= ~((1ull << (63-PE_IRSN_DOWNSTREAM))); break; } scom_len = sizeof(uint64_t); err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(cv_PE_BAR_SCOM_LIST[i]) ); if(err) { break; } } if(err) { break; } //NX has no up/down stream enable bit - just one enable bit. 
//The NX should be cleared as part of an MPIPL so no //interrupts should be pending from this unit, however //we must allow EOIs to flow, so only disable when //downstream is requested if(i_rx_tx == INTR_DOWNSTREAM) { uint64_t reg = 0; scom_len = sizeof(uint64_t); err = deviceRead ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(NX_BUID_SCOM_ADDR) ); if(err) { break; } // reset bit NX_BUID_ENABLE reg &= ~(1ull << (63-NX_BUID_ENABLE)); scom_len = sizeof(uint64_t); err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(NX_BUID_SCOM_ADDR) ); if(err) { break; } } } while(0); return err; } #endif //---------------------------------------------------------------------------- #ifdef CONFIG_MPIPL_ENABLED errlHndl_t IntrRp::hw_resetIRSNregs(TARGETING::Target * i_proc) { errlHndl_t err = NULL; size_t scom_len = sizeof(uint64_t); do { // PSI PSIHB_ISRN_REG_t reg1; // zeros self reg1.irsn -= 1; // default all '1's according to scom spec // all other fields = 0 err = deviceWrite ( i_proc, ®1, scom_len, DEVICE_SCOM_ADDRESS(PSIHB_ISRN_REG_t::PSIHB_ISRN_REG) ); if(err) { break; } // PE for(size_t i = 0; i < sizeof(cv_PE_BAR_SCOM_LIST)/sizeof(cv_PE_BAR_SCOM_LIST[0]); ++i) { uint64_t reg = 0; scom_len = sizeof(uint64_t); // Note: no default value specified in scom spec - assume 0 err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(cv_PE_IRSN_COMP_SCOM_LIST[i]) ); if(err) { break; } scom_len = sizeof(uint64_t); // Note: no default value specified in scom spec - assume 0 err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(cv_PE_IRSN_MASK_SCOM_LIST[i]) ); if(err) { break; } } if(err) { break; } // NX [1:19] is BUID [20:32] mask // No default value specified in scom spec. assume 0 uint64_t reg = 0; scom_len = sizeof(uint64_t); err = deviceWrite ( i_proc, ®, scom_len, DEVICE_SCOM_ADDRESS(NX_BUID_SCOM_ADDR) ); if(err) { break; } } while(0); return err; } #endif //---------------------------------------------------------------------------- #ifdef CONFIG_MPIPL_ENABLED errlHndl_t IntrRp::blindIssueEOIs(TARGETING::Target * i_proc) { errlHndl_t err = NULL; TARGETING::TargetHandleList procCores; getChildChiplets(procCores, i_proc, TYPE_CORE, false); //state can change do { //Issue eio to IPIs first for(TARGETING::TargetHandleList::iterator core = procCores.begin(); core != procCores.end(); ++core) { FABRIC_CHIP_ID_ATTR chip = i_proc->getAttr(); FABRIC_GROUP_ID_ATTR node = i_proc->getAttr(); CHIP_UNIT_ATTR coreId = (*core)->getAttr(); PIR_t pir(0); pir.groupId = node; pir.chipId = chip; pir.coreId = coreId; size_t threads = cpu_thread_count(); for(size_t thread = 0; thread < threads; ++thread) { pir.threadId = thread; uint64_t xirrAddr = iv_baseAddr + cpuOffsetAddr(pir); uint32_t * xirrPtr = reinterpret_cast(xirrAddr + XIRR_OFFSET); uint8_t * mfrrPtr = reinterpret_cast( xirrAddr + MFRR_OFFSET); //need to set mfrr to 0xFF first TRACDCOMP(g_trac_intr,"Clearing IPI to xirrPtr[%p]", xirrPtr); *mfrrPtr = 0xFF; *xirrPtr = 0xFF000002; } } PIR_t pir(iv_masterCpu); pir.threadId = 0; //Can just write all EOIs to master core thread 0 XIRR uint64_t xirrAddr = iv_baseAddr + cpuOffsetAddr(pir); volatile uint32_t * xirrPtr = reinterpret_cast(xirrAddr +XIRR_OFFSET); //Issue eio to PSI logic uint32_t l_psiBaseIsn; uint32_t l_maxInt = 0; err = getPsiIRSN(i_proc, l_psiBaseIsn, l_maxInt); if(err) { break; } //Only issue if ISN is non zero (ie set) if(l_psiBaseIsn) { l_psiBaseIsn |= 0xFF000000; uint32_t l_psiMaxIsn = l_psiBaseIsn + l_maxInt; TRACFCOMP(g_trac_intr,"Issuing EOI to PSIHB range %x - %x", l_psiBaseIsn, 
l_psiMaxIsn); for(uint32_t l_isn = l_psiBaseIsn; l_isn < l_psiMaxIsn; ++l_isn) { TRACDCOMP(g_trac_intr," xirrPtr[%p] xirr[%x]\n", xirrPtr, l_isn); *xirrPtr = l_isn; } } //Don't need to issue EOIs to PHBs //since PHB ETU reset cleans them up //Issue eio to NX logic uint32_t l_nxBaseIsn; err = getNxIRSN(i_proc, l_nxBaseIsn, l_maxInt); if(err) { break; } //Only issue if ISN is non zero (ie set) if(l_nxBaseIsn) { l_nxBaseIsn |= 0xFF000000; uint32_t l_nxMaxIsn = l_nxBaseIsn + l_maxInt; TRACFCOMP(g_trac_intr,"Issuing EOI to NX range %x - %x", l_nxBaseIsn, l_nxMaxIsn); for(uint32_t l_isn = l_nxBaseIsn; l_isn < l_nxMaxIsn; ++l_isn) { *xirrPtr = l_isn; } } } while(0); return err; } #endif //---------------------------------------------------------------------------- errlHndl_t IntrRp::findProcs_Cores(TARGETING::TargetHandleList & o_procs, TARGETING::TargetHandleList& o_cores) { errlHndl_t err = NULL; do { //Build a list of "functional" processors. This needs to be //done without targeting support (just blueprint) since //on MPIPL the targeting information is obtained in //discover_targets -- much later in the IPL. //Since this is MPIPL we will rely on two things: // 1) FSI will be active to present chips // 2) The MPIPL HW bit in CFAM 2839 will be set //force FSI to init so we can rely on slave data err = FSI::initializeHardware(); if(err) { break; } TARGETING::TargetHandleList procChips; TARGETING::PredicateCTM predProc( TARGETING::CLASS_CHIP, TARGETING::TYPE_PROC ); TARGETING::TargetService& tS = TARGETING::targetService(); TARGETING::Target * sysTarget = NULL; tS.getTopLevelTarget( sysTarget ); assert( sysTarget != NULL ); TARGETING::Target* masterProcTarget = NULL; TARGETING::targetService().masterProcChipTargetHandle( masterProcTarget ); tS.getAssociated( procChips, sysTarget, TARGETING::TargetService::CHILD, TARGETING::TargetService::ALL, &predProc ); for(TARGETING::TargetHandleList::iterator proc = procChips.begin(); proc != procChips.end(); ++proc) { //if master proc -- just add it as we are running on it if (*proc == masterProcTarget) { o_procs.push_back(*proc); continue; } //First see if present if(FSI::isSlavePresent(*proc)) { TRACFCOMP(g_trac_intr,"Proc %x detected via FSI", TARGETING::get_huid(*proc)); //Second check to see if MPIPL bit is on cfam "2839" which //Note 2839 is ecmd addressing, real address is 0x28E4 (byte) uint64_t l_addr = 0x28E4; uint32_t l_data = 0; size_t l_size = sizeof(uint32_t); err = deviceRead(*proc, &l_data, l_size, DEVICE_FSI_ADDRESS(l_addr)); if (err) { TRACFCOMP(g_trac_intr,"Failed to read CFAM 2839 on %x", TARGETING::get_huid(*proc)); break; } TRACFCOMP(g_trac_intr,"Proc %x 2839 val [%x]", TARGETING::get_huid(*proc), l_data); if(l_data & 0x80000000) { //Chip is present and functional -- add it to our list o_procs.push_back(*proc); //Also need to force it to use Xscom //Note that it has to support (ie it is part of the SMP) ScomSwitches l_switches = (*proc)->getAttr(); l_switches.useSbeScom = 0; l_switches.useFsiScom = 0; l_switches.useXscom = 1; (*proc)->setAttr(l_switches); } } } if (err) { break; } //Build up a list of all possible cores (don't care if func/present, //just that they exist in the blueprint TARGETING::TargetHandleList l_cores; for(TARGETING::TargetHandleList::iterator proc = o_procs.begin(); proc != o_procs.end(); ++proc) { l_cores.clear(); getChildChiplets(l_cores, *proc, TYPE_CORE, false); for(TARGETING::TargetHandleList::iterator core = l_cores.begin(); core != l_cores.end(); ++core) { o_cores.push_back(*core); } } }while(0); return err; 
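        // Note on the CFAM address used above: 2839 is ecmd (word-register)
        // notation; multiplying the register field in the low byte by 4 gives
        // the 0x28E4 byte offset passed to DEVICE_FSI_ADDRESS() (explanatory
        // note only, based on the usual ecmd-to-byte conversion).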
void IntrRp::allowAllInterrupts(TARGETING::Target* i_core)
{
    const TARGETING::Target * proc = getParentChip(i_core);
    FABRIC_CHIP_ID_ATTR chip = proc->getAttr<TARGETING::ATTR_FABRIC_CHIP_ID>();
    FABRIC_GROUP_ID_ATTR node =
        proc->getAttr<TARGETING::ATTR_FABRIC_GROUP_ID>();
    CHIP_UNIT_ATTR coreId = i_core->getAttr<TARGETING::ATTR_CHIP_UNIT>();
    PIR_t pir(0);
    pir.groupId = node;
    pir.chipId = chip;
    pir.coreId = coreId;

    size_t threads = cpu_thread_count();
    for(size_t thread = 0; thread < threads; ++thread)
    {
        pir.threadId = thread;
        uint64_t cpprAddr = cpuOffsetAddr(pir) + iv_baseAddr + CPPR_OFFSET;
        uint8_t *cppr = reinterpret_cast<uint8_t*>(cpprAddr);
        *cppr = 0xff;   // allow all interrupts
    }
}

void IntrRp::disableAllInterrupts(TARGETING::Target* i_core)
{
    const TARGETING::Target * proc = getParentChip(i_core);
    FABRIC_CHIP_ID_ATTR chip = proc->getAttr<TARGETING::ATTR_FABRIC_CHIP_ID>();
    FABRIC_GROUP_ID_ATTR node =
        proc->getAttr<TARGETING::ATTR_FABRIC_GROUP_ID>();
    CHIP_UNIT_ATTR coreId = i_core->getAttr<TARGETING::ATTR_CHIP_UNIT>();
    PIR_t pir(0);
    pir.groupId = node;
    pir.chipId = chip;
    pir.coreId = coreId;

    size_t threads = cpu_thread_count();
    for(size_t thread = 0; thread < threads; ++thread)
    {
        pir.threadId = thread;
        //wh_p9 disableInterruptPresenter(pir);
    }
}

void IntrRp::drainMpIplInterrupts(TARGETING::TargetHandleList & i_cores)
{
    TRACFCOMP(g_trac_intr,"Drain pending interrupts");

    bool interrupt_found = false;
    size_t retryCount = 10;
    do
    {
        interrupt_found = false;
        nanosleep(0,1000000);   // 1 ms

        for(TARGETING::TargetHandleList::iterator core = i_cores.begin();
            core != i_cores.end();
            ++core)
        {
            const TARGETING::Target * proc = getParentChip(*core);
            FABRIC_CHIP_ID_ATTR chip =
                proc->getAttr<TARGETING::ATTR_FABRIC_CHIP_ID>();
            FABRIC_GROUP_ID_ATTR node =
                proc->getAttr<TARGETING::ATTR_FABRIC_GROUP_ID>();
            CHIP_UNIT_ATTR coreId =
                (*core)->getAttr<TARGETING::ATTR_CHIP_UNIT>();
            PIR_t pir(0);
            pir.groupId = node;
            pir.chipId = chip;
            pir.coreId = coreId;

            TRACFCOMP(g_trac_intr," n%d p%d c%d", node, chip, coreId);

            size_t threads = cpu_thread_count();
            for(size_t thread = 0; thread < threads; ++thread)
            {
                pir.threadId = thread;
                uint64_t xirrAddr = iv_baseAddr + cpuOffsetAddr(pir)
                                    + XIRR_RO_OFFSET;
                volatile uint32_t * xirrPtr =
                    reinterpret_cast<volatile uint32_t*>(xirrAddr);
                uint32_t xirr = *xirrPtr & 0x00FFFFFF;
                TRACDCOMP(g_trac_intr," xirrPtr[%p] xirr[%x]\n",
                          xirrPtr, xirr);

                if(xirr)
                {
                    // Found pending interrupt!
                    interrupt_found = true;
                    TRACFCOMP(g_trac_intr, ERR_MRK
                              "Pending interrupt found on MPIPL."
                              " CpuId:0x%x XIRR:0x%x", pir.word, xirr);

                    uint8_t * mfrrPtr =
                        reinterpret_cast<uint8_t*>(xirrAddr + MFRR_OFFSET);

                    // Signal EOI - read then write xirr value
                    ++xirrPtr;  // move to RW XIRR reg
                    volatile uint32_t xirr_rw = *xirrPtr;

                    //If IPI need to set mfrr to 0xFF
                    if(ISN_INTERPROC == xirr)
                    {
                        *mfrrPtr = 0xFF;
                    }

                    *xirrPtr = xirr_rw;
                    --xirrPtr;  // back to RO XIRR reg
                }
            }
        }
    } while(interrupt_found == true && --retryCount != 0);

    if(interrupt_found && (retryCount == 0))
    {
        // traces above should identify stuck interrupt
        INITSERVICE::doShutdown(INTR::RC_PERSISTENT_INTERRUPTS);
    }
}
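// Note on the drain logic above (explanatory comment, not from the original
// source): the sequence follows the usual XICS presenter handshake -- the
// read/write XIRR image one word past the read-only image is read to accept
// the pending source and written back with the same value to signal EOI, and
// inter-processor interrupts additionally need MFRR returned to 0xFF so the
// IPI request deasserts.  A minimal sketch of that handshake, reusing only
// symbols already used above:
//
//     volatile uint32_t* xirrRW =
//         reinterpret_cast<volatile uint32_t*>(xirrAddr) + 1; // RW image
//     uint32_t xirr_rw = *xirrRW;               // accept the interrupt
//     if((xirr_rw & 0x00FFFFFF) == ISN_INTERPROC)
//     {
//         *mfrrPtr = 0xFF;                      // retire the IPI request
//     }
//     *xirrRW = xirr_rw;                        // EOI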
#ifdef CONFIG_MPIPL_ENABLED
errlHndl_t IntrRp::hw_disableIntrMpIpl()
{
    errlHndl_t err = NULL;
    TARGETING::TargetHandleList funcProc, procCores;

    //Need to clear out all pending interrupts. This includes
    //ones that PHYP already accepted and ones "hot" in the XIRR
    //register. Must be done for all processors prior to opening
    //up traffic for mailbox (since we switch the IRSN). PHYP
    //can route PSI interrupts to any chip in the system so all
    //must be cleaned up prior to switching.
    do
    {
        //extract the node layout for later
        err = extractHbNodeInfo();
        if(err)
        {
            break;
        }

        //Get the procs/cores
        err = findProcs_Cores(funcProc, procCores);
        if(err)
        {
            break;
        }

        //Since HB will need to use the PSI interrupt block, we need to
        //perform the extra step of disabling FSP PSI interrupts at the
        //source (theoretically the upstream disable should have handled
        //this, but it seems to slip through somehow and doesn't get fully
        //cleaned up because we clear the XIVR)
        for(TARGETING::TargetHandleList::iterator proc = funcProc.begin();
            (proc != funcProc.end()) && !err;
            ++proc)
        {
            uint64_t reg = PSI_FSP_INT_ENABLE;
            size_t scom_len = sizeof(uint64_t);
            err = deviceWrite((*proc), &reg, scom_len,
                              DEVICE_SCOM_ADDRESS(PSI_HBCR_AND_SCOM_ADDR));
        }
        if(err)
        {
            break;
        }

        // Disable upstream intr routing on all processor chips
        TRACFCOMP(g_trac_intr,"Disable upstream interrupt");
        for(TARGETING::TargetHandleList::iterator proc = funcProc.begin();
            (proc != funcProc.end()) && !err;
            ++proc)
        {
            // disable upstream intr routing
            err = hw_disableRouting(*proc,INTR_UPSTREAM);
        }
        if(err)
        {
            break;
        }

        err = syncNodes(INTR_MPIPL_UPSTREAM_DISABLED);
        if ( err )
        {
            break;
        }

        // Set interrupt presenter to allow all interrupts
        TRACFCOMP(g_trac_intr,"Allow interrupts");
        for(TARGETING::TargetHandleList::iterator core = procCores.begin();
            core != procCores.end();
            ++core)
        {
            allowAllInterrupts(*core);
        }

        // Now look for interrupts
        drainMpIplInterrupts(procCores);

        // Issue blind EOIs to all thread IPIs and to clean up stale XIRRs
        TRACFCOMP(g_trac_intr,"Issue blind EOIs to all IRSNs and IPIs");
        for(TARGETING::TargetHandleList::iterator proc = funcProc.begin();
            (proc != funcProc.end()) && !err;
            ++proc)
        {
            err = blindIssueEOIs(*proc);
        }
        if(err)
        {
            break;
        }

        err = syncNodes(INTR_MPIPL_DRAINED);
        if( err )
        {
            break;
        }

        // Disable all interrupt presenters
        for(TARGETING::TargetHandleList::iterator core = procCores.begin();
            core != procCores.end();
            ++core)
        {
            disableAllInterrupts(*core);
        }

        // disable downstream routing and clean up IRSN regs
        for(TARGETING::TargetHandleList::iterator proc = funcProc.begin();
            proc != funcProc.end();
            ++proc)
        {
            // disable downstream routing
            err = hw_disableRouting(*proc,INTR_DOWNSTREAM);
            if(err)
            {
                break;
            }

            // reset IRSN values
            err = hw_resetIRSNregs(*proc);
            if(err)
            {
                break;
            }

            //Now mask off all XIVRs under the PSI unit
            //This prevents hot PSI mbox interrupts from flowing up to HB
            //and allows PHYP to deal with them
            err = maskXIVR(*proc);
            if(err)
            {
                break;
            }
        }
        if(err)
        {
            break;
        }
    } while(0);

    return err;
}
#endif

errlHndl_t syncNodesError(void * i_p, uint64_t i_len)
{
    TRACFCOMP(g_trac_intr,"Failure calling mm_block_map: phys_addr=%p", i_p);
    /*@
     * @errortype       ERRL_SEV_UNRECOVERABLE
     * @moduleid        INTR::MOD_INTR_SYNC_NODES
     * @reasoncode      INTR::RC_CANNOT_MAP_MEMORY
     * @userdata1       physical address
     * @userdata2       Block size requested
     * @devdesc         Error mapping in memory
     */
    return new ERRORLOG::ErrlEntry
        (
         ERRORLOG::ERRL_SEV_UNRECOVERABLE,
         INTR::MOD_INTR_SYNC_NODES,
         INTR::RC_CANNOT_MAP_MEMORY,
         reinterpret_cast<uint64_t>(i_p),
         i_len,
         true /*Add HB Software Callout*/);
}
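/**
 * @brief Rendezvous all Hostboot instances at the given MPIPL sync point.
 *
 * Summary of the logic below (explanatory comment, not from the original
 * source): every node owns an internode_info_t block at
 * VMM_INTERNODE_PRESERVED_MEMORY_ADDR within its own HRMOR window.  A node
 * publishes the sync level it has reached in mpipl_intr_sync, then spins
 * until every node flagged in exist[] reports a level at least as high.
 * The lwsync()/isync() pairing appears to provide the release/acquire
 * ordering around those shared flags.
 */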
errlHndl_t IntrRp::syncNodes(intr_mpipl_sync_t i_sync_type)
{
    errlHndl_t err = NULL;

    bool reported[MAX_NODES_PER_SYS] = { false, };

    uint64_t hrmorBase = KernelIpc::ipc_data_area.hrmor_base;

    void * node_info_ptr =
        reinterpret_cast<void*>((iv_masterCpu.groupId * hrmorBase) +
                                VMM_INTERNODE_PRESERVED_MEMORY_ADDR);

    internode_info_t * this_node_info =
        reinterpret_cast<internode_info_t*>
        (mm_block_map(node_info_ptr,INTERNODE_INFO_SIZE));

    do
    {
        if(this_node_info == NULL)
        {
            err = syncNodesError(this_node_info, INTERNODE_INFO_SIZE);
            break;
        }

        if(this_node_info->eye_catcher != NODE_INFO_EYE_CATCHER)
        {
            TRACFCOMP(g_trac_intr, INFO_MRK
                      "MPIPL, but INTR node data sync area uninitialized."
                      " Assuming single HB Instance system");
            break;
        }

        // Map the internode data areas to a virtual address
        internode_info_t * vaddr[MAX_NODES_PER_SYS];
        for(uint64_t node = 0; node < MAX_NODES_PER_SYS; ++node)
        {
            if (node == iv_masterCpu.groupId)
            {
                vaddr[node] = this_node_info;
            }
            else if(this_node_info->exist[node])
            {
                node_info_ptr = reinterpret_cast<void*>
                    ((node*hrmorBase)+VMM_INTERNODE_PRESERVED_MEMORY_ADDR);

                internode_info_t * node_info =
                    reinterpret_cast<internode_info_t*>
                    (mm_block_map(node_info_ptr, INTERNODE_INFO_SIZE));

                if(node_info == NULL)
                {
                    err = syncNodesError(node_info_ptr, INTERNODE_INFO_SIZE);
                    break;
                }

                vaddr[node] = node_info;
                reported[node] = false;
            }
        }
        if (err)
        {
            break;
        }

        // This node has hit the sync point
        this_node_info->mpipl_intr_sync = i_sync_type;
        lwsync();

        bool synched = false;

        // Loop until all nodes have reached the sync point
        while(synched == false)
        {
            synched = true;
            for(uint64_t node = 0; node < MAX_NODES_PER_SYS; ++node)
            {
                if(this_node_info->exist[node])
                {
                    intr_mpipl_sync_t sync_type =
                        vaddr[node]->mpipl_intr_sync;
                    if(sync_type < i_sync_type)
                    {
                        synched = false;

                        // Ensure simics does a context switch
                        setThreadPriorityLow();
                        setThreadPriorityHigh();
                    }
                    else if(reported[node] == false)
                    {
                        reported[node] = true;
                        TRACFCOMP( g_trac_intr, INFO_MRK
                                   "MPIPL node %ld reached syncpoint %d",
                                   node, (uint32_t)i_sync_type);
                    }
                }
            }
        }
        isync();

        for(uint64_t node = 0; node < MAX_NODES_PER_SYS; ++node)
        {
            if(this_node_info->exist[node])
            {
                // We are still using this_node_info area
                // so unmap it later.
                if(node != iv_masterCpu.groupId)
                {
                    mm_block_unmap(vaddr[node]);
                }
            }
        }

        mm_block_unmap(this_node_info);

    } while(0);

    return err;
}

#ifdef CONFIG_MPIPL_ENABLED
errlHndl_t IntrRp::initializeMpiplSyncArea()
{
    errlHndl_t err = NULL;

    uint64_t hrmorBase = KernelIpc::ipc_data_area.hrmor_base;

    void * node_info_ptr =
        reinterpret_cast<void*>((iv_masterCpu.groupId * hrmorBase) +
                                VMM_INTERNODE_PRESERVED_MEMORY_ADDR);

    internode_info_t * this_node_info =
        reinterpret_cast<internode_info_t*>
        (mm_block_map(node_info_ptr,INTERNODE_INFO_SIZE));

    if(this_node_info)
    {
        TRACFCOMP( g_trac_intr, "MPIPL SYNC at phys %p virt %p value %lx\n",
                   node_info_ptr, this_node_info, NODE_INFO_EYE_CATCHER );

        this_node_info->eye_catcher = NODE_INFO_EYE_CATCHER;
        this_node_info->version = NODE_INFO_VERSION;
        this_node_info->mpipl_intr_sync = INTR_MPIPL_SYNC_CLEAR;
        for(uint64_t node = 0; node < MAX_NODES_PER_SYS; ++node)
        {
            if(iv_masterCpu.groupId == node)
            {
                this_node_info->exist[node] = true;
            }
            else
            {
                this_node_info->exist[node] = false;
            }
        }
        mm_block_unmap(this_node_info);
    }
    else
    {
        TRACFCOMP( g_trac_intr,
                   "Failure calling mm_block_map : phys_addr=%p",
                   node_info_ptr);
        /*@
         * @errortype       ERRL_SEV_UNRECOVERABLE
         * @moduleid        INTR::MOD_INTR_INIT_MPIPLAREA
         * @reasoncode      INTR::RC_CANNOT_MAP_MEMORY
         * @userdata1       physical address
         * @userdata2       Size
         * @devdesc         Error mapping in memory
         */
        err = new ERRORLOG::ErrlEntry(
                ERRORLOG::ERRL_SEV_UNRECOVERABLE,
                INTR::MOD_INTR_INIT_MPIPLAREA,
                INTR::RC_CANNOT_MAP_MEMORY,
                reinterpret_cast<uint64_t>(node_info_ptr),
                INTERNODE_INFO_SIZE,
                true /*Add HB Software Callout*/);
    }

    return err;
}
#endif
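// Explanatory note for extractHbNodeInfo() further below (not from the
// original source): ATTR_HB_EXISTING_IMAGE is treated as a left-justified
// bitmask in which the most significant of the low MAX_NODES_PER_SYS bits
// represents node 0.  Worked example, assuming MAX_NODES_PER_SYS == 8 purely
// for illustration:
//
//     mask = 0x1 << (MAX_NODES_PER_SYS - 1)                   // 0x80
//     node 0 present  ->  hb_existing_image |= (mask >> 0)    // 0x80
//     node 3 present  ->  hb_existing_image |= (mask >> 3)    // 0x10
//
// so nodes {0,3} produce a map of 0x90, matching the shift logic used in
// extractHbNodeInfo().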
#ifdef CONFIG_MPIPL_ENABLED
errlHndl_t IntrRp::addHbNodeToMpiplSyncArea(uint64_t i_hbNode)
{
    errlHndl_t err = NULL;

    uint64_t hrmorBase = KernelIpc::ipc_data_area.hrmor_base;

    void * node_info_ptr =
        reinterpret_cast<void*>((iv_masterCpu.groupId * hrmorBase) +
                                VMM_INTERNODE_PRESERVED_MEMORY_ADDR);

    internode_info_t * this_node_info =
        reinterpret_cast<internode_info_t*>
        (mm_block_map(node_info_ptr,INTERNODE_INFO_SIZE));

    if(this_node_info)
    {
        if(this_node_info->eye_catcher != NODE_INFO_EYE_CATCHER)
        {
            // Initialize the multi-node area for this node.
            err = initializeMpiplSyncArea();
        }
        this_node_info->exist[i_hbNode] = true;
        this_node_info->mpipl_intr_sync = INTR_MPIPL_SYNC_CLEAR;
        mm_block_unmap(this_node_info);
    }
    else
    {
        TRACFCOMP( g_trac_intr,
                   "Failure calling mm_block_map : phys_addr=%p",
                   node_info_ptr);
        /*@
         * @errortype       ERRL_SEV_UNRECOVERABLE
         * @moduleid        INTR::MOD_INTR_SYNC_ADDNODE
         * @reasoncode      INTR::RC_CANNOT_MAP_MEMORY
         * @userdata1       physical address
         * @userdata2       Size
         * @devdesc         Error mapping in memory
         */
        err = new ERRORLOG::ErrlEntry(
                ERRORLOG::ERRL_SEV_UNRECOVERABLE,
                INTR::MOD_INTR_SYNC_ADDNODE,
                INTR::RC_CANNOT_MAP_MEMORY,
                reinterpret_cast<uint64_t>(node_info_ptr),
                INTERNODE_INFO_SIZE,
                true /*Add HB Software Callout*/);
    }

    return err;
}
#endif

#ifdef CONFIG_MPIPL_ENABLED
errlHndl_t IntrRp::extractHbNodeInfo(void)
{
    errlHndl_t err = NULL;

    uint64_t hrmorBase = KernelIpc::ipc_data_area.hrmor_base;

    TARGETING::ATTR_HB_EXISTING_IMAGE_type hb_existing_image = 0;

    void * node_info_ptr =
        reinterpret_cast<void*>((iv_masterCpu.groupId * hrmorBase) +
                                VMM_INTERNODE_PRESERVED_MEMORY_ADDR);

    internode_info_t * this_node_info =
        reinterpret_cast<internode_info_t*>
        (mm_block_map(node_info_ptr,INTERNODE_INFO_SIZE));

    if(this_node_info)
    {
        if(this_node_info->eye_catcher != NODE_INFO_EYE_CATCHER)
        {
            TRACFCOMP(g_trac_intr, INFO_MRK
                      "MPIPL, but INTR node data sync area uninitialized."
                      " Assuming single HB Instance system");
        }
        else //multinode
        {
            TARGETING::ATTR_HB_EXISTING_IMAGE_type mask =
                0x1 << (MAX_NODES_PER_SYS -1);

            for(uint64_t node = 0; node < MAX_NODES_PER_SYS; ++node)
            {
                //If comm area indicates node exists, add to map
                if(this_node_info->exist[node])
                {
                    hb_existing_image |= (mask >> node);
                }
            }
        }
        mm_block_unmap(this_node_info);
    }
    else
    {
        TRACFCOMP( g_trac_intr,
                   "Failure calling mm_block_map : phys_addr=%p",
                   node_info_ptr);
        /*@
         * @errortype       ERRL_SEV_UNRECOVERABLE
         * @moduleid        INTR::MOD_INTR_EXTRACTNODEINFO
         * @reasoncode      INTR::RC_CANNOT_MAP_MEMORY
         * @userdata1       physical address
         * @userdata2       Size
         * @devdesc         Error mapping in memory
         */
        err = new ERRORLOG::ErrlEntry(
                ERRORLOG::ERRL_SEV_UNRECOVERABLE,
                INTR::MOD_INTR_EXTRACTNODEINFO,
                INTR::RC_CANNOT_MAP_MEMORY,
                reinterpret_cast<uint64_t>(node_info_ptr),
                INTERNODE_INFO_SIZE,
                true /*Add HB Software Callout*/);
    }

    TARGETING::Target * sys = NULL;
    TARGETING::targetService().getTopLevelTarget(sys);
    sys->setAttr<TARGETING::ATTR_HB_EXISTING_IMAGE>(hb_existing_image);
    TRACFCOMP( g_trac_intr, "extractHbNodeInfo found map: %x",
               hb_existing_image);

    return err;
}
#endif

//----------------------------------------------------------------------------
// External interfaces
//----------------------------------------------------------------------------

// Register a message queue with a particular intr type
errlHndl_t INTR::registerMsgQ(msg_q_t i_msgQ,
                              uint32_t i_msg_type,
                              ext_intr_t i_intr_type)
{
    errlHndl_t err = NULL;

    // Can't add while handling an interrupt, so
    // send msg instead of direct call
    msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR);
    if(intr_msgQ)
    {
        msg_t * msg = msg_allocate();
        msg->type = MSG_INTR_REGISTER_MSGQ;
        msg->data[0] = reinterpret_cast<uint64_t>(i_msgQ);
        msg->data[1] = static_cast<uint64_t>(i_intr_type);
        msg->data[1] |= static_cast<uint64_t>(i_msg_type) << 32;

        int rc = msg_sendrecv(intr_msgQ, msg);
        if(!rc)
        {
            err = reinterpret_cast<errlHndl_t>(msg->data[1]);
        }
        else
        {
            TRACFCOMP(g_trac_intr,ERR_MRK
                      "INTR::registerMsgQ - msg_sendrecv failed. 
errno = %d", rc); } msg_free(msg); } else { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTR_REGISTER * @reasoncode INTR::RC_REGISTRY_NOT_READY * @userdata1 Interrupt type to register * @userdata2 0 * * @devdesc Interrupt resource provider not initialized yet. * */ err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTR_REGISTER, // moduleid INTR::RC_REGISTRY_NOT_READY, // reason code static_cast(i_intr_type), 0 ); } return err; } void INTR::sendEOI(msg_q_t i_q, msg_t* i_msg) { //Fix up message to make it easier to handle //Users are required to NOT touch it i_msg->type = MSG_INTR_EOI; msg_respond(i_q,i_msg); } // Unregister message queue from interrupt handler msg_q_t INTR::unRegisterMsgQ(ext_intr_t i_type) { msg_q_t msgQ = NULL; msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->type = MSG_INTR_UNREGISTER_MSGQ; msg->data[0] = static_cast(i_type); int rc = msg_sendrecv(intr_msgQ,msg); if(!rc) { msgQ = reinterpret_cast(msg->data[1]); } else { TRACFCOMP(g_trac_intr,ERR_MRK "INTR::unRegisterMsgQ - msg_sendrecv failed. errno = %d", rc); } msg_free(msg); } return msgQ; } /* * Enable hardware to report external interrupts */ errlHndl_t INTR::enableExternalInterrupts() { errlHndl_t err = NULL; msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->type = MSG_INTR_ENABLE; msg_sendrecv(intr_msgQ, msg); err = reinterpret_cast(msg->data[1]); msg_free(msg); } else { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTR_ENABLE * @reasoncode INTR::RC_RP_NOT_INITIALIZED * @userdata1 MSG_INTR_ENABLE * @userdata2 0 * * @devdesc Interrupt resource provider not initialized yet. * */ err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTR_ENABLE, // moduleid INTR::RC_RP_NOT_INITIALIZED, // reason code static_cast(MSG_INTR_ENABLE), 0 ); } return err; } /* * Disable hardware from reporting external interrupts */ errlHndl_t INTR::disableExternalInterrupts() { errlHndl_t err = NULL; // Can't disable while handling interrupt, so send msg to serialize msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->type = MSG_INTR_DISABLE; msg_sendrecv(intr_msgQ, msg); err = reinterpret_cast(msg->data[1]); msg_free(msg); } else { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTR_DISABLE * @reasoncode INTR::RC_RP_NOT_INITIALIZED * @userdata1 MSG_INTR_DISABLE * @userdata2 0 * * @devdesc Interrupt resource provider not initialized yet. * */ err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTR_DISABLE, // moduleid INTR::RC_RP_NOT_INITIALIZED, // reason code static_cast(MSG_INTR_DISABLE), 0 ); } return err; } errlHndl_t IntrRp::setFspBAR( const intr_hdlr_t* const i_pProcIntrHdlr, const bool i_enable) { errlHndl_t pError = nullptr; do { if (!i_enable) { // Noop on disable break; } assert(i_pProcIntrHdlr != nullptr,"BUG! Input interrupt handler pointer " "was nullptr"); auto * const pProc = i_pProcIntrHdlr->proc; assert(pProc != nullptr,"BUG! 
proc target was nullptr"); uint64_t fspBAR = pProc->getAttr(); const size_t expSize = sizeof(fspBAR); auto size = expSize; pError = deviceWrite( pProc, &fspBAR, size, DEVICE_SCOM_ADDRESS(PU_PSI_BRIDGE_FSP_BAR_REG)); if(pError) { TRACFCOMP(g_trac_intr,ERR_MRK "Failed writing %d bytes of FSP BAR " "address value (0x%016llX) to FSP BAR register for proc 0x%08X", expSize,fspBAR,get_huid(pProc)); break; } assert(size == expSize,"Actual SCOM write size (%d) does not match " "expected SCOM write size (%d)",size,expSize); } while(0); return pError; } errlHndl_t IntrRp::setPsiHbBAR(intr_hdlr_t *i_proc, bool i_enable) { errlHndl_t l_err = NULL; TARGETING::Target *l_target = i_proc->proc; uint64_t l_baseBarValue = l_target->getAttr(); do { if (!i_enable) { break; } //Don't ever disable, PHYP needs this set //Get base BAR Value from attribute uint64_t l_barValue = l_baseBarValue; TRACFCOMP(g_trac_intr,"INTR: Setting PSI BRIDGE Bar Address value for -" " Target %p. PSI BRIDGE BAR value: 0x%016lx", l_target,l_barValue); //Set base BAR Value uint64_t size = sizeof(l_barValue); l_err = deviceWrite(l_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_BAR_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to set PSI BRIDGE BAR Address"); break; } //Now set the enable bit l_barValue += PSI_BRIDGE_BAR_ENABLE; size = sizeof(l_barValue); TRACDCOMP(g_trac_intr,"INTR: Setting PSI BRIDGE Bar enable value for Target - %p. PSI BRIDGE BAR value: 0x%016lx", l_target,l_barValue); l_err = deviceWrite(l_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_BAR_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Error enabling PSIHB BAR"); break; } //Map Memory Internally for HB and store in member variable void *l_psiHbAddress = reinterpret_cast(l_baseBarValue); i_proc->psiHbBaseAddr = reinterpret_cast (mmio_dev_map(l_psiHbAddress, PAGE_SIZE)); } while(0); return l_err; } errlHndl_t IntrRp::setPsiHbEsbBAR(intr_hdlr_t *i_proc, bool i_enable) { TARGETING::Target *l_target = i_proc->proc; errlHndl_t l_err = NULL; uint64_t l_baseBarValue = l_target->getAttr(); do { uint64_t l_barValue = l_baseBarValue; TRACFCOMP(g_trac_intr,"INTR: Target %p. " "PSI BRIDGE ESB BASE BAR value: 0x%016lx", l_target,l_barValue); uint64_t size = sizeof(l_barValue); l_err = deviceWrite(l_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_ESB_BAR_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to set PSIHB ESB BAR "); break; } //If we are trying to enable this BAR register if (i_enable) { l_barValue += PSI_BRIDGE_ESB_BAR_VALID; TRACFCOMP(g_trac_intr,"INTR: Target %p. PSI BRIDGE ESB BAR value: 0x%016lx", l_target,l_barValue); size = sizeof(l_barValue); l_err = deviceWrite(l_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(PSI_BRIDGE_ESB_BAR_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Error setting PSIHB ESB BAR"); break; } //Map Memory Internally for HB and store in member variable void *l_psiHbEoiAddress = reinterpret_cast(l_baseBarValue); i_proc->psiHbEsbBaseAddr = reinterpret_cast (mmio_dev_map(l_psiHbEoiAddress, (LSI_LAST_SOURCE)*PAGE_SIZE)); } } while (0); return l_err; } errlHndl_t IntrRp::setXiveIvpeTmBAR1(TARGETING::Target * i_target, bool i_enable) { errlHndl_t l_err = NULL; uint64_t l_baseBarValue = i_target->getAttr(); do { uint64_t l_barValue = l_baseBarValue; if (i_enable) { l_barValue += XIVE_IVPE_TM_BAR1_VALIDATE; } TRACDCOMP(g_trac_intr,"INTR: Target %p. 
XIVE IVPE TM BAR1 value: 0x%016lx", i_target,l_barValue); uint64_t size = sizeof(l_barValue); l_err = deviceWrite(i_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(XIVE_IVPE_TM_BAR1_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to set XIVE IVPE TM BAR1"); break; } //Map Memory Internally for HB and store in member variable void *l_xiveTmBar1Address = reinterpret_cast(l_baseBarValue); iv_xiveTmBar1Address = reinterpret_cast (mmio_dev_map(l_xiveTmBar1Address, PAGE_SIZE)); } while(0); return l_err; } errlHndl_t IntrRp::setXiveIcBAR(intr_hdlr_t *i_proc, bool i_enable) { TARGETING::Target *l_target = i_proc->proc; errlHndl_t l_err = NULL; uint64_t l_baseBarValue = l_target->getAttr(); do { uint64_t l_barValue = l_baseBarValue; if (i_enable) { l_barValue += XIVE_IC_BAR_VALID; } TRACDCOMP(g_trac_intr,"INTR: Target %p. XIVE IC BAR value: 0x%016lx", l_target, l_barValue); uint64_t size = sizeof(l_barValue); l_err = deviceWrite(l_target, &l_barValue, size, DEVICE_SCOM_ADDRESS(XIVE_IC_BAR_SCOM_ADDR)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to set XIVE IC BAR"); break; } //Map Memory Internally for HB and store in member variable void *l_xiveIcBarAddress = reinterpret_cast(l_baseBarValue); i_proc->xiveIcBarAddr = reinterpret_cast (mmio_dev_map(l_xiveIcBarAddress, 40*PAGE_SIZE)); } while(0); return l_err; } errlHndl_t IntrRp::disableVPCPullErr(intr_hdlr_t * i_proc) { errlHndl_t l_err = NULL; TARGETING::Target *l_target = i_proc->proc; size_t size; do { uint64_t l_vpcErrCnfg; size = sizeof(l_vpcErrCnfg); l_err = deviceRead(l_target, &l_vpcErrCnfg, size, DEVICE_SCOM_ADDRESS(PU_INT_PC_VPC_ERR_CFG1)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to read VPC Err Cnfg"); break; } l_vpcErrCnfg &= ~XIVE_IC_VPC_PULL_ERR; l_err = deviceWrite(l_target, &l_vpcErrCnfg, size, DEVICE_SCOM_ADDRESS(PU_INT_PC_VPC_ERR_CFG1)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to write VPC Err Cnfg"); break; } } while(0); return l_err; } errlHndl_t IntrRp::enableVPCPullErr(TARGETING::Target * i_target) { errlHndl_t l_err = NULL; size_t size; do { uint64_t l_vpcErrCnfg; size = sizeof(l_vpcErrCnfg); l_err = deviceRead(i_target, &l_vpcErrCnfg, size, DEVICE_SCOM_ADDRESS(PU_INT_PC_VPC_ERR_CFG1)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to read VPC Err Cnfg"); break; } l_vpcErrCnfg |= XIVE_IC_VPC_PULL_ERR; l_err = deviceWrite(i_target, &l_vpcErrCnfg, size, DEVICE_SCOM_ADDRESS(PU_INT_PC_VPC_ERR_CFG1)); if(l_err) { TRACFCOMP(g_trac_intr,ERR_MRK"Unable to write VPC Err Cnfg"); break; } } while(0); return l_err; } uint64_t INTR::getIntpAddr(const TARGETING::Target * i_ec, uint8_t i_thread) { const TARGETING::Target * l_proc = getParentChip(i_ec); uint64_t l_intB =l_proc->getAttr(); PIR_t pir(0); pir.groupId = l_proc->getAttr(); pir.chipId = l_proc->getAttr(); pir.coreId = i_ec->getAttr(); pir.threadId = i_thread; return (l_intB+ InterruptMsgHdlr::mmio_offset( pir.word & (InterruptMsgHdlr::P9_PIR_THREADID_MSK | InterruptMsgHdlr::P9_PIR_COREID_MSK))); } void* INTR::IntrRp::handleCpuTimeout(void* _pir) { uint64_t pir = reinterpret_cast(_pir); task_detach(); int count = 0; int rc = 0; // Allocate a message to send to the RP thread. msg_t* msg = msg_allocate(); msg->type = MSG_INTR_ADD_CPU_TIMEOUT; msg->data[0] = pir; msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); TRACFCOMP( g_trac_intr,"handleCpuTimeout for pir: %lx", pir); do { // Sleep for the right amount. nanosleep(0, CPU_WAKEUP_INTERVAL_NS); // Check the status with the RP thread. 
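        // Explanatory note (not from the original source): the loop below
        // keeps re-sending MSG_INTR_ADD_CPU_TIMEOUT to the resource provider
        // with the elapsed interval count in data[1]; the RP replies EAGAIN
        // while it is still waiting for the woken core to check in, and any
        // other status ends the polling.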
msg->data[1] = count; msg_sendrecv(intr_msgQ, msg); // Get the status from the response message. rc = msg->data[1]; count++; } while(rc == EAGAIN); msg_free(msg); return NULL; } errlHndl_t INTR::addHbNode(uint64_t i_hbNode) { errlHndl_t err = NULL; msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); TRACFCOMP( g_trac_intr,"Add node %d for MPIPL",i_hbNode); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->data[0] = i_hbNode; msg->type = MSG_INTR_ADD_HBNODE; msg_send(intr_msgQ, msg); } else { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTR_ADDHBNODE * @reasoncode INTR::RC_RP_NOT_INITIALIZED * @userdata1 MSG_INTR_ADD_HBNODE * @userdata2 hbNode to add * * @devdesc Interrupt resource provider not initialized yet. * */ err = new ERRORLOG::ErrlEntry ( ERRORLOG::ERRL_SEV_INFORMATIONAL, // severity INTR::MOD_INTR_ADDHBNODE, // moduleid INTR::RC_RP_NOT_INITIALIZED, // reason code static_cast(MSG_INTR_ADD_HBNODE), i_hbNode ); } return err; } void INTR::drainQueue() { // send a sync message if queue is found msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->type = MSG_INTR_DRAIN_QUEUE; msg_sendrecv(intr_msgQ, msg); msg_free(msg); } //else no queue, no need to do anything } uint64_t INTR::get_enabled_threads( void ) { TARGETING::Target* sys = NULL; TARGETING::targetService().getTopLevelTarget(sys); assert( sys != NULL ); uint64_t en_threads = sys->getAttr(); if( en_threads == 0 ) { //Read mbox scratch regs for enabled threads value //and set attribute appropriately INITSERVICE::SPLESS::MboxScratch3_t l_scratch3; TARGETING::ATTR_MASTER_MBOX_SCRATCH_type l_scratchRegs; assert(sys->tryGetAttr (l_scratchRegs), "INTR::get_enabled_threads() failed to get MASTER_MBOX_SCRATCH"); l_scratch3.data32 = l_scratchRegs[INITSERVICE::SPLESS::SCRATCH_3]; if(l_scratch3.smtMode == 0x1) { en_threads = 0x8000000000000000; //SMT1 == thread 0 } else if (l_scratch3.smtMode == 0x2) { en_threads = 0xC000000000000000; //SMT2 == thread 0,1 } else { en_threads = 0xF000000000000000; //SMT4 == thread 0..3 } sys->setAttr(en_threads); } return en_threads; } errlHndl_t INTR::enablePsiIntr(TARGETING::Target * i_target) { errlHndl_t err = NULL; msg_q_t intr_msgQ = msg_q_resolve(VFS_ROOT_MSG_INTR); if(intr_msgQ) { msg_t * msg = msg_allocate(); msg->type = MSG_INTR_ENABLE_PSI_INTR; msg->data[0] = reinterpret_cast(i_target); msg_sendrecv(intr_msgQ, msg); err = reinterpret_cast(msg->data[1]); msg_free(msg); } else { /*@ errorlog tag * @errortype ERRL_SEV_INFORMATIONAL * @moduleid INTR::MOD_INTR_ENABLE_PSI_INTR * @reasoncode INTR::RC_RP_NOT_INITIALIZED * @userdata1 MSG_INTR_ENABLE_PSI_INTR * @userdata2 0 * * @devdesc Interrupt resource provider not initialized yet. 
     * */
        err = new ERRORLOG::ErrlEntry
            (
             ERRORLOG::ERRL_SEV_INFORMATIONAL,          // severity
             INTR::MOD_INTR_ENABLE_PSI_INTR,            // moduleid
             INTR::RC_RP_NOT_INITIALIZED,               // reason code
             static_cast<uint64_t>(MSG_INTR_ENABLE_PSI_INTR),
             0
            );
    }
    return err;
}

errlHndl_t INTR::IntrRp::enableSlaveProcInterrupts(TARGETING::Target * i_target)
{
    errlHndl_t l_err = NULL;

    do
    {
        TRACFCOMP(g_trac_intr,
                  "Enabling Interrupts for slave proc with huid: %x",
                  TARGETING::get_huid(i_target));

        intr_hdlr_t* l_procIntrHdlr = new intr_hdlr_t(i_target);
        iv_chipList.push_back(l_procIntrHdlr);

        //Setup the base Interrupt BAR Registers for this non-master proc
        l_err = setCommonInterruptBARs(l_procIntrHdlr);
        if (l_err)
        {
            TRACFCOMP(g_trac_intr,
                      ERR_MRK" could not set common interrupt BARs");
            break;
        }

        //Apply the masking of the interrupt sources from the master chip to
        // the slave chip to block unwanted spurious interrupts for which
        // there is no handler
        for(MaskList_t::iterator mask_itr = iv_maskList.begin();
            mask_itr != iv_maskList.end();
            ++mask_itr)
        {
            l_err = maskInterruptSource(*mask_itr, l_procIntrHdlr);
            if (l_err)
            {
                break;
            }
        }
        if (l_err)
        {
            break;
        }

        //Setup the PSIHB interrupt routing to route interrupts from the
        // non-master proc back to the master proc
        enableSlaveProcInterruptRouting(l_procIntrHdlr);
    } while(0);

    TRACFCOMP(g_trac_intr,
              INFO_MRK"Slave Proc Interrupt Routing setup complete");

    return l_err;
}
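/*
 * Usage sketch for the external message-queue interface above (illustrative
 * only -- MY_MSG_TYPE and MY_INTR_TYPE are placeholders for a caller-owned
 * message type and an INTR::ext_intr_t value, not names defined here):
 *
 *     msg_q_t mq = msg_q_create();
 *     errlHndl_t err = INTR::registerMsgQ(mq, MY_MSG_TYPE, MY_INTR_TYPE);
 *     if(!err)
 *     {
 *         msg_t* msg = msg_wait(mq);     // interrupt delivered as a message
 *         // ...handle the interrupt...
 *         INTR::sendEOI(mq, msg);        // hand the same message back as EOI
 *     }
 */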