/* IBM_PROLOG_BEGIN_TAG */
/* This is an automatically generated prolog. */
/* */
/* $Source: src/usr/targeting/attrrp.C $ */
/* */
/* OpenPOWER HostBoot Project */
/* */
/* Contributors Listed Below - COPYRIGHT 2011,2018 */
/* [+] International Business Machines Corp. */
/* */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
/* implied. See the License for the specific language governing */
/* permissions and limitations under the License. */
/* */
/* IBM_PROLOG_END_TAG */

/**
 *  @file targeting/attrrp.C
 *
 *  @brief Attribute resource provider implementation which establishes and
 *      initializes virtual memory ranges for attributes as needed, and works
 *      with other resource providers (such as the PNOR resource provider) to
 *      retrieve attribute data which it cannot directly provide.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

using namespace INITSERVICE;
using namespace ERRORLOG;

#include "attrrp_common.C"

namespace TARGETING
{
    const char* ATTRRP_MSG_Q = "attrrpq";
    const char* ATTRRP_ATTR_SYNC_MSG_Q = "attrrpattrsyncq";

    void* AttrRP::getBaseAddress(const NODE_ID i_nodeIdUnused)
    {
        return reinterpret_cast<void*>(VMM_VADDR_ATTR_RP);
    }

    void* AttrRP::startMsgServiceTask(void* i_pInstance)
    {
        // Call msgServiceTask loop on instance.
        TARG_ASSERT(i_pInstance, "No instance passed to startMsgServiceTask");
        static_cast<AttrRP*>(i_pInstance)->msgServiceTask();
        return NULL;
    }

    void* AttrRP::startAttrSyncTask(void* i_pInstance)
    {
        TARG_ASSERT(i_pInstance, "No instance passed to startAttrSyncTask");
        static_cast<AttrRP*>(i_pInstance)->attrSyncTask();
        return nullptr;
    }

    void AttrRP::startup(errlHndl_t& io_taskRetErrl, bool i_isMpipl)
    {
        errlHndl_t l_errl = NULL;

        do
        {
            iv_isMpipl = i_isMpipl;

            // Parse PNOR headers.
            l_errl = this->parseAttrSectHeader();
            if (l_errl)
            {
                break;
            }

            // Create corresponding VMM blocks for each section.
            l_errl = this->createVmmSections();
            if (l_errl)
            {
                break;
            }

            // Now that the VMM blocks have been created we must set
            // the appropriate R/W permissions
            l_errl = this->editPagePermissions(ALL_SECTION_TYPES,
                                               DEFAULT_PERMISSIONS);
            if (l_errl)
            {
                break;
            }

            // Spawn daemon thread.
            task_create(&AttrRP::startMsgServiceTask, this);

            // Register attribute sync message queue so it can be discovered by
            // istep 21 in order to deregister it from shutdown event handling.
            auto rc = msg_q_register(iv_attrSyncMsgQ, ATTRRP_ATTR_SYNC_MSG_Q);
            assert(rc == 0, "Bug! Unable to register attribute sync message "
                "queue");

            // Spawn attribute sync thread
            task_create(&AttrRP::startAttrSyncTask, this);

            if(iv_isMpipl)
            {
                populateAttrsForMpipl();
            }

        } while (false);

        // If an error occurred, post to TaskArgs.
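        // (Nothing is committed here; the severity is escalated and the log
        //  is handed back to the _start() wrapper through io_taskRetErrl.)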
        if (l_errl)
        {
            l_errl->setSev(ERRORLOG::ERRL_SEV_UNRECOVERABLE);
        }

        // return any errlogs to _start()
        io_taskRetErrl = l_errl;
    }

    errlHndl_t AttrRP::notifyResourceReady(const RESOURCE i_resource)
    {
        return Singleton<AttrRP>::instance()._notifyResourceReady(i_resource);
    }

    errlHndl_t AttrRP::syncAllAttributesToFsp()
    {
        return Singleton<AttrRP>::instance()._syncAllAttributesToFsp();
    }

    errlHndl_t AttrRP::_syncAllAttributesToFsp() const
    {
        TRACFCOMP(g_trac_targeting,
            ENTER_MRK "AttrRP::_syncAllAttributesToFsp");

        auto pError = _sendAttrSyncMsg(MSG_INVOKE_ATTR_SYNC,true);

        TRACFCOMP(g_trac_targeting,
            EXIT_MRK "AttrRP::_syncAllAttributesToFsp");

        return pError;
    }

    errlHndl_t AttrRP::_notifyResourceReady(const RESOURCE i_resource) const
    {
        TRACFCOMP(g_trac_targeting,
            ENTER_MRK "AttrRP::_notifyResourceReady: resource type = 0x%02X.",
            i_resource);

        auto msgType = MSG_INVALID;
        const bool sync=false;

        switch (i_resource)
        {
            case MAILBOX:
            {
                msgType = MSG_PRIME_ATTR_SYNC;
            }
            break;

            default:
            {
                TRACFCOMP(g_trac_targeting,
                    ERR_MRK "AttrRP::_notifyResourceReady: Bug! Unhandled "
                    "resource type = 0x%02X.",
                    i_resource);
                assert(0);
            }
            break;
        }

        errlHndl_t pError = _sendAttrSyncMsg(msgType,sync);
        if(pError)
        {
            TRACFCOMP(g_trac_targeting,
                ERR_MRK "AttrRP::_notifyResourceReady: Failed in call to "
                "_sendAttrSyncMsg; msgType = 0x%08X, sync = %d.",
                msgType,sync);
        }

        TRACFCOMP(g_trac_targeting,
            EXIT_MRK "AttrRP::_notifyResourceReady");

        return pError;
    }

    errlHndl_t AttrRP::_sendAttrSyncMsg(
        const ATTRRP_MSG_TYPE i_msgType,
        const bool            i_sync) const
    {
        TRACFCOMP(g_trac_targeting,
            ENTER_MRK "AttrRP::_sendAttrSyncMsg: i_msgType = 0x%08X, "
            "i_sync = %d.",
            i_msgType, i_sync);

        errlHndl_t pError = nullptr;

        auto pMsg = msg_allocate();
        pMsg->type = i_msgType;

        int rc = 0;
        if(i_sync)
        {
            rc = msg_sendrecv(iv_attrSyncMsgQ,pMsg);
        }
        else
        {
            rc = msg_send(iv_attrSyncMsgQ,pMsg);
        }

        bool logError=false;
        uint32_t plid=0;
        int msgRc=0;

        if (rc)
        {
            TRACFCOMP(g_trac_targeting,
                ERR_MRK "AttrRP::_sendAttrSyncMsg: Failed in %s. "
                "Message type = 0x%08X, sync = %d, rc = %d.",
                i_sync ? "msg_sendrecv" : "msg_send",
                pMsg->type,i_sync,rc);
            logError=true;
        }
        else if(i_sync)
        {
            if(pMsg->data[1])
            {
                msgRc=static_cast<int>(pMsg->data[1]);
                TRACFCOMP(g_trac_targeting,
                    ERR_MRK "AttrRP::_sendAttrSyncMsg: Message (type = 0x%08X) "
                    "returned with rc = %d.",
                    pMsg->type,msgRc);
                logError=true;
            }

            if(pMsg->extra_data)
            {
                plid=static_cast<uint32_t>(
                    reinterpret_cast<uint64_t>(pMsg->extra_data));
                TRACFCOMP(g_trac_targeting,
                    ERR_MRK "AttrRP::_sendAttrSyncMsg: Message (type = 0x%08X) "
                    "returned with failure related to PLID = 0x%08X.",
                    pMsg->type,plid);
                logError=true;
            }
        }

        if(logError)
        {
            /*@
             *  @errortype
             *  @moduleid   TARG_SEND_ATTR_SYNC_MSG
             *  @reasoncode TARG_RC_ATTR_MSG_FAIL
             *  @userdata1[00:31] Message type
             *  @userdata1[32:63] API return code (from msg_send or
             *      msg_sendrecv; 0=N/A)
             *  @userdata2[00:31] Message return code (0=N/A)
             *  @userdata2[32:63] Message error PLID (0=N/A)
             *  @devdesc    Failed to either send/(receive) the requested
             *      message to/from the attribute resource provider OR the
             *      provider failed executing the message request.
             *  @custdesc   Unexpected boot firmware error occurred
             */
            pError = new ErrlEntry(
                ERRL_SEV_UNRECOVERABLE,
                TARG_SEND_ATTR_SYNC_MSG,
                TARG_RC_ATTR_MSG_FAIL,
                TWO_UINT32_TO_UINT64(pMsg->type,rc),
                TWO_UINT32_TO_UINT64(msgRc,plid),
                ERRORLOG::ErrlEntry::ADD_SW_CALLOUT);
            pError->collectTrace(TARG_COMP_NAME);

            if(plid)
            {
                pError->plid(plid);
            }
        }

        if(i_sync)
        {
            msg_free(pMsg);
            pMsg = nullptr;
        }

        TRACFCOMP(g_trac_targeting,
            EXIT_MRK "AttrRP::_sendAttrSyncMsg: rc = %d, msgRc = %d, plid = "
            "0x%08X.",
            rc,msgRc,plid);

        return pError;
    }

    errlHndl_t AttrRP::_invokeAttrSync() const
    {
        errlHndl_t pError = nullptr;

        do
        {
            if(!iv_attrSyncPrimed)
            {
                TRACFCOMP(g_trac_targeting, INFO_MRK "_invokeAttrSync: "
                    "Attribute sync not primed; suppressing "
                    "attribute sync.");
                break;
            }

            // Nothing to do unless FSP is available to respond to the
            // attribute sync request
            if(!INITSERVICE::spBaseServicesEnabled())
            {
                TRACFCOMP(g_trac_targeting, INFO_MRK "_invokeAttrSync: "
                    "FSP services not available; suppressing "
                    "attribute sync.");
                break;
            }

            // Ensure that SBE is not quiesced (in which case mailbox related
            // SBE FIFO traffic will not be serviced)
            TARGETING::Target* pMasterProc=nullptr;

            // Master processor is assumed to be functional since we're running
            // on it; if no functional master is found, we'll error out.
            pError =
                TARGETING::targetService().queryMasterProcChipTargetHandle(
                    pMasterProc);
            if(pError)
            {
                TRACFCOMP(g_trac_targeting, ERR_MRK "_invokeAttrSync: "
                    "Failed to determine master processor target; "
                    "suppressing attribute sync.");
                break;
            }

            if(pMasterProc->getAttr())
            {
                TRACFCOMP(g_trac_targeting, INFO_MRK "_invokeAttrSync; SBE "
                    "is quiesced; suppressing attribute sync.");
                break;
            }

            pError = TARGETING::syncAllAttributesToFsp();
            if(pError)
            {
                TRACFCOMP(g_trac_targeting, ERR_MRK "_invokeAttrSync: "
                    "Failed syncing attributes to FSP.");
                break;
            }

        } while(0);

        return pError;
    }

    void AttrRP::attrSyncTask()
    {
        // Crash Hostboot if this task dies
        (void)task_detach();

        errlHndl_t pError=nullptr;

        TRACFCOMP(g_trac_targeting, ENTER_MRK "AttrRP::attrSyncTask.");

        // Register to synchronize applicable attributes down to FSP when
        // a shutdown occurs.  NO_PRIORITY priority forces the attribute
        // synchronization to complete prior to the mailbox shutdown.
        // Intentionally ignores the return code that simply indicates if this
        // registration happened already.
        INITSERVICE::registerShutdownEvent(TARG_COMP_ID,
            iv_attrSyncMsgQ,
            MSG_INVOKE_ATTR_SYNC,
            INITSERVICE::NO_PRIORITY);

        while(1)
        {
            int rc = 0;
            uint32_t plid = 0;

            auto pMsg = msg_wait(iv_attrSyncMsgQ);
            if (!pMsg)
            {
                continue;
            }

            TRACFCOMP(g_trac_targeting, INFO_MRK "AttrRP: attrSyncTask: "
                "Received message of type = 0x%08X.",
                pMsg->type);

            do
            {
                switch(pMsg->type)
                {
                    case MSG_PRIME_ATTR_SYNC:
                    {
                        iv_attrSyncPrimed=true;
                        TRACFCOMP(g_trac_targeting,
                            INFO_MRK "AttrRP: attrSyncTask: "
                            "Attribute provider primed to synchronize "
                            "attributes.");
                    }
                    break;

                    case MSG_INVOKE_ATTR_SYNC:
                    {
                        TRACFCOMP(g_trac_targeting,
                            INFO_MRK "AttrRP: attrSyncTask: "
                            "Invoking attribute sync.");
                        pError = _invokeAttrSync();
                    }
                    break;

                    default:
                    {
                        TRACFCOMP(g_trac_targeting,
                            ERR_MRK "AttrRP: attrSyncTask: "
                            "Unhandled message type = 0x%08X.",
                            pMsg->type);
                        rc = -EINVAL;
                    }
                    break;
                }

            } while (0);

            if (rc != 0)
            {
                /*@
                 *  @errortype
                 *  @moduleid   TARG_ATTR_SYNC_TASK
                 *  @reasoncode TARG_RC_UNSUPPORTED_ATTR_SYNC_MSG
                 *  @userdata1  Return code
                 *  @userdata2  Message type
                 *  @devdesc    Invalid message type requested through the
                 *      attribute resource provider's attribute
                 *      synchronization daemon.
                 *  @custdesc   Unexpected boot firmware failure
                 */
                pError = new ErrlEntry(
                    ERRL_SEV_UNRECOVERABLE,
                    TARG_ATTR_SYNC_TASK,
                    TARG_RC_UNSUPPORTED_ATTR_SYNC_MSG,
                    TO_UINT64(rc),
                    TO_UINT64(pMsg->type),
                    ERRORLOG::ErrlEntry::ADD_SW_CALLOUT);
            }

            if(pError)
            {
                plid = pError->plid();
                errlCommit(pError,TARG_COMP_ID);
            }

            if(msg_is_async(pMsg))
            {
                // When caller sends an async message, the receiver must
                // deallocate the message
                (void)msg_free(pMsg);

                // Free doesn't nullify the caller's pointer automatically,
                pMsg=nullptr;
            }
            else
            {
                // Respond to request.
                pMsg->data[1] = static_cast<uint64_t>(rc);
                pMsg->extra_data = reinterpret_cast<void*>(
                    static_cast<uint64_t>(plid));
                rc = msg_respond(iv_attrSyncMsgQ, pMsg);
                if (rc)
                {
                    TRACFCOMP(g_trac_targeting,
                        ERR_MRK "AttrRP: attrSyncTask: "
                        "Bad rc = %d from msg_respond.",
                        rc);
                }
            }
        }
    }

    void AttrRP::msgServiceTask() const
    {
        TRACFCOMP(g_trac_targeting, ENTER_MRK "AttrRP::msgServiceTask");

        // Daemon loop.
        while(1)
        {
            int rc = 0;

            // Wait for message.
            msg_t* msg = msg_wait(iv_msgQ);
            if (!msg) continue;

            // Parse message data members.
            uint64_t vAddr = 0;
            void* pAddr = nullptr;
            ssize_t section = -1;
            uint64_t offset = 0;
            uint64_t size = 0;

            TRACDCOMP(g_trac_targeting, INFO_MRK "AttrRP: Message recv'd: "
                "0x%08X",msg->type);

            // These messages are sent directly from the kernel and have
            // virtual/physical addresses for data 0 and 1 respectively.
            // Note: the element type of this array was lost in transit; it is
            // assumed here to match the uint32_t message type field.
            const std::array<uint32_t, 2> kernelMessageTypes =
                {MSG_MM_RP_READ, MSG_MM_RP_WRITE};

            do
            {
                if( std::find(kernelMessageTypes.begin(),
                              kernelMessageTypes.end(),
                              msg->type) != kernelMessageTypes.end())
                {
                    vAddr = msg->data[0];
                    pAddr = reinterpret_cast<void*>(msg->data[1]);

                    TRACDCOMP(g_trac_targeting,INFO_MRK
                        "AttrRP: message type = 0x%08X, vAddr = 0x%016llX, "
                        "pAddr = 0x%016llX.",
                        msg->type, vAddr, pAddr);

                    // Locate corresponding attribute section for message.
                    for (size_t i = 0; i < iv_sectionCount; ++i)
                    {
                        if ((vAddr >= iv_sections[i].vmmAddress) &&
                            (vAddr < iv_sections[i].vmmAddress +
                                     iv_sections[i].size))
                        {
                            section = i;
                            break;
                        }
                    }

                    // Return EINVAL if no section was found.  Kernel bug?
                    if (section == -1)
                    {
                        rc = -EINVAL;
                        TRACFCOMP(g_trac_targeting,
                            ERR_MRK "AttrRP: Address given outside section "
                            "ranges: %p", vAddr);
                        break; // go to error handler
                    }

                    // Determine PNOR offset and page size.
                    offset = vAddr - iv_sections[section].vmmAddress;
                    size = std::min(PAGE_SIZE,
                                    iv_sections[section].vmmAddress +
                                    iv_sections[section].size - vAddr);

                    // We could be requested to read/write less than a page
                    // if the virtual address requested is at the end of the
                    // section and the section size is not page aligned.
                    //
                    // Example: Section size is 6k and vAddr = vmmAddr + 4k,
                    // we should only operate on 2k of content.
                }

                // Process request.
                // Read / Write message behavior.
                switch(msg->type)
                {
                    case MSG_MM_RP_READ:
                        // HEAP_ZERO_INIT should never be requested for read
                        // because kernel should automatically get a zero page.
                        if ( (iv_sections[section].type ==
                                  SECTION_TYPE_HEAP_ZERO_INIT) ||
                             (iv_sections[section].type ==
                                  SECTION_TYPE_HB_HEAP_ZERO_INIT) )
                        {
                            TRACFCOMP(g_trac_targeting,
                                ERR_MRK "AttrRP: Read request on "
                                "HEAP_ZERO section.");
                            rc = -EINVAL;
                            break;
                        }

                        // if we are NOT in mpipl OR if this IS a r/w section,
                        // Do a memcpy from PNOR address into physical page.
                        if(!iv_isMpipl ||
                           (iv_sections[section].type == SECTION_TYPE_PNOR_RW))
                        {
                            memcpy(pAddr,
                                   reinterpret_cast<void*>(
                                       iv_sections[section].pnorAddress +
                                       offset),
                                   size);
                        }
                        else
                        {
                            // Do memcpy from real memory into physical page.
                            memcpy(pAddr,
                                   reinterpret_cast<void*>(
                                       iv_sections[section].realMemAddress +
                                       offset),
                                   size);
                        }
                        break;

                    case MSG_MM_RP_WRITE:
                        // Only PNOR_RW should ever be requested for write-back
                        // because others are not allowed to be pushed back to
                        // PNOR.
                        if (iv_sections[section].type != SECTION_TYPE_PNOR_RW)
                        {
                            TRACFCOMP(g_trac_targeting,
                                ERR_MRK "AttrRP: Write request on "
                                "non-PNOR_RW section.");
                            rc = -EINVAL;
                            break;
                        }

                        // Do memcpy from physical page into PNOR.
                        memcpy(reinterpret_cast<void*>(
                                   iv_sections[section].pnorAddress + offset),
                               pAddr,
                               size);
                        break;

                    case MSG_MM_RP_RUNTIME_PREP:
                    {
                        // used for security purposes to pin all the attribute
                        // memory just prior to copying to reserve memory
                        uint64_t l_access =
                            msg->data[0] == MSG_MM_RP_RUNTIME_PREP_BEGIN?
                                WRITABLE:
                            msg->data[0] == MSG_MM_RP_RUNTIME_PREP_END?
                                WRITE_TRACKED:
                                0;

                        if (!l_access)
                        {
                            rc = -EINVAL;
                            break;
                        }

                        for (size_t i = 0; i < iv_sectionCount; ++i)
                        {
                            if ( iv_sections[i].type == SECTION_TYPE_PNOR_RW)
                            {
                                rc = mm_set_permission(
                                    reinterpret_cast<void*>(
                                        iv_sections[i].vmmAddress),
                                    iv_sections[i].size,
                                    l_access);
                            }
                        }
                        break;
                    }

                    default:
                        TRACFCOMP(g_trac_targeting,
                            ERR_MRK "AttrRP: Unhandled command type %d.",
                            msg->type);
                        rc = -EINVAL;
                        break;
                }

            } while (0);

            // Log an error log if the AttrRP was unable to handle a message
            // for any reason.
            if (rc != 0)
            {
                /*@
                 *  @errortype      ERRORLOG::ERRL_SEV_UNRECOVERABLE
                 *  @moduleid       TARG_MSG_SERVICE_TASK
                 *  @reasoncode     TARG_RC_ATTR_MSG_FAIL
                 *  @userdata1      Virtual Address
                 *  @userdata2      (Msg Type << 32) | Section #
                 *
                 *  @devdesc        The attribute resource provider was unable
                 *                  to satisfy a message request from the VMM
                 *                  portion of the kernel.  This was either due
                 *                  to an address outside a valid range or a
                 *                  message request that is invalid for the
                 *                  attribute section containing the address.
                 *
                 *  @custdesc       Attribute Resource Provider failed to
                 *                  handle request
                 */
                const bool hbSwError = true;
                errlHndl_t l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                                  TARG_MSG_SERVICE_TASK,
                                                  TARG_RC_ATTR_MSG_FAIL,
                                                  vAddr,
                                                  TWO_UINT32_TO_UINT64(
                                                      msg->type, section),
                                                  hbSwError);
                errlCommit(l_errl,TARG_COMP_ID);
            }

            // Respond to request.
            msg->data[1] = rc;
            rc = msg_respond(iv_msgQ, msg);

            if (rc)
            {
                TRACFCOMP(g_trac_targeting,
                    ERR_MRK"AttrRP: Bad rc from msg_respond: %d", rc);
            }
        }
    }

    uint64_t AttrRP::getHbDataTocAddr()
    {
        // Setup physical TOC address
        uint64_t l_toc_addr = 0;
        Bootloader::keyAddrPair_t l_keyAddrPairs =
            g_BlToHbDataManager.getKeyAddrPairs();

        for (uint8_t keyIndex = 0; keyIndex < MAX_ROW_COUNT; keyIndex++)
        {
            if(l_keyAddrPairs.key[keyIndex] == SBEIO::RSV_MEM_ATTR_ADDR)
            {
                l_toc_addr = l_keyAddrPairs.addr[keyIndex];
                break;
            }
        }

        if(!l_toc_addr)
        {
            // Setup physical TOC address to hardcoded value
            l_toc_addr = cpu_spr_value(CPU_SPR_HRMOR) +
                         VMM_HB_DATA_TOC_START_OFFSET;
        }

        // Return the physical TOC address that was found
        return l_toc_addr;
    }

    uint64_t AttrRP::getHbDataRelocPayloadAddr()
    {
        uint64_t payload_addr = 0;
        Bootloader::keyAddrPair_t l_keyAddrPairs =
            g_BlToHbDataManager.getKeyAddrPairs();

        for (uint8_t keyIndex = 0; keyIndex < MAX_ROW_COUNT; keyIndex++)
        {
            if(l_keyAddrPairs.key[keyIndex] == SBEIO::RELOC_PAYLOAD_ADDR)
            {
                payload_addr = l_keyAddrPairs.addr[keyIndex];
                break;
            }
        }

        // return relocated payload physical address
        return payload_addr;
    }

    errlHndl_t AttrRP::parseAttrSectHeader()
    {
        errlHndl_t l_errl = NULL;

        do
        {
#ifdef CONFIG_SECUREBOOT
            // Securely load HB_DATA section
            l_errl = PNOR::loadSecureSection(PNOR::HB_DATA);
            if (l_errl)
            {
                break;
            }
#endif
            // Locate attribute section in PNOR.
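            // On a normal IPL the TargetingHeader is read straight out of the
            // HB_DATA PNOR section mapped below; on an MPIPL it is instead
            // re-mapped from the reserved-memory copy located via the HBRT
            // table of contents (see the iv_isMpipl branch below).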
            PNOR::SectionInfo_t l_pnorSectionInfo;
            TargetingHeader* l_header = nullptr;
            l_errl = PNOR::getSectionInfo(PNOR::HB_DATA, l_pnorSectionInfo);
            if(l_errl)
            {
                break;
            }

            if(!iv_isMpipl)
            {
                // Find attribute section header.
                l_header = reinterpret_cast<TargetingHeader*>(
                    l_pnorSectionInfo.vaddr);
            }
            else
            {
                TRACFCOMP(g_trac_targeting,
                    "Reading attributes from memory, NOT PNOR");

                // Create a block map of the address space we used to store
                // attribute information on the initial IPL
                // Account HRMOR (non 0 base addr)
                uint64_t l_phys_attr_data_addr = 0;
                uint64_t l_attr_data_size = 0;

                // Setup physical TOC address
                uint64_t l_toc_addr = AttrRP::getHbDataTocAddr();

                // Now map the TOC to find the ATTR label address & size
                Util::hbrtTableOfContents_t * l_toc_ptr =
                    reinterpret_cast<Util::hbrtTableOfContents_t*>(
                        mm_block_map(reinterpret_cast<void*>(l_toc_addr),
                                     sizeof(Util::hbrtTableOfContents_t)));

                if (l_toc_ptr != 0)
                {
                    // read the TOC and look for ATTR data section
                    uint64_t l_attr_data_addr = Util::hb_find_rsvd_mem_label(
                        Util::HBRT_MEM_LABEL_ATTR,
                        l_toc_ptr,
                        l_attr_data_size);

                    // calculate the offset from the start of the TOC
                    uint64_t l_attr_offset = l_attr_data_addr -
                        reinterpret_cast<uint64_t>(l_toc_ptr);

                    // Setup where the ATTR data can be found
                    l_phys_attr_data_addr = l_toc_addr + l_attr_offset;

                    // Clear the mapped memory for the TOC
                    int l_rc = mm_block_unmap(
                        reinterpret_cast<void*>(l_toc_ptr));
                    if(l_rc)
                    {
                        TRACFCOMP( g_trac_targeting,
                            "parseAttrSectHeader. fail to unmap virt addr %p, "
                            "rc = %d",
                            reinterpret_cast<void*>(l_toc_ptr), l_rc);

                        // Error: mm_block_unmap returned non-zero
                        /*@
                         *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                         *  @moduleid   TARG_PARSE_ATTR_SECT_HEADER
                         *  @reasoncode TARG_RC_MM_BLOCK_UNMAP_FAIL
                         *  @userdata1  return code
                         *  @userdata2  Unmap virtual address
                         *
                         *  @devdesc    While attempting to unmap a virtual
                         *              addr for our targeting information the
                         *              kernel returned an error
                         *  @custdesc   Kernel failed to unblock mapped memory
                         */
                        l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                               TARG_PARSE_ATTR_SECT_HEADER,
                                               TARG_RC_MM_BLOCK_FAIL,
                                               l_rc,
                                               reinterpret_cast<uint64_t>(
                                                   l_toc_ptr),
                                               true);
                        break;
                    }

                    // Now just map the ATTR data section
                    l_header = reinterpret_cast<TargetingHeader*>(
                        mm_block_map(
                            reinterpret_cast<void*>(l_phys_attr_data_addr),
                            l_attr_data_size));
                }
                else
                {
                    TRACFCOMP(g_trac_targeting,
                        "Failed mapping Table of Contents section");
                    l_header = 0;
                    l_phys_attr_data_addr = l_toc_addr;
                    l_attr_data_size = sizeof(Util::hbrtTableOfContents_t);
                }

                ///////////////////////////////////////////////////////////////
                if(l_header == 0)
                {
                    TRACFCOMP(g_trac_targeting,
                        "Failed mapping phys addr: %p for %lx bytes",
                        l_phys_attr_data_addr, l_attr_data_size);

                    // Error: mm_block_map returned invalid ptr
                    /*@
                     *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                     *  @moduleid   TARG_PARSE_ATTR_SECT_HEADER
                     *  @reasoncode TARG_RC_MM_BLOCK_MAP_FAIL
                     *  @userdata1  physical address of target info
                     *  @userdata2  size we tried to map
                     *
                     *  @devdesc    While attempting to map a phys addr to a
                     *              virtual addr for our targeting information
                     *              the kernel returned an error
                     *  @custdesc   Kernel failed to block map memory
                     */
                    l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                           TARG_PARSE_ATTR_SECT_HEADER,
                                           TARG_RC_MM_BLOCK_FAIL,
                                           l_phys_attr_data_addr,
                                           l_attr_data_size,
                                           true);
                    break;
                }

                TRACFCOMP(g_trac_targeting,
                    "Mapped phys addr: %p to virt addr: %p",
                    reinterpret_cast<void*>(l_phys_attr_data_addr), l_header);
            }

            // Validate eye catch.
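            // The header must begin with the PNOR_TARG_EYE_CATCHER marker
            // word; any other value means the mapped data is not a valid
            // targeting image and cannot be parsed.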
            if (l_header->eyeCatcher != PNOR_TARG_EYE_CATCHER)
            {
                TRACFCOMP(g_trac_targeting,
                    "ATTR_DATA section in pnor header mismatch found"
                    " header: %d expected header: %d",
                    l_header->eyeCatcher, PNOR_TARG_EYE_CATCHER);

                /*@
                 *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                 *  @moduleid   TARG_PARSE_ATTR_SECT_HEADER
                 *  @reasoncode TARG_RC_BAD_EYECATCH
                 *  @userdata1  Observed Header Eyecatch Value
                 *  @userdata2  Expected Eyecatch Value
                 *
                 *  @devdesc    The eyecatch value observed in PNOR does not
                 *              match the expected value of
                 *              PNOR_TARG_EYE_CATCHER and therefore the
                 *              contents of the Attribute PNOR section are
                 *              unable to be parsed.
                 *  @custdesc   A problem occurred during the IPL of the
                 *              system.  The eyecatch value observed in memory
                 *              does not match the expected value and
                 *              therefore the contents of the attribute
                 *              sections are unable to be parsed.
                 */
                l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                       TARG_PARSE_ATTR_SECT_HEADER,
                                       TARG_RC_BAD_EYECATCH,
                                       l_header->eyeCatcher,
                                       PNOR_TARG_EYE_CATCHER);
                break;
            }

            // Allocate section structures based on section count in header.
            iv_sectionCount = l_header->numSections;
            iv_sections = new AttrRP_Section[iv_sectionCount];

            TargetingSection* l_section = nullptr;
            if(!iv_isMpipl)
            {
                // Find start to first section:
                // (PNOR addr + size of header + offset in header).
                l_section = reinterpret_cast<TargetingSection*>(
                    l_pnorSectionInfo.vaddr + sizeof(TargetingHeader) +
                    l_header->offsetToSections );
            }
            else
            {
                // Find start to first section:
                // (header address + size of header + offset in header)
                l_section = reinterpret_cast<TargetingSection*>(
                    reinterpret_cast<uint64_t>(l_header) +
                    sizeof(TargetingHeader) +
                    l_header->offsetToSections );
            }

            // Keep a running offset of how far into our real memory section
            // we are
            uint64_t l_realMemOffset = 0;

            // Parse each section.
            for (size_t i = 0; i < iv_sectionCount; i++, ++l_section)
            {
                iv_sections[i].type = l_section->sectionType;

                // Conversion cast for templated abstract pointer object only
                // works when casting to pointer of the templated type.  Since
                // cache is of a different type, we first cast to extract the
                // real pointer, then recast it into the cache
                iv_sections[i].vmmAddress = static_cast<uint64_t>(
                    TARG_TO_PLAT_PTR(l_header->vmmBaseAddress)) +
                    l_header->vmmSectionOffset*i;

                iv_sections[i].pnorAddress = l_pnorSectionInfo.vaddr +
                    l_section->sectionOffset;

#ifdef CONFIG_SECUREBOOT
                // RW targeting section is part of the unprotected payload
                // so use the normal PNOR virtual address space
                if( l_pnorSectionInfo.secure &&
                    iv_sections[i].type == SECTION_TYPE_PNOR_RW)
                {
                    iv_sections[i].pnorAddress -= (VMM_VADDR_SPNOR_DELTA +
                                                   VMM_VADDR_SPNOR_DELTA);
                }
#endif
                if(iv_isMpipl)
                {
                    // For MPIPL we are reading from real memory,
                    // not pnor flash.  Set the real memory address
                    iv_sections[i].realMemAddress =
                        reinterpret_cast<uint64_t>(l_header) + l_realMemOffset;
                }

                iv_sections[i].size = l_section->sectionSize;

                // Increment our offset variable by the size of this section
                l_realMemOffset += iv_sections[i].size;

                TRACFCOMP(g_trac_targeting,
                    "Decoded Attribute Section: %d, 0x%lx 0x%lx 0x%lx 0x%lx",
                    iv_sections[i].type,
                    iv_sections[i].vmmAddress,
                    iv_sections[i].pnorAddress,
                    iv_sections[i].realMemAddress,
                    iv_sections[i].size);
            }

        } while (false);

        return l_errl;
    }

    errlHndl_t AttrRP::editPagePermissions(uint8_t i_type,
                                           uint32_t i_permission)
    {
        errlHndl_t l_errl = NULL;
        int rc = 0;
        uint32_t l_perm = i_permission;

        do
        {
            // Assign the appropriate permissions to each section's VMM block.
            for (size_t i = 0; i < iv_sectionCount; ++i)
            {
                if(i_permission == DEFAULT_PERMISSIONS)
                {
                    switch(iv_sections[i].type)
                    {
                        case SECTION_TYPE_PNOR_RO:
                            l_perm = READ_ONLY;
                            break;
                        case SECTION_TYPE_PNOR_RW:
                            l_perm = WRITABLE | WRITE_TRACKED;
                            break;
                        case SECTION_TYPE_HEAP_PNOR_INIT:
                            l_perm = WRITABLE;
                            break;
                        case SECTION_TYPE_HEAP_ZERO_INIT:
                        case SECTION_TYPE_HB_HEAP_ZERO_INIT:
                            l_perm = WRITABLE | ALLOCATE_FROM_ZERO;
                            break;
                        default:
                            /*@
                             *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                             *  @moduleid   TARG_EDIT_PAGE_PERMISSIONS
                             *  @reasoncode TARG_RC_UNHANDLED_ATTR_SEC_TYPE
                             *  @userdata1  Section type
                             *
                             *  @devdesc    Found unhandled attribute section
                             *              type
                             *  @custdesc   FW error, unexpected Attribute
                             *              section type
                             */
                            const bool hbSwError = true;
                            l_errl = new ErrlEntry(
                                ERRL_SEV_UNRECOVERABLE,
                                TARG_EDIT_PAGE_PERMISSIONS,
                                TARG_RC_UNHANDLED_ATTR_SEC_TYPE,
                                iv_sections[i].type,
                                0,
                                hbSwError);
                            break;
                    }
                }

                if( i_type == ALL_SECTION_TYPES ||
                    i_type == iv_sections[i].type)
                {
                    rc = mm_set_permission(reinterpret_cast<void*>(
                                               iv_sections[i].vmmAddress),
                                           iv_sections[i].size,
                                           l_perm);
                }

                if (rc)
                {
                    /*@
                     *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                     *  @moduleid   TARG_EDIT_PAGE_PERMISSIONS
                     *  @reasoncode TARG_RC_MM_PERM_FAIL
                     *  @userdata1  vAddress attempting to allocate.
                     *  @userdata2  (kernel-rc << 32) | (Permissions)
                     *
                     *  @devdesc    While attempting to set permissions on
                     *              a virtual memory block for an attribute
                     *              section, the kernel returned an error.
                     *
                     *  @custdesc   Kernel failed to set permissions on
                     *              virtual memory block
                     */
                    const bool hbSwError = true;
                    l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                           TARG_EDIT_PAGE_PERMISSIONS,
                                           TARG_RC_MM_PERM_FAIL,
                                           iv_sections[i].vmmAddress,
                                           TWO_UINT32_TO_UINT64(rc, l_perm),
                                           hbSwError);
                    break;
                }
            }

        } while(0);

        return l_errl;
    }

    errlHndl_t AttrRP::createVmmSections()
    {
        errlHndl_t l_errl = NULL;

        do
        {
            // Allocate message queue for VMM requests.
            iv_msgQ = msg_q_create();

            // register it so it can be discovered by istep 21 and thus allow
            // secure runtime preparation of persistent r/w attributes
            int rc = msg_q_register(iv_msgQ, ATTRRP_MSG_Q);
            assert(rc == 0, "Bug! Unable to register message queue");

            // Create VMM block for each section, assign permissions.
            for (size_t i = 0; i < iv_sectionCount; ++i)
            {
                int rc = 0;
                msg_q_t l_msgQ = iv_msgQ;

                if ( (iv_sections[i].type == SECTION_TYPE_HEAP_ZERO_INIT) ||
                     (iv_sections[i].type == SECTION_TYPE_HB_HEAP_ZERO_INIT) )
                {
                    l_msgQ = NULL;
                }

                rc = mm_alloc_block(l_msgQ,
                                    reinterpret_cast<void*>(
                                        iv_sections[i].vmmAddress),
                                    iv_sections[i].size);

                if (rc)
                {
                    /*@
                     *  @errortype  ERRORLOG::ERRL_SEV_UNRECOVERABLE
                     *  @moduleid   TARG_CREATE_VMM_SECTIONS
                     *  @reasoncode TARG_RC_MM_BLOCK_FAIL
                     *  @userdata1  vAddress attempting to allocate.
                     *  @userdata2  RC from kernel.
                     *
                     *  @devdesc    While attempting to allocate a virtual
                     *              memory block for an attribute section, the
                     *              kernel returned an error.
                     *  @custdesc   Kernel failed to block memory
                     */
                    const bool hbSwError = true;
                    l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                           TARG_CREATE_VMM_SECTIONS,
                                           TARG_RC_MM_BLOCK_FAIL,
                                           iv_sections[i].vmmAddress,
                                           rc,
                                           hbSwError);
                    break;
                }

                if(iv_sections[i].type == SECTION_TYPE_PNOR_RW)
                {
                    /*
                     * Register this memory range to be FLUSHed during
                     * a shutdown.
                     */
                    INITSERVICE::registerBlock(
                        reinterpret_cast<void*>(iv_sections[i].vmmAddress),
                        iv_sections[i].size,ATTR_PRIORITY);
                }

            } // End iteration through each section

            if(l_errl)
            {
                break;
            }

        } while (false);

        return l_errl;
    }

    void AttrRP::populateAttrsForMpipl()
    {
        do
        {
            // Copy RW, Heap Zero Init sections because we are not
            // running the isteps that set these attrs during MPIPL
            for (size_t i = 0; i < iv_sectionCount; ++i)
            {
                // The volatile sections in MPIPL need to be copied because
                // on the MPIPL flow we will not run the HWPs that set these
                // attrs.  The RW section of the attribute data must be copied
                // into the vmmAddress in order to make future r/w come
                // from the pnor address, not real memory
                if(((iv_sections[i].type == SECTION_TYPE_HEAP_ZERO_INIT) ||
                    (iv_sections[i].type == SECTION_TYPE_HB_HEAP_ZERO_INIT) ||
                    (iv_sections[i].type == SECTION_TYPE_PNOR_RW)) &&
                   iv_isMpipl)
                {
                    memcpy(reinterpret_cast<void*>(iv_sections[i].vmmAddress),
                           reinterpret_cast<void*>(
                               iv_sections[i].realMemAddress),
                           (iv_sections[i].size));
                }
            }
        } while(0);
    }

    void* AttrRP::save(uint64_t& io_addr)
    {
        // Call save on singleton instance.
        return Singleton<AttrRP>::instance()._save(io_addr);
    }

    errlHndl_t AttrRP::save( uint8_t* i_dest, size_t& io_size )
    {
        // Call save on singleton instance.
        return Singleton<AttrRP>::instance()._save(i_dest,io_size);
    }

    uint64_t AttrRP::maxSize( )
    {
        // Find total size of the sections.
        uint64_t l_size = 0;
        for( size_t i = 0;
             i < Singleton<AttrRP>::instance().iv_sectionCount;
             ++i)
        {
            l_size += ALIGN_PAGE(
                Singleton<AttrRP>::instance().iv_sections[i].size);
        }

        return(l_size);
    } // end maxSize

    errlHndl_t AttrRP::saveOverrides( uint8_t* i_dest, size_t& io_size )
    {
        // Call save on singleton instance.
        return Singleton<AttrRP>::instance()._saveOverrides(i_dest,io_size);
    }

    void* AttrRP::_save(uint64_t& io_addr)
    {
        TRACDCOMP(g_trac_targeting, "AttrRP::save: top @ 0x%lx", io_addr);

        void* region = reinterpret_cast<void*>(io_addr);
        uint8_t* pointer = reinterpret_cast<uint8_t*>(region);

        if (TARGETING::is_no_load())
        {
            // Find total size of the sections.
            uint64_t l_size = maxSize();

            io_addr = ALIGN_PAGE_DOWN(io_addr);

            // Determine bottom of the address region.
            io_addr = io_addr - l_size;

            // Align to 64KB for No Payload
            io_addr = ALIGN_DOWN_X(io_addr,64*KILOBYTE);

            // Map in region.
            region = mm_block_map(reinterpret_cast<void*>(io_addr),l_size);
            pointer = reinterpret_cast<uint8_t*>(region);
        }

        // Copy content.
        for (size_t i = 0; i < iv_sectionCount; ++i)
        {
            memcpy(pointer,
                   reinterpret_cast<void*>(iv_sections[i].vmmAddress),
                   iv_sections[i].size);
            pointer = &pointer[ALIGN_PAGE(iv_sections[i].size)];
        }

        TRACFCOMP(g_trac_targeting, "AttrRP::save: bottom @ 0x%lx", io_addr);

        return region;
    }

    errlHndl_t AttrRP::_save( uint8_t* i_dest, size_t& io_size )
    {
        TRACFCOMP( g_trac_targeting,
            ENTER_MRK"AttrRP::_save: i_dest=%p, io_size=%ld",
            i_dest, io_size );

        errlHndl_t l_err = nullptr;
        uint8_t* pointer = i_dest;
        uint64_t l_totalSize = 0;
        uint64_t l_maxSize = io_size;
        uint64_t l_filledSize = 0;

        // Copy content.
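        // Sections are packed one after another, with the destination pointer
        // advancing by the page-aligned size of each section (matching the
        // layout produced by _save(uint64_t&) above).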
        for (size_t i = 0; i < iv_sectionCount; ++i)
        {
            l_totalSize += iv_sections[i].size;
            if (l_totalSize <= l_maxSize)
            {
                l_filledSize = l_totalSize;
                memcpy(pointer,
                       reinterpret_cast<void*>(iv_sections[i].vmmAddress),
                       iv_sections[i].size);
                pointer = &pointer[ALIGN_PAGE(iv_sections[i].size)];
            }
            else
            {
                // Need a larger buffer
                TRACFCOMP( g_trac_targeting,
                    ERR_MRK"AttrRP::_save - max size %d exceeded, "
                    "missing section %d, size %d",
                    io_size,i, iv_sections[i].size);
            }
        }

        if (l_totalSize > io_size)
        {
            // Need to increase size of the buffer
            /*@
             *  @errortype
             *  @moduleid   TARG_MOD_SAVE_ATTR_TANK
             *  @reasoncode TARG_SPACE_OVERRUN
             *  @userdata1  Maximum Available size
             *  @userdata2  Required size
             *
             *  @devdesc    Size of attribute data exceeds available
             *              buffer space
             *
             *  @custdesc   Internal firmware error applying
             *              custom configuration settings
             */
            l_err = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                  TARG_MOD_SAVE_ATTR_TANK,
                                  TARG_SPACE_OVERRUN,
                                  io_size,
                                  l_totalSize,
                                  true /*SW Error */);
        }

        io_size = l_filledSize;

        TRACFCOMP(g_trac_targeting,
            EXIT_MRK"AttrRP::_save: i_dest=%p, io_size=%ld, size needed=%ld",
            i_dest, io_size, l_totalSize );

        return l_err;
    }

    errlHndl_t AttrRP::_saveOverrides( uint8_t* i_dest, size_t& io_size )
    {
        TRACFCOMP( g_trac_targeting,
            ENTER_MRK"AttrRP::_saveOverrides: i_dest=%p, io_size=%d",
            i_dest, io_size );

        errlHndl_t l_err = nullptr;

        do
        {
            size_t l_maxSize = io_size;
            io_size = 0;

            if (!SECUREBOOT::allowAttrOverrides())
            {
                TRACFCOMP( g_trac_targeting,
                    "AttrRP::_saveOverrides: skipping "
                    "since Attribute Overrides are not allowed.");
                break;
            }

            // Save the fapi and temp overrides
            // Note: no need to look at PERM because those were added to
            // the base targeting model
            size_t l_tankSize = l_maxSize;
            uint8_t* l_dest = i_dest;

            // FAPI
            l_err = saveOverrideTank( l_dest,
                         l_tankSize,
                         &fapi2::theAttrOverrideSync().iv_overrideTank,
                         AttributeTank::TANK_LAYER_FAPI );
            if( l_err )
            {
                break;
            }
            l_maxSize -= l_tankSize;
            io_size += l_tankSize;

            // TARGETING
            l_tankSize = l_maxSize;
            l_dest = i_dest + io_size;
            l_err = saveOverrideTank( l_dest,
                         l_tankSize,
                         &Target::theTargOverrideAttrTank(),
                         AttributeTank::TANK_LAYER_TARG );
            if( l_err )
            {
                break;
            }
            l_maxSize -= l_tankSize;
            io_size += l_tankSize;

        } while(0);

        TRACFCOMP( g_trac_targeting,
            EXIT_MRK"AttrRP::_saveOverrides: io_size=%d, l_err=%.8X",
            io_size, ERRL_GETRC_SAFE(l_err) );

        return l_err;
    }

    errlHndl_t AttrRP::saveOverrideTank( uint8_t* i_dest,
                                         size_t& io_size,
                                         AttributeTank* i_tank,
                                         AttributeTank::TankLayer i_layer )
    {
        TRACFCOMP( g_trac_targeting,
            ENTER_MRK"AttrRP::saveOverrideTank: i_dest=%p, io_size=%d, "
            "i_layer=%d",
            i_dest, io_size, i_layer );

        errlHndl_t l_err = nullptr;
        size_t l_maxSize = io_size;
        io_size = 0;

        // List of chunks we're going to save away
        // (element type assumed; it was lost from the original text)
        std::vector<AttributeTank::AttributeSerializedChunk> l_chunks;
        i_tank->serializeAttributes(
            TARGETING::AttributeTank::ALLOC_TYPE_MALLOC,
            PAGESIZE,
            l_chunks );

        // Copy each chunk until we run out of space
        for( auto l_chunk : l_chunks )
        {
            // total size of data plus header for this chunk
            uint32_t l_chunkSize = l_chunk.iv_size;
            l_chunkSize += sizeof(AttrOverrideSection);
            // don't want to double-count the data payload...
            l_chunkSize -= sizeof(AttrOverrideSection::iv_chunk);

            // look for overflow, but only create 1 error
            if( (l_err == nullptr)
                && (io_size + l_chunkSize > l_maxSize) )
            {
                TRACFCOMP( g_trac_targeting,
                    ERR_MRK"Size of chunk is too big" );

                /*@
                 *  @errortype
                 *  @moduleid   TARG_MOD_SAVE_OVERRIDE_TANK
                 *  @reasoncode TARG_SPACE_OVERRUN
                 *  @userdata1[00:31] Maximum Available size
                 *  @userdata1[32:63] Required size
                 *  @userdata2[00:31] Chunk Size
                 *  @userdata2[32:63] Previous Size
                 *
                 *  @devdesc    Size of override data exceeds available
                 *              buffer space
                 *
                 *  @custdesc   Internal firmware error applying
                 *              custom configuration settings
                 */
                l_err = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
                                      TARG_CREATE_VMM_SECTIONS,
                                      TARG_RC_MM_PERM_FAIL,
                                      TWO_UINT32_TO_UINT64(l_maxSize,
                                          io_size + l_chunkSize),
                                      TWO_UINT32_TO_UINT64(l_chunkSize,
                                          io_size),
                                      true /*SW Error */);
                // deliberately not breaking out here so that we can
                //  compute the required size and free the memory in
                //  one place
            }

            if( l_err == nullptr )
            {
                // fill in the header
                AttrOverrideSection* l_header =
                    reinterpret_cast<AttrOverrideSection*>(i_dest+io_size);
                l_header->iv_layer = i_layer;
                l_header->iv_size = l_chunk.iv_size;

                // add the data
                memcpy( l_header->iv_chunk,
                        l_chunk.iv_pAttributes,
                        l_chunk.iv_size );
            }
            io_size += l_chunkSize;

            // freeing data that was allocated by serializeAttributes()
            free( l_chunk.iv_pAttributes );
            l_chunk.iv_pAttributes = NULL;
        }

        // add a terminator at the end since the size might get lost
        //  but only if we found some overrides
        if( (io_size > 0)
            && (io_size + sizeof(AttributeTank::TankLayer) < l_maxSize) )
        {
            AttrOverrideSection* l_term =
                reinterpret_cast<AttrOverrideSection*>(i_dest+io_size);
            l_term->iv_layer = AttributeTank::TANK_LAYER_TERM;
            io_size += sizeof(AttributeTank::TankLayer);
        }

        TRACFCOMP( g_trac_targeting,
            EXIT_MRK"AttrRP::saveOverrideTank: io_size=%d",
            io_size );

        return l_err;
    }

} // namespace TARGETING