summaryrefslogtreecommitdiffstats
path: root/src/usr/targeting/runtime
diff options
context:
space:
mode:
authorMarty Gloff <mgloff@us.ibm.com>2018-01-25 10:42:24 -0600
committerDaniel M. Crowell <dcrowell@us.ibm.com>2018-02-27 14:53:57 -0500
commitd01ca15eccee25ca25bd26f15782d29ae9856c4f (patch)
tree49ef18ccf44af286b6c9ffd93a42b1d8098247d7 /src/usr/targeting/runtime
parent95c3ddc9290ba5549b10a8092648abb9d0b6cb97 (diff)
downloadblackbird-hostboot-d01ca15eccee25ca25bd26f15782d29ae9856c4f.tar.gz
blackbird-hostboot-d01ca15eccee25ca25bd26f15782d29ae9856c4f.zip
Support multiple nodes in HBRT - Add Node Container
Add node container to AttrRP class in support of multiple nodes in HBRT. Change-Id: I2d343a6165f3abdf271e5f60bee0df6256806857 RTC: 186579 Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/52720 Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com> Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com> Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com> Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com> Reviewed-by: Richard J. Knight <rjknight@us.ibm.com> Reviewed-by: Prachi Gupta <pragupta@us.ibm.com> Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
Diffstat (limited to 'src/usr/targeting/runtime')
-rw-r--r--src/usr/targeting/runtime/attrrp_rt.C366
1 files changed, 335 insertions, 31 deletions
diff --git a/src/usr/targeting/runtime/attrrp_rt.C b/src/usr/targeting/runtime/attrrp_rt.C
index 826cb4a87..21aa4b8c6 100644
--- a/src/usr/targeting/runtime/attrrp_rt.C
+++ b/src/usr/targeting/runtime/attrrp_rt.C
@@ -31,6 +31,7 @@
#include <targeting/targplatreasoncodes.H>
#include <targeting/attrsync.H>
#include <util/runtime/util_rt.H>
+#include <sys/internode.h>
#include "../attrrp_common.C"
@@ -92,18 +93,206 @@ namespace TARGETING
void AttrRP::startup(errlHndl_t& io_taskRetErrl, bool isMpipl)
{
TRACFCOMP(g_trac_targeting, "AttrRP::startup");
- errlHndl_t l_errl = NULL;
+ errlHndl_t l_errl = nullptr;
+ uint8_t l_index = 0;
+ uint32_t l_instance[MAX_NODES_PER_SYS];
+ l_instance[l_index] = NODE0; // First instance is always NODE 0
+ // Initialize rest of the instances to be invalid nodes
+ for(l_index = 1; l_index < MAX_NODES_PER_SYS; l_index++)
+ {
+ l_instance[l_index] = INVALID_NODE;
+ }
+
+ // Handle first instance
+ l_index = 0;
+ uint64_t attr_size = 0;
+ TargetingHeader* l_header =
+ reinterpret_cast<TargetingHeader*>(
+ hb_get_rt_rsvd_mem(Util::HBRT_MEM_LABEL_ATTR,
+ l_instance[l_index], attr_size));
+
+ // Create local copy of node struct
+ NodeInfo l_nodeCont;
+
+ // Initialize local copy of node struct
+ l_errl = nodeInfoInit(l_nodeCont,
+ l_header,
+ l_instance[l_index]);
+
+ // Push back node struct into the node container
+ TRACFCOMP(g_trac_targeting,
+ "Push node struct for Node %d",
+ l_instance[l_index]);
+ iv_nodeContainer.push_back(l_nodeCont);
+
+ // Get pointer to number of targets
+ const AbstractPointer<uint32_t>* l_pNumTargetsPtr =
+ static_cast<const AbstractPointer<uint32_t>*>(
+ reinterpret_cast<void*>(
+ reinterpret_cast<char*>(l_header) +
+ l_header->headerSize));
+ uint32_t* l_pNumTargets = TARG_TO_PLAT_PTR(*l_pNumTargetsPtr);
+
+ // Only translate addresses on platforms where addresses are 4 bytes
+ // wide (FSP). The compiler should perform dead code elimination of
+ // this path on platforms with 8 byte wide addresses (Hostboot), since
+ // the "if" check can be statically computed at compile time.
+ if(TARG_ADDR_TRANSLATION_REQUIRED)
+ {
+ l_pNumTargets = static_cast<uint32_t*>(
+ this->translateAddr(l_pNumTargets, l_instance[l_index]));
+ }
+
+ // Get pointer to targets
+ Target (*l_pTargets)[] =
+ reinterpret_cast<Target(*)[]> (l_pNumTargets + 1);
+
+ // Walk through targets
+ for(uint32_t l_targetNum = 1;
+ l_targetNum <= *l_pNumTargets;
+ ++l_targetNum)
+ {
+ Target* l_pTarget = &(*(l_pTargets))[l_targetNum - 1];
+ TRACDCOMP( g_trac_targeting,
+ "Target %d of %d, class %0.8x, type %0.8x",
+ l_targetNum,
+ *l_pNumTargets,
+ l_pTarget->getAttr<ATTR_CLASS>(),
+ l_pTarget->getAttr<ATTR_TYPE>());
+
+ if((l_pTarget->getAttr<ATTR_CLASS>() == CLASS_SYS) &&
+ (l_pTarget->getAttr<ATTR_TYPE>() == TYPE_SYS))
+ {
+ // This attribute is only set on a multi-node system.
+ // We will use it below to detect a multi-node scenario
+ auto l_hb_images =
+ l_pTarget->getAttr<ATTR_HB_EXISTING_IMAGE>();
+
+ EntityPath l_physPath = l_pTarget->getAttr<ATTR_PHYS_PATH>();
+ TRACFCOMP( g_trac_targeting,
+ "Target %d of %d, %s, HB images %0.8x",
+ l_targetNum,
+ *l_pNumTargets,
+ l_physPath.toString(),
+ l_hb_images);
+
+ // Start the 1 in the mask at leftmost position
+ decltype(l_hb_images) l_mask =
+ 0x1 << ((sizeof(l_hb_images) * 8) - 1);
+
+ uint32_t l_node = NODE0;
+ l_index = 0;
+ // While multi-node system and valid mask and valid index
+ while(l_hb_images && l_mask && (l_index < MAX_NODES_PER_SYS))
+ {
+ // Change node instance status
+ if(iv_instanceStatus == SINGLE_NODE)
+ {
+ iv_instanceStatus = MULTI_NODE;
+ }
+
+ // If node is present
+ if(l_mask & l_hb_images)
+ {
+ l_instance[l_index] = l_node;
+
+ // Check if a previous node was skipped
+ if(iv_instanceStatus == MULTI_NODE_LT_MAX_INSTANCES)
+ {
+ // Flag that instances are not contiguous
+ iv_instanceStatus = MULTI_NODE_INSTANCE_GAP;
+ }
+ }
+ else if(iv_instanceStatus == MULTI_NODE)
+ {
+ // Flag that an instance is being skipped
+ iv_instanceStatus = MULTI_NODE_LT_MAX_INSTANCES;
+ }
+
+ l_mask >>= 1; // shift to the right for the next node
+ l_node++;
+ l_index++;
+ }
+
+ if(iv_instanceStatus == MULTI_NODE_INSTANCE_GAP)
+ {
+ TRACFCOMP( g_trac_targeting,
+ "There is a gap in the node numbers");
+ }
+
+ break;
+ }
+ }
+
+ // Handle additional instances
+ l_index = 1;
do
{
+            // Check that a valid node is set for this instance
+ if(l_instance[l_index] == INVALID_NODE)
+ {
+ l_index++;
+
+ // Continue with next instance
+ continue;
+ }
+
uint64_t attr_size = 0;
TargetingHeader* l_header =
reinterpret_cast<TargetingHeader*>(
- hb_get_rt_rsvd_mem(Util::HBRT_MEM_LABEL_ATTR,0,attr_size));
+ hb_get_rt_rsvd_mem(Util::HBRT_MEM_LABEL_ATTR,
+ l_instance[l_index], attr_size));
+ // Check if reserved memory does not exist for this instance
+ if ((NULL == l_header) && (l_instance[l_index] > NODE0))
+ {
+ TRACFCOMP(g_trac_targeting,
+ "Reserved memory does not exist for Node %d",
+ l_instance[l_index]);
- if ((NULL == l_header) ||
- (l_header->eyeCatcher != PNOR_TARG_EYE_CATCHER))
+ l_index++;
+
+ // Continue with next instance
+ continue;
+ }
+
+ // Create local copy of node struct
+ NodeInfo l_nodeCont;
+
+ // Initialize local copy of node struct
+ l_errl = nodeInfoInit(l_nodeCont,
+ l_header,
+ l_instance[l_index]);
+
+ // Push back node struct into the node container
+ TRACFCOMP(g_trac_targeting,
+ "Push node struct for Node %d",
+ l_instance[l_index]);
+ iv_nodeContainer.push_back(l_nodeCont);
+
+ l_index++;
+ } while(l_index < MAX_NODES_PER_SYS);
+
+ if (l_errl)
+ {
+ l_errl->setSev(ERRORLOG::ERRL_SEV_UNRECOVERABLE);
+ }
+
+ io_taskRetErrl = l_errl;
+ }
+
+ errlHndl_t AttrRP::nodeInfoInit(NodeInfo& io_nodeCont,
+ TargetingHeader* i_header,
+ const NODE_ID i_nodeId)
+ {
+ TRACFCOMP(g_trac_targeting, "AttrRP::nodeInfoInit");
+ errlHndl_t l_errl = nullptr;
+
+ do
+ {
+ if ((NULL == i_header) ||
+ (i_header->eyeCatcher != PNOR_TARG_EYE_CATCHER))
{
/*@
* @errortype
@@ -127,61 +316,176 @@ namespace TARGETING
l_errl = new ErrlEntry(ERRL_SEV_UNRECOVERABLE,
TARG_MOD_ATTRRP_RT,
TARG_RC_BAD_EYECATCH,
- NULL == l_header ?
- 0 : l_header->eyeCatcher,
- reinterpret_cast<uint64_t>(l_header));
+ NULL == i_header ?
+ 0 : i_header->eyeCatcher,
+ reinterpret_cast<uint64_t>(i_header));
break;
}
- // Allocate section structures based on section count in header.
- iv_sectionCount = l_header->numSections;
- iv_sections = new AttrRP_Section[iv_sectionCount]();
+ // Save pointer to targeting image in reserved memory for this node
+ io_nodeCont.pTargetMap = reinterpret_cast<void*>(i_header);
+
+ // Allocate section structures based on section count in header
+ io_nodeCont.sectionCount = i_header->numSections;
+ io_nodeCont.pSections =
+ new AttrRP_Section[io_nodeCont.sectionCount]();
+ if (i_nodeId == NODE0) // @TODO RTC:186585 remove
+ { // @TODO RTC:186585 remove
+ iv_sectionCount = io_nodeCont.sectionCount; // @TODO RTC:186585
+ iv_sections = io_nodeCont.pSections; // @TODO RTC:186585 remove
+ } // @TODO RTC:186585 remove
// Find start to the first section:
// (header address + size of header + offset in header)
TargetingSection* l_section =
reinterpret_cast<TargetingSection*>(
- reinterpret_cast<uint64_t>(l_header) +
- sizeof(TargetingHeader) + l_header->offsetToSections
+ reinterpret_cast<uint64_t>(i_header) +
+ sizeof(TargetingHeader) + i_header->offsetToSections
);
uint64_t l_offset = 0;
- for (size_t i = 0; i < iv_sectionCount; ++i, ++l_section)
+ for (size_t i = 0; i < io_nodeCont.sectionCount; ++i, ++l_section)
{
- iv_sections[i].type = l_section->sectionType;
- iv_sections[i].size = l_section->sectionSize;
+ io_nodeCont.pSections[i].type = l_section->sectionType;
+ io_nodeCont.pSections[i].size = l_section->sectionSize;
- iv_sections[i].vmmAddress =
+ io_nodeCont.pSections[i].vmmAddress =
static_cast<uint64_t>(
- TARG_TO_PLAT_PTR(l_header->vmmBaseAddress)) +
- l_header->vmmSectionOffset*i;
- iv_sections[i].pnorAddress =
- reinterpret_cast<uint64_t>(l_header) + l_offset;
+ TARG_TO_PLAT_PTR(i_header->vmmBaseAddress)) +
+ i_header->vmmSectionOffset*i;
+ io_nodeCont.pSections[i].pnorAddress =
+ reinterpret_cast<uint64_t>(i_header) + l_offset;
- l_offset += ALIGN_PAGE(iv_sections[i].size);
+ l_offset += ALIGN_PAGE(io_nodeCont.pSections[i].size);
TRACFCOMP(g_trac_targeting,
"Decoded Attribute Section: %d, 0x%lx, 0x%lx, 0x%lx",
- iv_sections[i].type,
- iv_sections[i].vmmAddress,
- iv_sections[i].pnorAddress,
- iv_sections[i].size);
+ io_nodeCont.pSections[i].type,
+ io_nodeCont.pSections[i].vmmAddress,
+ io_nodeCont.pSections[i].pnorAddress,
+ io_nodeCont.pSections[i].size);
}
-
} while(false);
- if (l_errl)
+ return l_errl;
+ }
+
+ void* AttrRP::getTargetMapPtr(const NODE_ID i_nodeId)
+ {
+ #define TARG_FN "getTargetMapPtr"
+ TARG_ENTER();
+
+ errlHndl_t l_errl = nullptr;
+ void* l_pTargetMap = nullptr;
+
+ // Cannot use isNodeValid method here since the vector itself is
+ // initialized in here.
+ if((i_nodeId >= NODE0) &&
+ (i_nodeId < AttrRP::INVALID_NODE_ID))
{
- l_errl->setSev(ERRORLOG::ERRL_SEV_UNRECOVERABLE);
+ do
+ {
+ if(iv_nodeContainer[i_nodeId].pTargetMap == nullptr)
+ {
+ // Locate targeting image for this node in reserved memory
+ uint64_t attr_size = 0;
+ iv_nodeContainer[i_nodeId].pTargetMap =
+ reinterpret_cast<void*>(
+ hb_get_rt_rsvd_mem(Util::HBRT_MEM_LABEL_ATTR,
+ i_nodeId,
+ attr_size));
+
+ // Check for failure to locate targeting image for this node
+ if (iv_nodeContainer[i_nodeId].pTargetMap == nullptr)
+ {
+ TARG_ERR("Error: hb_get_rt_rsvd_mem call failed");
+ break;
+ }
+
+ // Get pointer to targeting image header
+ TargetingHeader *l_header =
+ static_cast<TargetingHeader *>(
+ iv_nodeContainer[i_nodeId].pTargetMap);
+
+ // Initialize the node struct for this node
+ l_errl = nodeInfoInit(iv_nodeContainer[i_nodeId],
+ l_header,
+ i_nodeId);
+ if(l_errl)
+ {
+ break;
+ }
+
+ // Set the targeting image pointer
+ l_pTargetMap = iv_nodeContainer[i_nodeId].pTargetMap;
+ }
+ else
+ {
+ // This should return pTargetMap from here
+ break;
+ }
+ } while(0);
+ }
+ else
+ {
+ TARG_ERR("Invalid Node Id passed here to initialize [%d]",
+ i_nodeId);
}
- io_taskRetErrl = l_errl;
+ if(l_errl)
+ {
+ /* Commit the error */
+ errlCommit(l_errl, TARG_COMP_ID);
+ }
+
+ return l_pTargetMap;
+ #undef TARG_FN
}
- void* AttrRP::getBaseAddress(const NODE_ID i_nodeIdUnused)
+ void* AttrRP::getBaseAddress(const NODE_ID i_nodeId)
{
- return reinterpret_cast<void*>(iv_sections[0].pnorAddress);
+ #define TARG_FN "getBaseAddress()"
+ TARG_ENTER();
+
+ void* l_pMap = nullptr;
+
+        // The init for a node id might have been done via the other way, i.e.
+        // setImageName, so we need to validate that the node Id is valid and
+        // that init is already done. This is a special case validation, since
+        // the node which is getting validated doesn't yet have a container, so
+        // it should always point to the next one to be initialized.
+ if(i_nodeId < INVALID_NODE_ID)
+ {
+ // Check if the Mmap is already done
+ if(iv_nodeContainer[i_nodeId].pTargetMap != nullptr)
+ {
+ l_pMap = iv_nodeContainer[i_nodeId].pTargetMap;
+ }
+ else
+ {
+ TARG_ASSERT(0,
+ TARG_ERR_LOC "Node Id [%d] should have been already"
+ " initialized, before but the Mmap Address is NULL",
+ i_nodeId);
+ }
+ }
+ else if(i_nodeId == INVALID_NODE_ID)
+ {
+ // Push back a node struct in the node container
+ NodeInfo l_nodeCont;
+ iv_nodeContainer.push_back(l_nodeCont);
+
+ l_pMap = getTargetMapPtr(i_nodeId);
+ }
+ else
+ {
+ TARG_ERR("Invalid Node Id [%d] passed here to initialize",
+ i_nodeId);
+ }
+
+ return l_pMap;
+ #undef TARG_FN
}
void* AttrRP::translateAddr(void* i_pAddress,
OpenPOWER on IntegriCloud