authorDean Sanner <dsanner@us.ibm.com>2018-02-07 18:10:04 -0600
committerDaniel M. Crowell <dcrowell@us.ibm.com>2018-02-28 16:29:42 -0500
commit2414e7c8e5de03e829022693f7813809313f40fc (patch)
treef36ea03a8c09cc15b9c55fe7991304827b499e8c /src/usr/isteps
parentb6e41fc3329eeabb5d1356ec0b26c3dfed8bd903 (diff)
downloadtalos-hostboot-2414e7c8e5de03e829022693f7813809313f40fc.tar.gz
talos-hostboot-2414e7c8e5de03e829022693f7813809313f40fc.zip
Support sending chip info to SBEs on multinode
Existing code didn't support collecting the present chips on all nodes (Hostboot instances) and updating all SBEs with a system-wide view. For non-multi-drawer configs, each SBE is still updated based on the local Hostboot instance's view. For multi-drawer configs, the other Hostboot instances are queried for their configs, the results are aggregated, and all instances are then informed of the system-wide view via IPC communication.

Change-Id: I45c5673df26f940de212cbe2b54525e32c9147ab
Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/53603
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Dean Sanner <dsanner@us.ibm.com>
Reviewed-by: William G. Hoffa <wghoffa@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
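As a hedged illustration of the flow described above (not Hostboot's actual implementation; the real logic lives in RUNTIME::sendSBESystemConfig, and the helper names here, queryNodeConfig, broadcastSystemConfig, and sendToLocalSbes, are hypothetical stand-ins for the IPC and SBE chip-op plumbing), a minimal standalone C++ sketch of the multi-node aggregation:

    // Conceptual sketch only; stubs stand in for IPC and SBE chip-ops.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static uint64_t queryNodeConfig(int node)       { return 1ULL << node; } // stub
    static void     broadcastSystemConfig(uint64_t) {}                       // stub
    static void     sendToLocalSbes(uint64_t map)
    {
        printf("sending system config 0x%016llx to local SBEs\n",
               (unsigned long long)map);
    }

    static void sendSbeSystemConfigSketch(const std::vector<int>& nodes)
    {
        // 1. Ask each Hostboot instance (node) for the chips it can see.
        uint64_t systemMap = 0;
        for (int n : nodes)
        {
            systemMap |= queryNodeConfig(n);
        }

        // 2. Share the aggregated, system-wide view with the other nodes (IPC).
        broadcastSystemConfig(systemMap);

        // 3. Each node then sends the same map to its own SBEs, much like the
        //    removed per-proc loop did for a single Hostboot instance.
        sendToLocalSbes(systemMap);
    }

    int main()
    {
        sendSbeSystemConfigSketch({0, 1, 2});  // e.g. a three-drawer system
        return 0;
    }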
Diffstat (limited to 'src/usr/isteps')
-rw-r--r-- src/usr/isteps/istep21/call_host_runtime_setup.C | 64
1 file changed, 8 insertions, 56 deletions
diff --git a/src/usr/isteps/istep21/call_host_runtime_setup.C b/src/usr/isteps/istep21/call_host_runtime_setup.C
index ab9fd21d0..bad88d537 100644
--- a/src/usr/isteps/istep21/call_host_runtime_setup.C
+++ b/src/usr/isteps/istep21/call_host_runtime_setup.C
@@ -457,62 +457,6 @@ void* call_host_runtime_setup (void *io_pArgs)
}
}
- //Need to send System Configuration down to SBE
- //Use targeting code to get a list of all processors
- TARGETING::TargetHandleList l_procChips;
- getAllChips( l_procChips, TARGETING::TYPE_PROC , true);
- uint64_t l_systemFabricConfigurationMap = 0x0;
-
-
- for(auto l_proc : l_procChips)
- {
- //Get fabric info from proc
- uint8_t l_fabricChipId =
- l_proc->getAttr<TARGETING::ATTR_FABRIC_CHIP_ID>();
- uint8_t l_fabricGroupId =
- l_proc->getAttr<TARGETING::ATTR_FABRIC_GROUP_ID>();
- //Calculate what bit position this will be
- uint8_t l_bitPos = l_fabricChipId + (8 * l_fabricGroupId);
-
- //Set the bit @ l_bitPos to be 1 because this is a functional proc
- l_systemFabricConfigurationMap |= (0x8000000000000000 >> l_bitPos);
- }
-
- TRACFCOMP( ISTEPS_TRACE::g_trac_isteps_trace,
- "Setting sending systemConfig to all Procs...");
-
- for(auto l_proc : l_procChips)
- {
- TRACDCOMP( ISTEPS_TRACE::g_trac_isteps_trace,
- "calling sendSystemConfig on proc 0x%x",
- l_proc->getAttr<TARGETING::ATTR_POSITION>());
- l_err = SBEIO::sendSystemConfig(l_systemFabricConfigurationMap,
- l_proc);
- if ( l_err )
- {
- TRACFCOMP( ISTEPS_TRACE::g_trac_isteps_trace,
- "sendSystemConfig ERROR : Error sending sbe chip-op to proc 0x%.8X. Returning errorlog, reason=0x%x",
- TARGETING::get_huid(l_proc),
- l_err->reasonCode() );
- break;
- }
- else
- {
- TRACDCOMP( ISTEPS_TRACE::g_trac_isteps_trace,
- "sendSystemConfig SUCCESS" );
- }
- }
-
- if(l_err)
- {
- break;
- }
- else
- {
- TRACFCOMP( ISTEPS_TRACE::g_trac_isteps_trace,
- "Successfully sent all system configs to procs via SBE chip op !!");
- }
-
// Need to load up the runtime module if it isn't already loaded
if ( !VFS::module_is_loaded( "libruntime.so" ) )
{
@@ -528,6 +472,14 @@ void* call_host_runtime_setup (void *io_pArgs)
}
}
+ //Need to send System Configuration down to SBE for all HB
+ //instances
+ l_err = RUNTIME::sendSBESystemConfig();
+ if(l_err)
+ {
+ break;
+ }
+
// Configure the ATTR_HBRT_HYP_ID attributes so that runtime code and
// whichever hypervisor is loaded can reference equivalent targets
l_err = RUNTIME::configureHbrtHypIds(TARGETING::is_phyp_load());
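For reference, the bit arithmetic in the removed loop (bit position = FABRIC_CHIP_ID + 8 * FABRIC_GROUP_ID, counted from the most significant bit of the 64-bit map) can be exercised in isolation. A minimal standalone sketch, not Hostboot code, with made-up example IDs:

    // Standalone illustration of the mask math used by the removed loop.
    #include <cstdint>
    #include <cstdio>

    // bit position = chipId + 8 * groupId, counted from the MSB of the map.
    static uint64_t procMask(uint8_t chipId, uint8_t groupId)
    {
        uint8_t bitPos = chipId + 8 * groupId;
        return 0x8000000000000000ULL >> bitPos;
    }

    int main()
    {
        uint64_t configMap = 0;

        // Example: group 0 chip 0 -> bit 0, group 1 chip 2 -> bit 10.
        configMap |= procMask(0, 0);   // 0x8000000000000000
        configMap |= procMask(2, 1);   // 0x0020000000000000

        printf("system fabric configuration map: 0x%016llx\n",
               (unsigned long long)configMap);
        return 0;
    }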