author    Raja Das <rajadas2@in.ibm.com>    2018-04-11 00:06:54 -0500
committer Daniel M. Crowell <dcrowell@us.ibm.com>    2018-08-24 08:40:47 -0500
commit    ac96eaf6e91893d2ea98a02ad66d43232991cbcc (patch)
tree      ed63dffc65aff956fa63ec859fbbb8c89872255a /src/usr/runtime
parent    47994fb03586109214d79f164cca786a8bded670 (diff)
[OPAL-MPIPL][6] Reserve the HBBL and HBB load area for OPAL fspless
Presently, on PHyp systems this area is reserved based on the HRMOR value
fetched from the CPU; we need to reserve this area for FSP-less OPAL
systems as well, where HRMOR is hard-coded to 3968M by the SBE.

Change-Id: I23d6225547d769e1a64046c8202364aa1dd2720b
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/57038
Reviewed-by: Sachin Gupta <sgupta2m@in.ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Reviewed-by: VASANT HEGDE <hegdevasant@linux.vnet.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
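For reference, the comments in the hunk below derive both HRMOR values from
the PowerISA offset rule (offsets of the form i x 2^r with r = 26, i.e. a
64MB granularity). Here is a minimal standalone sketch of that arithmetic;
it is not part of the commit, and only the 128MB/3968MB values and the
r = 26 granularity come from the commit itself:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t MB = 1024ull * 1024ull;
        const uint64_t granularity = 1ull << 26;   // r = 26 -> 64MB steps

        // PHyp: i = 2 -> 2 * 64MB = 128MB
        assert((2 * granularity) == 128 * MB);

        // FSP-less OPAL: the SBE hard-codes HRMOR to 3968MB, i.e. i = 62,
        // just below the ~4GB (i = 64) ceiling the comments mention.
        assert((62 * granularity) == 3968 * MB);

        return 0;
    }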
Diffstat (limited to 'src/usr/runtime')
-rw-r--r--  src/usr/runtime/populate_hbruntime.C  38
1 file changed, 37 insertions(+), 1 deletion(-)
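The hunk below keys the new reservation off the delta between the live HRMOR
SPR and the PHyp base. A minimal runnable sketch of that decision, using
hypothetical stand-ins for the Hostboot symbols (VMM_HRMOR_OFFSET is assumed
to equal the 128MB PHyp HRMOR, which is what makes the delta zero on PHyp;
cpu_spr_value/CPU_SPR_HRMOR are stubbed here, not the real accessors):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the Hostboot symbols used in the diff.
    static const uint64_t VMM_HRMOR_OFFSET = 128ull << 20;      // assumed 128MB
    enum { CPU_SPR_HRMOR };
    static uint64_t cpu_spr_value(int) { return 3968ull << 20; } // OPAL case

    int main()
    {
        uint64_t l_hbAddr = cpu_spr_value(CPU_SPR_HRMOR) - VMM_HRMOR_OFFSET;

        if (l_hbAddr)  // zero -> PHyp; non-zero -> OPAL (3840MB here)
        {
            // Reserve the HBBL/HBB load area at l_hbAddr so an MPIPL
            // reload does not stamp on OPAL/Host Linux data.
            printf("reserve HBBL/HBB load area at 0x%llx\n",
                   (unsigned long long)l_hbAddr);
        }
        return 0;
    }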
diff --git a/src/usr/runtime/populate_hbruntime.C b/src/usr/runtime/populate_hbruntime.C
index 5450319e9..7f47c4138 100644
--- a/src/usr/runtime/populate_hbruntime.C
+++ b/src/usr/runtime/populate_hbruntime.C
@@ -1160,10 +1160,46 @@ errlHndl_t populate_HbRsvMem(uint64_t i_nodeId, bool i_master_node)
{
break;
}
-
}
else if(TARGETING::is_sapphire_load())
{
+ // Reserve the HRMOR space if it is not at zero offset.
+ ////////////////////////////////////////////////////////////////////
+ // HRMOR calculation on OPAL vs. PHyp systems
+ // For PHyp systems, HRMOR is set to 128MB, which is calculated from
+ // this rule ==>>
+ // "supported offset values are all values of the
+ // form i x 2^r, where 0 <= i <= 2^j, and j and r are
+ // implementation-dependent values having the properties that
+ // 12 <= r <= 26". (Text quoted from the PowerISA doc)
+ // Based on the above, r is 26, which sets the offset
+ // granularity to 64MB; i is therefore '2', which makes the
+ // offset 128MB.
+ // Based on the same calculation/assumption, the HRMOR for an OPAL
+ // system is derived as follows -
+ // OPAL needs the HRMOR in the range of 4GB, so that reloading HB
+ // doesn't stamp on the OPAL/Host Linux data. Keeping the max
+ // granularity at 64MB, 'i' is the multiplication factor, which
+ // comes to around 64 (64MB * 64 = 4096MB).
+ ////////////////////////////////////////////////////////////////////
+ uint64_t l_hbAddr = cpu_spr_value(CPU_SPR_HRMOR) - VMM_HRMOR_OFFSET;
+ // If l_hbAddr is zero, this is a PHyp system where HRMOR is set to
+ // 128MB; if it is non-zero, this is an OPAL system where HRMOR is
+ // set to 3968MB.
+ if(l_hbAddr)
+ {
+ l_elog = setNextHbRsvMemEntry(HDAT::RHB_TYPE_PRIMARY,
+ i_nodeId,
+ l_hbAddr,
+ VMM_HB_RSV_MEM_SIZE,
+ HBRT_RSVD_MEM__PRIMARY,
+ HDAT::RHB_READ_WRITE,
+ false);
+ if(l_elog != nullptr)
+ {
+ break;
+ }
+ }
// Opal data goes at top_of_mem
l_topMemAddr = TARGETING::get_top_mem_addr();
assert (l_topMemAddr != 0,