author    Mark Pizzutillo <Mark.Pizzutillo@ibm.com>    2019-09-03 12:55:26 -0400
committer Nicholas E Bofferding <bofferdn@us.ibm.com>  2019-09-14 08:05:02 -0500
commit    7a27f4603a1b222e2d527d1167e5a3c3653c404b (patch)
tree      357a894034f2958fa13fcbbe38e87c608ca72dbc /src
parent    1434b8952e3a2dbd55a0876ec9d89fe3ffd21da6 (diff)
download  talos-hostboot-7a27f4603a1b222e2d527d1167e5a3c3653c404b.tar.gz
          talos-hostboot-7a27f4603a1b222e2d527d1167e5a3c3653c404b.zip
Add ekb dual drop support for p9a
SPD and VPD will need to be revisited once we architect how this will work
from the Cronus and HB side. In the meantime, we pass EKB unit tests so we
are on the right track.

Change-Id: Ie4516339dcc6e43ee1e6dd7aaaa9589fbfcace89
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/83218
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Tested-by: Hostboot CI <hostboot-ci+hostboot@us.ibm.com>
Reviewed-by: Louis Stermole <stermole@us.ibm.com>
Reviewed-by: STEPHEN GLANCY <sglancy@us.ibm.com>
Dev-Ready: STEPHEN GLANCY <sglancy@us.ibm.com>
Reviewed-by: Jennifer A Stofer <stofer@us.ibm.com>
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/83436
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Reviewed-by: Nicholas E Bofferding <bofferdn@us.ibm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/exp_draminit_utils.H   141
-rw-r--r--  src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/phy/exp_train_handler.H  26
-rw-r--r--  src/import/chips/p9a/procedures/hwp/memory/lib/freq/axone_mss_freq.C               4
-rw-r--r--  src/import/chips/p9a/procedures/hwp/memory/p9a_mss_eff_config.C                   25
-rw-r--r--  src/import/generic/memory/lib/data_engine/data_engine_utils.H                     18
5 files changed, 138 insertions, 76 deletions
diff --git a/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/exp_draminit_utils.H b/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/exp_draminit_utils.H
index ba9602b3b..ba37f9abc 100644
--- a/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/exp_draminit_utils.H
+++ b/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/exp_draminit_utils.H
@@ -37,6 +37,8 @@
#define __MSS_EXP_DRAMINIT_UTILS__
#include <fapi2.H>
+#include <lib/shared/exp_defaults.H>
+#include <lib/dimm/exp_rank.H>
#include <lib/shared/exp_consts.H>
#include <exp_data_structs.H>
#include <mss_generic_attribute_getters.H>
@@ -240,6 +242,7 @@ class phy_params
fapi2::Target<fapi2::TARGET_TYPE_MEM_PORT> iv_target;
phy_params_t iv_params;
+ std::vector<mss::rank::info<>> iv_rank_info;
public:
@@ -323,6 +326,8 @@ class phy_params
iv_params.iv_encoded_quadcs = (l_has_rcd && l_4r) ? MSDG_QUAD_ENCODE_MODE : MSDG_DUAL_DIRECT_MODE;
}
+ FAPI_TRY(mss::rank::ranks_on_port(i_target, iv_rank_info));
+
fapi_try_exit:
o_rc = fapi2::current_err;
}
@@ -333,11 +338,14 @@ class phy_params
/// @brief Set params as per the value initialized (useful for testing)
/// @param[in] i_target the fapi2 target
/// @param[in] i_phy_params explorer specific data structure
+ /// @param[in] i_rank_info vector of rank info objects from the rank API
///
phy_params(const fapi2::Target<fapi2::TARGET_TYPE_MEM_PORT>& i_target,
- const phy_params_t& i_phy_params):
+ const phy_params_t& i_phy_params,
+ const std::vector<mss::rank::info<>> i_rank_info):
iv_target(i_target),
- iv_params(i_phy_params)
+ iv_params(i_phy_params),
+ iv_rank_info(i_rank_info)
{}
///
@@ -346,6 +354,70 @@ class phy_params
~phy_params() = default;
///
+ /// @brief Set the rank-based phy field from the attribute
+ ///
+ /// @param[in] i_phy_param_ranks array of pointers to the rank fields to be filled in
+ /// @param[in] i_phy_param_attr pointer to the iv array indexed by dimm & rank
+ /// @note this function assumes i_phy_param_ranks is properly populated with 4 fields (1 per phy rank)
+ ///
+ void set_phy_field_by_rank(const std::vector<uint16_t*>& i_phy_param_ranks,
+ const uint8_t (&i_phy_param_attr)[MAX_DIMM_PER_PORT][MAX_RANK_PER_DIMM]) const
+ {
+ // First, zero everything out
+ for (uint8_t l_rank = 0; l_rank < i_phy_param_ranks.size(); ++l_rank)
+ {
+ *i_phy_param_ranks[l_rank] = 0;
+ }
+
+ // For each rank, the phy rank value (0-3) is what needs to be filled in for draminit
+ // This maps to the field corresponding to the ATTR index value
+ // indexed by the rank's dimm index and dimm rank
+ for (const auto& l_rank : iv_rank_info)
+ {
+ const uint8_t l_dimm_index = mss::index(l_rank.get_dimm_target());
+ const uint8_t l_dimm_rank = l_rank.get_dimm_rank();
+ *i_phy_param_ranks[l_rank.get_phy_rank()] = i_phy_param_attr[l_dimm_index][l_dimm_rank];
+ }
+ }
+
+ ///
+ /// @brief Maps the ODT RD/WR attributes to the form needed for exp_draminit
+ ///
+ /// @param[in] i_odt_rd_wr_attr iv array indexed by dimm & rank
+ /// @param[out] o_odt_buffer buffer to populate
+ ///
+ fapi2::ReturnCode populate_odt_buffer(const uint8_t (&i_odt_rd_wr_attr)[MAX_DIMM_PER_PORT][MAX_RANK_PER_DIMM],
+ fapi2::buffer<uint16_t>& o_odt_buffer) const
+ {
+ // Const vector to map phy ranks to their buffer offset position
+ const std::vector<uint8_t> l_buffer_rank_offset =
+ {
+ odt_fields::RANK0,
+ odt_fields::RANK1,
+ odt_fields::RANK2,
+ odt_fields::RANK3,
+ };
+
+ for (const auto& l_rank : iv_rank_info)
+ {
+ const auto OFFSET = l_buffer_rank_offset[l_rank.get_phy_rank()];
+ const auto DIMM_RANK = l_rank.get_dimm_rank();
+ const auto DIMM_INDEX = mss::index(l_rank.get_dimm_target());
+ FAPI_TRY(o_odt_buffer.insert(i_odt_rd_wr_attr[DIMM_INDEX][DIMM_RANK], OFFSET, odt_fields::FLD_LENGTH));
+ }
+
+ // Rest of the buffer should already be zeroed from declaration
+ // Our attribute values come in left aligned (LSB left), our buffers are left aligned, but MCHP wants things right aligned:
+ // (rank0 == [1:0], rank1 == [5:4])
+ // So we can set it up from the buffer perspective, but then flip the whole buffer, getting the values back to their
+ // correct form (MSB right aligned) in addition to flipping the rank positions to their expected locations
+ o_odt_buffer.reverse();
+
+ fapi_try_exit:
+ return fapi2::current_err;
+ }
+
+ ///
/// @brief user_input_msdg structure setup for parameter DimmType
/// @param[in,out] io_phy_params the phy params data struct
/// @return FAPI2_RC_SUCCESS iff okay
@@ -738,10 +810,15 @@ class phy_params
///
fapi2::ReturnCode set_RttNom(user_input_msdg& io_phy_params) const
{
- io_phy_params.DramRttNomR0[0] = iv_params.iv_dram_rtt_nom[0][0];
- io_phy_params.DramRttNomR1[0] = iv_params.iv_dram_rtt_nom[0][1];
- io_phy_params.DramRttNomR2[0] = iv_params.iv_dram_rtt_nom[0][2];
- io_phy_params.DramRttNomR3[0] = iv_params.iv_dram_rtt_nom[0][3];
+ const std::vector<uint16_t*> l_rtt_noms =
+ {
+ &io_phy_params.DramRttNomR0[0],
+ &io_phy_params.DramRttNomR1[0],
+ &io_phy_params.DramRttNomR2[0],
+ &io_phy_params.DramRttNomR3[0],
+ };
+
+ set_phy_field_by_rank(l_rtt_noms, iv_params.iv_dram_rtt_nom);
return fapi2::FAPI2_RC_SUCCESS;
}
@@ -752,10 +829,15 @@ class phy_params
///
fapi2::ReturnCode set_RttWr(user_input_msdg& io_phy_params) const
{
- io_phy_params.DramRttWrR0[0] = iv_params.iv_dram_rtt_wr[0][0];
- io_phy_params.DramRttWrR1[0] = iv_params.iv_dram_rtt_wr[0][1];
- io_phy_params.DramRttWrR2[0] = iv_params.iv_dram_rtt_wr[0][2];
- io_phy_params.DramRttWrR3[0] = iv_params.iv_dram_rtt_wr[0][3];
+ const std::vector<uint16_t*> l_rtt_wrs =
+ {
+ &io_phy_params.DramRttWrR0[0],
+ &io_phy_params.DramRttWrR1[0],
+ &io_phy_params.DramRttWrR2[0],
+ &io_phy_params.DramRttWrR3[0],
+ };
+
+ set_phy_field_by_rank(l_rtt_wrs, iv_params.iv_dram_rtt_wr);
return fapi2::FAPI2_RC_SUCCESS;
}
@@ -766,10 +848,15 @@ class phy_params
///
fapi2::ReturnCode set_RttPark(user_input_msdg& io_phy_params) const
{
- io_phy_params.DramRttParkR0[0] = iv_params.iv_dram_rtt_park[0][0];
- io_phy_params.DramRttParkR1[0] = iv_params.iv_dram_rtt_park[0][1];
- io_phy_params.DramRttParkR2[0] = iv_params.iv_dram_rtt_park[0][2];
- io_phy_params.DramRttParkR3[0] = iv_params.iv_dram_rtt_park[0][3];
+ const std::vector<uint16_t*> l_rtt_parks =
+ {
+ &io_phy_params.DramRttParkR0[0],
+ &io_phy_params.DramRttParkR1[0],
+ &io_phy_params.DramRttParkR2[0],
+ &io_phy_params.DramRttParkR3[0],
+ };
+
+ set_phy_field_by_rank(l_rtt_parks, iv_params.iv_dram_rtt_park);
return fapi2::FAPI2_RC_SUCCESS;
}
@@ -852,16 +939,11 @@ class phy_params
{
fapi2::buffer<uint16_t> odt_wr_map_cs_buff;
- // TK - Changes needed for Dual Drop / 4U in the future.
- // Will likely require ODT RD/WR attribute changes
- odt_wr_map_cs_buff.insert<odt_fields::RANK3, odt_fields::FLD_LENGTH>(iv_params.iv_odt_wr_map_cs[0][3]);
- odt_wr_map_cs_buff.insert<odt_fields::RANK2, odt_fields::FLD_LENGTH>(iv_params.iv_odt_wr_map_cs[0][2]);
- odt_wr_map_cs_buff.insert<odt_fields::RANK1, odt_fields::FLD_LENGTH>(iv_params.iv_odt_wr_map_cs[0][1]);
- odt_wr_map_cs_buff.insert<odt_fields::RANK0, odt_fields::FLD_LENGTH>(iv_params.iv_odt_wr_map_cs[0][0]);
-
- odt_wr_map_cs_buff.reverse();
+ FAPI_TRY(populate_odt_buffer(iv_params.iv_odt_wr_map_cs, odt_wr_map_cs_buff));
io_phy_params.OdtWrMapCs[0] = odt_wr_map_cs_buff;
- return fapi2::FAPI2_RC_SUCCESS;
+
+ fapi_try_exit:
+ return fapi2::current_err;
}
///
@@ -873,16 +955,11 @@ class phy_params
{
fapi2::buffer<uint16_t> odt_rd_map_cs_buff;
- // TK - Changes needed for Dual Drop / 4U in the future.
- // Will likely require ODT RD/WR attribute changes
- odt_rd_map_cs_buff.insert<odt_fields::RANK3, odt_fields::FLD_LENGTH>(iv_params.iv_odt_rd_map_cs[0][3]);
- odt_rd_map_cs_buff.insert<odt_fields::RANK2, odt_fields::FLD_LENGTH>(iv_params.iv_odt_rd_map_cs[0][2]);
- odt_rd_map_cs_buff.insert<odt_fields::RANK1, odt_fields::FLD_LENGTH>(iv_params.iv_odt_rd_map_cs[0][1]);
- odt_rd_map_cs_buff.insert<odt_fields::RANK0, odt_fields::FLD_LENGTH>(iv_params.iv_odt_rd_map_cs[0][0]);
-
- odt_rd_map_cs_buff.reverse();
+ FAPI_TRY(populate_odt_buffer(iv_params.iv_odt_rd_map_cs, odt_rd_map_cs_buff));
io_phy_params.OdtRdMapCs[0] = odt_rd_map_cs_buff;
- return fapi2::FAPI2_RC_SUCCESS;
+
+ fapi_try_exit:
+ return fapi2::current_err;
}
///
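Note on the insert-then-reverse trick used by populate_odt_buffer() above: per the
in-code comment, the attribute fields arrive bit-reversed (LSB on the left), so a
single whole-buffer reverse both moves each field to its right-aligned position and
restores conventional bit order. The following is a minimal standalone sketch (plain
C++, no fapi2) of those semantics. The offsets RANK0=0, RANK1=4 and FLD_LENGTH=2 are
inferred from the "rank0 == [1:0], rank1 == [5:4]" comment, not taken from the header.

// Stand-ins for fapi2::buffer<uint16_t> operations (assumed semantics).
#include <cstdint>
#include <cstdio>

// Reverse the bit order of the whole word, like buffer::reverse().
static uint16_t reverse16(uint16_t v)
{
    uint16_t r = 0;
    for (int i = 0; i < 16; ++i)
    {
        r = static_cast<uint16_t>((r << 1) | ((v >> i) & 1));
    }
    return r;
}

// Insert a 2-bit field at a big-endian offset (offset 0 == MSB side),
// a simplified stand-in for buffer::insert(value, offset, FLD_LENGTH).
static uint16_t insert2(uint16_t buf, uint8_t val, unsigned offset)
{
    const unsigned shift = 14u - offset;                  // big-endian -> LSB shift
    buf = static_cast<uint16_t>(buf & ~(0x3u << shift));  // clear the field
    return static_cast<uint16_t>(buf | ((val & 0x3u) << shift));
}

int main()
{
    // Intended ODT values rank0=0b01, rank1=0b10 arrive LSB-left,
    // i.e. as the bit-reversed attribute fields 0b10 and 0b01.
    uint16_t buf = 0;
    buf = insert2(buf, 0x2, 0);   // rank0 attribute at big-endian offset 0
    buf = insert2(buf, 0x1, 4);   // rank1 attribute at big-endian offset 4
    buf = reverse16(buf);         // one flip fixes both position and bit order
    std::printf("0x%04x\n", buf); // 0x0021: rank0==0b01 at [1:0], rank1==0b10 at [5:4]
    return 0;
}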
diff --git a/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/phy/exp_train_handler.H b/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/phy/exp_train_handler.H
index 41c868946..8cd501d67 100644
--- a/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/phy/exp_train_handler.H
+++ b/src/import/chips/ocmb/explorer/procedures/hwp/memory/lib/phy/exp_train_handler.H
@@ -38,6 +38,8 @@
#include <fapi2.H>
#include <lib/shared/exp_consts.H>
+#include <lib/shared/exp_defaults.H>
+#include <lib/dimm/exp_rank.H>
#include <exp_data_structs.H>
#include <generic/memory/lib/utils/mss_bad_bits.H>
#include <generic/memory/lib/mss_generic_attribute_setters.H>
@@ -154,28 +156,12 @@ class bad_bit_interface
fapi2::ReturnCode record_bad_bits_interface( const fapi2::Target<fapi2::TARGET_TYPE_DIMM>& i_target,
uint8_t (&o_bad_dq)[BAD_BITS_RANKS][BAD_DQ_BYTE_COUNT]) const
{
- // Gets the rank offset for this DIMM
- const uint64_t DIMM_OFFSET = mss::index(i_target) == 0 ? 0 : 2;
+ std::vector<mss::rank::info<>> l_ranks;
+ FAPI_TRY(mss::rank::ranks_on_dimm(i_target, l_ranks));
- // Loops through all of the ranks on this DIMM
- uint8_t l_num_ranks = 0;
- FAPI_TRY(mss::attr::get_num_master_ranks_per_dimm(i_target, l_num_ranks));
-
- // TK Add in num ranks check here
- // TK update for the ranks API
-
- for(uint64_t l_rank = 0; l_rank < l_num_ranks; ++l_rank)
+ for(const auto& l_rank : l_ranks)
{
- const uint64_t RANK = DIMM_OFFSET + l_rank;
- FAPI_ASSERT(RANK < mss::exp::MAX_RANK_PER_DIMM,
- fapi2::MSS_EXP_DRAMINIT_BAD_NUM_RANKS()
- .set_NUM_RANKS(RANK)
- .set_MAX_RANKS(mss::exp::MAX_RANK_PER_DIMM)
- .set_TARGET(i_target),
- "%s bad number of ranks passed num:%u, max:%u",
- mss::c_str(i_target), RANK, mss::exp::MAX_RANK_PER_DIMM);
-
- memcpy(&o_bad_dq[RANK], &iv_bad_bits[RANK], sizeof(uint8_t[BAD_DQ_BYTE_COUNT]));
+ memcpy(&o_bad_dq[l_rank.get_phy_rank()], &iv_bad_bits[l_rank.get_phy_rank()], sizeof(uint8_t[BAD_DQ_BYTE_COUNT]));
}
return fapi2::FAPI2_RC_SUCCESS;
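The deleted lines above computed the PHY rank by hand from a per-DIMM offset;
get_phy_rank() now supplies it via the rank API in lib/dimm/exp_rank.H. For
reference, a sketch of the arithmetic the removed code implemented — the 0-or-2
offset implies two PHY rank slots reserved per DIMM, a constant read off the old
code rather than the API:

#include <cstdint>

// Two PHY rank slots per DIMM, per the removed DIMM_OFFSET = (index == 0 ? 0 : 2)
constexpr uint64_t PHY_RANKS_PER_DIMM = 2;

constexpr uint64_t phy_rank(const uint64_t dimm_index, const uint64_t dimm_rank)
{
    return (dimm_index * PHY_RANKS_PER_DIMM) + dimm_rank;
}

static_assert(phy_rank(0, 1) == 1, "DIMM0, rank 1 -> PHY rank 1");
static_assert(phy_rank(1, 0) == 2, "DIMM1, rank 0 -> PHY rank 2");
static_assert(phy_rank(1, 1) == 3, "DIMM1, rank 1 -> PHY rank 3");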
diff --git a/src/import/chips/p9a/procedures/hwp/memory/lib/freq/axone_mss_freq.C b/src/import/chips/p9a/procedures/hwp/memory/lib/freq/axone_mss_freq.C
index d4c280c2f..1a66aa77a 100644
--- a/src/import/chips/p9a/procedures/hwp/memory/lib/freq/axone_mss_freq.C
+++ b/src/import/chips/p9a/procedures/hwp/memory/lib/freq/axone_mss_freq.C
@@ -286,9 +286,9 @@ fapi2::ReturnCode check_freq_support_vpd<mss::proc_type::AXONE>( const fapi2::Ta
continue;
}
- l_vpd_info.iv_rank = l_rank.get_port_rank();
+ l_vpd_info.iv_rank = l_rank.get_dimm_rank();
FAPI_INF("%s. VPD info - checking rank: %d",
- mss::c_str(i_target), l_rank.get_port_rank());
+ mss::c_str(i_target), l_rank.get_dimm_rank());
// Check if this VPD configuration is supported
FAPI_TRY(is_vpd_config_supported<mss::proc_type::AXONE>(l_vpd_target, i_proposed_freq, l_vpd_info, o_supported),
diff --git a/src/import/chips/p9a/procedures/hwp/memory/p9a_mss_eff_config.C b/src/import/chips/p9a/procedures/hwp/memory/p9a_mss_eff_config.C
index 9210684d1..7a62e36a0 100644
--- a/src/import/chips/p9a/procedures/hwp/memory/p9a_mss_eff_config.C
+++ b/src/import/chips/p9a/procedures/hwp/memory/p9a_mss_eff_config.C
@@ -33,9 +33,11 @@
// *HWP Level: 1
// *HWP Consumed by: FSP:HB
-// fapi2
#include <fapi2.H>
#include <p9a_mss_eff_config.H>
+#include <lib/shared/exp_defaults.H>
+#include <lib/dimm/exp_rank.H>
+#include <generic/memory/lib/utils/mss_rank.H>
#include <generic/memory/lib/data_engine/data_engine.H>
#include <generic/memory/lib/utils/find.H>
#include <generic/memory/lib/spd/ddimm/efd_factory.H>
@@ -57,25 +59,22 @@
///
fapi2::ReturnCode p9a_mss_eff_config( const fapi2::Target<fapi2::TARGET_TYPE_MEM_PORT>& i_target )
{
- mss::display_git_commit_info("p9a_mss_eff_config");
-
- // Workaround until DIMM level attrs work
- uint8_t l_ranks[mss::exp::MAX_DIMM_PER_PORT] = {};
+ using mss::DEFAULT_MC_TYPE;
- FAPI_TRY( mss::attr::get_num_master_ranks_per_dimm(i_target, l_ranks) );
+ mss::display_git_commit_info("p9a_mss_eff_config");
for(const auto& dimm : mss::find_targets<fapi2::TARGET_TYPE_DIMM>(i_target))
{
- uint8_t l_dimm_index = 0;
uint64_t l_freq = 0;
uint32_t l_omi_freq = 0;
- FAPI_TRY( mss::attr::get_freq(mss::find_target<fapi2::TARGET_TYPE_MEM_PORT>(dimm), l_freq) );
- FAPI_TRY( mss::convert_ddr_freq_to_omi_freq(mss::find_target<fapi2::TARGET_TYPE_MEM_PORT>(dimm), l_freq, l_omi_freq));
+ FAPI_TRY( mss::attr::get_freq(i_target, l_freq) );
+ FAPI_TRY( mss::convert_ddr_freq_to_omi_freq(i_target, l_freq, l_omi_freq));
- // Quick hack to get the index until DIMM level attrs work
- FAPI_TRY( FAPI_ATTR_GET(fapi2::ATTR_REL_POS, dimm, l_dimm_index) );
+ // Get ranks via rank API
+ std::vector<mss::rank::info<>> l_ranks;
+ mss::rank::ranks_on_dimm(dimm, l_ranks);
- for( auto rank = 0; rank < l_ranks[l_dimm_index]; ++rank )
+ for (const auto& l_rank : l_ranks)
{
std::shared_ptr<mss::efd::base_decoder> l_efd_data;
@@ -83,7 +82,7 @@ fapi2::ReturnCode p9a_mss_eff_config( const fapi2::Target<fapi2::TARGET_TYPE_MEM
const auto l_ocmb = mss::find_target<fapi2::TARGET_TYPE_OCMB_CHIP>(i_target);
fapi2::MemVpdData_t l_vpd_type(fapi2::MemVpdData::EFD);
fapi2::VPDInfo<fapi2::TARGET_TYPE_OCMB_CHIP> l_vpd_info(l_vpd_type);
- l_vpd_info.iv_rank = rank;
+ l_vpd_info.iv_rank = l_rank.get_dimm_rank();
l_vpd_info.iv_omi_freq_mhz = l_omi_freq;
FAPI_TRY( fapi2::getVPD(l_ocmb, l_vpd_info, nullptr), "failed getting VPD size from getVPD" );
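Three distinct rank numberings are in play across this commit: a rank's number on
its DIMM (get_dimm_rank(), what the VPD lookup above is keyed by), its port-wide
number, and its PHY rank. A hypothetical mock for orientation only — the accessor
names match this commit, but the layout constants are assumptions; the real class
is mss::rank::info<> from lib/dimm/exp_rank.H:

#include <cstdint>

struct mock_rank_info
{
    uint64_t iv_dimm_index; // DIMM slot on the port (0 or 1)
    uint64_t iv_dimm_rank;  // rank number local to that DIMM

    // DIMM-local rank: what getVPD above is keyed by
    uint64_t get_dimm_rank() const { return iv_dimm_rank; }

    // Port-wide rank, assuming 4 rank slots reserved per DIMM
    uint64_t get_port_rank() const { return (iv_dimm_index * 4) + iv_dimm_rank; }

    // PHY rank, assuming 2 PHY slots per DIMM (see exp_train_handler.H above)
    uint64_t get_phy_rank() const { return (iv_dimm_index * 2) + iv_dimm_rank; }
};

// e.g. rank 0 of the second DIMM: dimm_rank 0, port_rank 4, phy_rank 2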
diff --git a/src/import/generic/memory/lib/data_engine/data_engine_utils.H b/src/import/generic/memory/lib/data_engine/data_engine_utils.H
index 85a5ac48b..d29f31e86 100644
--- a/src/import/generic/memory/lib/data_engine/data_engine_utils.H
+++ b/src/import/generic/memory/lib/data_engine/data_engine_utils.H
@@ -46,6 +46,7 @@
#include <generic/memory/lib/spd/spd_facade.H>
#include <generic/memory/lib/mss_generic_attribute_getters.H>
#include <generic/memory/lib/utils/conversions.H>
+#include <generic/memory/lib/utils/shared/mss_generic_consts.H>
namespace mss
{
@@ -212,10 +213,9 @@ inline fapi2::ReturnCode update_data(const std::shared_ptr<efd::base_decoder>& i
{
const auto l_ocmb = i_efd_data->get_ocmb_target();
- // TK, remove hard-code when VPDinfo struct adds an iv_dimm index
- // For explorer we can only have 1 DDIMM (index 0), or up to 2 DIMMs
- constexpr size_t l_dimm_index = 0;
- const auto l_rank = i_efd_data->get_rank();
+ // mss::index of the rank number (rank / MAX_RANK_PER_DIMM) gives us the corresponding dimm
+ const size_t l_dimm_index = mss::index(i_efd_data->get_rank());
+ const auto l_dimm_rank = i_efd_data->get_rank() % mss::MAX_RANK_PER_DIMM; // TK HARDCODE TEST REMOVE ME!
FAPI_ASSERT( l_dimm_index < X,
fapi2::MSS_OUT_OF_BOUNDS_INDEXING()
@@ -228,19 +228,19 @@ inline fapi2::ReturnCode update_data(const std::shared_ptr<efd::base_decoder>& i
X,
mss::spd::c_str(l_ocmb) );
- FAPI_ASSERT( l_rank < Y,
+ FAPI_ASSERT( l_dimm_rank < Y,
fapi2::MSS_OUT_OF_BOUNDS_INDEXING()
- .set_INDEX(l_rank)
+ .set_INDEX(l_dimm_rank)
.set_LIST_SIZE(X)
.set_FUNCTION(i_ffdc_code)
.set_TARGET(l_ocmb),
"Rank index (%d) was larger than max (%d) on %s",
- l_rank,
+ l_dimm_rank,
Y,
mss::spd::c_str(l_ocmb) );
- FAPI_DBG("Updating data[%d][%d] with %d for %s", l_dimm_index, l_rank, i_setting, spd::c_str(l_ocmb));
- o_data[l_dimm_index][l_rank] = i_setting;
+ FAPI_DBG("Updating data[%d][%d] with %d for %s", l_dimm_index, l_dimm_rank, i_setting, spd::c_str(l_ocmb));
+ o_data[l_dimm_index][l_dimm_rank] = i_setting;
return fapi2::FAPI2_RC_SUCCESS;
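A sketch of the dimm/rank arithmetic this hunk introduces, assuming mss::index on
a rank number divides by MAX_RANK_PER_DIMM — a reading inferred from the comment
and the modulo on the next line, with mss::MAX_RANK_PER_DIMM == 4 also assumed:

#include <cstdint>

constexpr uint64_t MAX_RANK_PER_DIMM = 4; // assumed, per the "% 4" comment

constexpr uint64_t dimm_index(const uint64_t rank) { return rank / MAX_RANK_PER_DIMM; }
constexpr uint64_t dimm_rank(const uint64_t rank)  { return rank % MAX_RANK_PER_DIMM; }

// EFD rank 5 lands in o_data[1][1]; rank 2 in o_data[0][2]
static_assert(dimm_index(5) == 1 && dimm_rank(5) == 1, "rank 5 -> dimm 1, dimm rank 1");
static_assert(dimm_index(2) == 0 && dimm_rank(2) == 2, "rank 2 -> dimm 0, dimm rank 2");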