diff options
15 files changed, 531 insertions, 100 deletions
diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/ccs/ccs.H b/src/import/chips/p9/procedures/hwp/memory/lib/ccs/ccs.H index 0d0434bc1..606014ee7 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/ccs/ccs.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/ccs/ccs.H @@ -1235,6 +1235,18 @@ fapi2::ReturnCode start_stop( const fapi2::Target<T>& i_target, const bool i_sta template< fapi2::TargetType T, typename TT = ccsTraits<T> > fapi2::ReturnCode status_query( const fapi2::Target<T>& i_target, std::pair<uint64_t, uint64_t>& io_status ); +/// +/// @brief Determine the CCS failure type +/// @param[in] i_target MCBIST target +/// @param[in] i_type the failure type +/// @param[in] i_mca The port the CCS instruction is training +/// @return ReturnCode associated with the fail. +/// @note FFDC is handled here, caller doesn't need to do it +/// +fapi2::ReturnCode fail_type( const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, + const uint64_t& i_type, + const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_mca ); + } // ends namespace ccs } diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.C b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.C index def06a0df..0d2cad989 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.C @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* Contributors Listed Below - COPYRIGHT 2015,2019 */ /* [+] International Business Machines Corp. 
*/ /* */ /* */ @@ -39,6 +39,7 @@ #include <lib/dimm/ddr4/mrs_load_ddr4.H> #include <lib/dimm/ddr4/latch_wr_vref.H> #include <lib/dimm/rank.H> +#include <lib/workarounds/ccs_workarounds.H> using fapi2::TARGET_TYPE_MCBIST; using fapi2::TARGET_TYPE_DIMM; @@ -96,13 +97,14 @@ fapi_try_exit: /// @param[in] i_rank_pair, rank pair on which to latch MRS 06 - hits all ranks in the rank pair /// @param[in] i_train_range, VREF range to setup /// @param[in] i_train_value, VREF value to setup -/// @param[in,out] a vector of CCS instructions we should add to +/// @param[in] i_nvdimm_workaround switch to indicate nvdimm workaround. Default to false /// @return FAPI2_RC_SUCCESS if and only if ok /// fapi2::ReturnCode latch_wr_vref_commands_by_rank_pair( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target, const uint64_t i_rank_pair, const uint8_t i_train_range, - const uint8_t i_train_value) + const uint8_t i_train_value, + const bool i_nvdimm_workaround) { // Declares variables const auto l_mcbist = find_target<fapi2::TARGET_TYPE_MCBIST>(i_target); @@ -141,7 +143,17 @@ fapi2::ReturnCode latch_wr_vref_commands_by_rank_pair( const fapi2::Target<fapi2 } // Executes the CCS commands - FAPI_TRY( mss::ccs::execute(l_mcbist, l_program, i_target), "Failed ccs execute %s", mss::c_str(i_target) ); + // Run the NVDIMM-specific execute procedure if this is for nvdimm workaround. + // Otherwise, execute as usual. 
+ if (i_nvdimm_workaround) + { + FAPI_TRY( mss::ccs::workarounds::nvdimm::execute(l_mcbist, l_program, i_target), "Failed ccs execute %s", + mss::c_str(i_target) ); + } + else + { + FAPI_TRY( mss::ccs::execute(l_mcbist, l_program, i_target), "Failed ccs execute %s", mss::c_str(i_target) ); + } fapi_try_exit: return fapi2::current_err; diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.H b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.H index 39536d566..0cb2a285e 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/latch_wr_vref.H @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* Contributors Listed Below - COPYRIGHT 2015,2019 */ /* [+] International Business Machines Corp. */ /* */ /* */ @@ -73,13 +73,14 @@ fapi2::ReturnCode add_latch_wr_vref_commands( const fapi2::Target<fapi2::TARGET_ /// @param[in] i_rank_pair, rank pair on which to latch MRS 06 - hits all ranks in the rank pair /// @param[in] i_train_range, VREF range to setup /// @param[in] i_train_value, VREF value to setup -/// @param[in,out] a vector of CCS instructions we should add to +/// @param[in] i_nvdimm_workaround switch to indicate nvdimm workaround. 
Default to false /// @return FAPI2_RC_SUCCESS if and only if ok /// fapi2::ReturnCode latch_wr_vref_commands_by_rank_pair( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target, const uint64_t i_rank_pair, const uint8_t i_train_range, - const uint8_t i_train_value); + const uint8_t i_train_value, + const bool i_nvdimm_workaround = false); /// /// @brief enables VREF train enable in an MRS06 class diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.C b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.C index ed63071d5..fe2f396f1 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.C @@ -53,8 +53,8 @@ #include <lib/mc/port.H> #include <lib/phy/dp16.H> #include <lib/dimm/mrs_load.H> -#include <lib/dimm/ddr4/pda.H> #include <lib/dimm/ddr4/zqcal.H> +#include <lib/dimm/ddr4/latch_wr_vref.H> #include <lib/dimm/ddr4/control_word_ddr4.H> #include <lib/workarounds/ccs_workarounds.H> #include <lib/eff_config/timing.H> @@ -322,70 +322,6 @@ fapi_try_exit: } /// -/// @brief PDA support for post restore transition -/// Specialization for TARGET_TYPE_DIMM -/// @param[in] i_target the target associated with this subroutine -/// @return FAPI2_RC_SUCCESS iff setup was successful -/// -template<> -fapi2::ReturnCode pda_vref_latch( const fapi2::Target<fapi2::TARGET_TYPE_DIMM>& i_target ) -{ - std::vector<uint64_t> l_ranks; - const auto& l_mca = mss::find_target<TARGET_TYPE_MCA>(i_target); - fapi2::buffer<uint8_t> l_value, l_range; - fapi2::ReturnCode l_rc(fapi2::FAPI2_RC_SUCCESS); - - // Creates the MRS container class - mss::ddr4::pda::commands<mss::ddr4::mrs06_data> l_container; - - // Get all the ranks in the dimm - mss::rank::ranks(i_target, l_ranks); - - // Get the number of DRAMs - uint8_t l_width = 0; - mss::eff_dram_width(i_target, l_width); - const uint64_t l_num_drams = (l_width == fapi2::ENUM_ATTR_EFF_DRAM_WIDTH_X8) ? 
MAX_DRAMS_X8 : MAX_DRAMS_X4; - - for (const auto& l_rank : l_ranks) - { - - uint64_t l_rp = 0; - uint64_t l_wr_vref_value = 0; - bool l_wr_vref_range = 0; - fapi2::buffer<uint64_t> l_data ; - - mss::rank::get_pair_from_rank(l_mca, l_rank, l_rp); - - // create mrs06 - mss::ddr4::mrs06_data l_mrs(i_target, l_rc); - - // loop through all the dram - for(uint64_t l_dram = 0; l_dram < l_num_drams; l_dram++) - { - mss::dp16::wr_vref::read_wr_vref_register( l_mca, l_rp, l_dram, l_data); - mss::dp16::wr_vref::get_wr_vref_range( l_data, l_dram, l_wr_vref_range); - mss::dp16::wr_vref::get_wr_vref_value( l_data, l_dram, l_wr_vref_value); - - l_mrs.iv_vrefdq_train_value[mss::index(l_rank)] = l_wr_vref_value; - l_mrs.iv_vrefdq_train_range[mss::index(l_rank)] = l_wr_vref_range; - l_container.add_command(i_target, l_rank, l_mrs, l_dram); - } - } - - // Disable refresh - FAPI_TRY( mss::change_refresh_enable(l_mca, states::LOW) ); - - // execute_wr_vref_latch(l_container) - FAPI_TRY( mss::ddr4::pda::execute_wr_vref_latch(l_container) ) - - // Enable refresh - FAPI_TRY( mss::change_refresh_enable(l_mca, states::HIGH) ); - -fapi_try_exit: - return fapi2::current_err; -} - -/// /// @brief Disable powerdown mode in rc09 /// @param[in] i_target, a fapi2::Target<TARGET_TYPE_DIMM> /// @param[in,out] io_inst a vector of CCS instructions we should add to @@ -540,7 +476,7 @@ fapi2::ReturnCode rcd_restore( const fapi2::Target<TARGET_TYPE_MCA>& i_target ) // Exit STR first so CKE is back to high and rcd isn't ignoring us FAPI_TRY( self_refresh_exit( i_target ) ); - FAPI_TRY( ccs::execute(l_mcbist, l_program, i_target), + FAPI_TRY( mss::ccs::workarounds::nvdimm::execute(l_mcbist, l_program, i_target), "Failed to execute ccs for %s", mss::c_str(i_target) ); // Now, drive CKE back to low via STR entry instead of pde (we have data in the drams!) 
@@ -557,7 +493,7 @@ fapi2::ReturnCode rcd_restore( const fapi2::Target<TARGET_TYPE_MCA>& i_target ) }// dimms // Restore the rcd - FAPI_TRY( ccs::execute(l_mcbist, l_program, i_target), + FAPI_TRY( mss::ccs::workarounds::nvdimm::execute(l_mcbist, l_program, i_target), "Failed to execute ccs for %s", mss::c_str(i_target) ); fapi_try_exit: @@ -597,7 +533,7 @@ fapi2::ReturnCode post_restore_zqcal( const fapi2::Target<fapi2::TARGET_TYPE_MCA }// dimms // execute ZQCAL instructions - FAPI_TRY( mss::ccs::execute(l_mcbist, l_program, i_target), + FAPI_TRY( mss::ccs::workarounds::nvdimm::execute(l_mcbist, l_program, i_target), "Failed to execute ccs for ZQCAL %s", mss::c_str(i_target) ); fapi_try_exit: @@ -605,6 +541,46 @@ fapi_try_exit: } /// +/// @brief Latch write vref +/// @param[in] i_target the target associated with this subroutine +/// @return FAPI2_RC_SUCCESS iff setup was successful +/// +fapi2::ReturnCode wr_vref_latch( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target ) +{ + std::vector<uint64_t> l_pairs; + const bool NVDIMM_WORKAROUND = true; + + // We are latching in the averaged value and we should have the averaged value + // (this step should be run after all the draminit) so just the first dram is fine + constexpr uint64_t l_dram = 0; + + // Get our rank pairs. 
+ FAPI_TRY( mss::rank::get_rank_pairs(i_target, l_pairs) ); + + for (const auto& l_rp : l_pairs) + { + FAPI_INF("NVDIMM wr_vref_latch on rp %d %s", l_rp, mss::c_str(i_target)); + fapi2::buffer<uint64_t> l_data ; + uint64_t l_wr_vref_value = 0; + bool l_wr_vref_range = 0; + + mss::dp16::wr_vref::read_wr_vref_register( i_target, l_rp, l_dram, l_data); + mss::dp16::wr_vref::get_wr_vref_range( l_data, l_dram, l_wr_vref_range); + mss::dp16::wr_vref::get_wr_vref_value( l_data, l_dram, l_wr_vref_value); + + FAPI_TRY( mss::ddr4::latch_wr_vref_commands_by_rank_pair(i_target, + l_rp, + l_wr_vref_range, + l_wr_vref_value, + NVDIMM_WORKAROUND) ); + + }// rank pairs + +fapi_try_exit: + return fapi2::current_err; +} + +/// /// @brief Post restore transition to support restoring nvdimm to /// a functional state after the restoring the data from NAND flash /// to DRAM @@ -616,6 +592,7 @@ template<> fapi2::ReturnCode post_restore_transition( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target ) { mss::states l_maint_addr_enabled = mss::states::LOW; + const bool NVDIMM_WORKAROUND = true; FAPI_TRY(get_maint_addr_mode_en(i_target, l_maint_addr_enabled)); @@ -633,16 +610,13 @@ fapi2::ReturnCode post_restore_transition( const fapi2::Target<fapi2::TARGET_TYP FAPI_TRY( self_refresh_exit( i_target ) ); // Load the MRS - FAPI_TRY( mss::mrs_load( i_target ) ); + FAPI_TRY( mss::mrs_load( i_target, NVDIMM_WORKAROUND ) ); // Do ZQCAL FAPI_TRY( post_restore_zqcal(i_target) ); - // Latch the trained PDA vref values for each dimm under the port - for (const auto& l_dimm : mss::find_targets<fapi2::TARGET_TYPE_DIMM>(i_target)) - { - FAPI_TRY( pda_vref_latch( l_dimm ) ); - } + // Latch in the rank averaged vref value + FAPI_TRY(wr_vref_latch(i_target)); //Restore main_addr_mode_en to previous setting FAPI_TRY(change_maint_addr_mode_en(i_target, l_maint_addr_enabled)); diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.H 
b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.H index 538a650ba..5df44c13c 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/ddr4/nvdimm_utils.H @@ -37,6 +37,7 @@ #include <generic/memory/lib/utils/find.H> #include <lib/shared/mss_const.H> #include <lib/ccs/ccs.H> +#include <lib/phy/dp16.H> namespace mss { @@ -114,16 +115,6 @@ template< fapi2::TargetType T > fapi2::ReturnCode self_refresh_exit( const fapi2::Target<T>& i_target ); /// -/// @brief Latch write vref at per-dram basis -/// @tparam T the target type associated with this subroutine -/// @param[in] i_target the target associated with this subroutine -/// @return FAPI2_RC_SUCCESS iff setup was successful -/// - -template< fapi2::TargetType T > -fapi2::ReturnCode pda_vref_latch( const fapi2::Target<T>& i_target ); - -/// /// @brief Disable powerdown mode in rc09 /// @param[in] i_target, a fapi2::Target<TARGET_TYPE_DIMM> /// @param[in,out] io_inst a vector of CCS instructions we should add to @@ -163,6 +154,13 @@ fapi2::ReturnCode rcd_restore( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_ta fapi2::ReturnCode post_restore_zqcal( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target); /// +/// @brief Latch write vref +/// @param[in] i_target the target associated with this subroutine +/// @return FAPI2_RC_SUCCESS iff setup was successful +/// +fapi2::ReturnCode wr_vref_latch( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target ); + +/// /// @brief Full post-restore transition for NVDIMM /// @tparam T the target type associated with this subroutine /// @param[in] i_target the target associated with this subroutine @@ -180,6 +178,40 @@ fapi2::ReturnCode post_restore_transition( const fapi2::Target<T>& i_target ); /// fapi2::ReturnCode preload_epow_sequence( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target ); +namespace wr_vref +{ + +/// +/// @brief Helper to extract the values and ranges 
from wr_vref_value reg, and +/// convert JEDEC value to composite +/// @param[in] i_data - wr vref reg data +/// @param[out] o_values - vector of composite value +/// +inline void get_wr_vref_composite_value_helper(const fapi2::buffer<uint64_t> i_data, + std::vector<uint64_t>& o_values) +{ + FAPI_DBG("get_wr_vref_composite_value_helper() i_data = 0x%016lx", i_data); + const std::vector<uint64_t> DRAMS = {0, 1}; + + for (const auto l_dram : DRAMS) + { + uint64_t l_wr_vref_val = 0; + bool l_wr_vref_range = 0; + uint64_t l_composite_vref = 0; + + mss::dp16::wr_vref::get_wr_vref_value(i_data, l_dram, l_wr_vref_val); + mss::dp16::wr_vref::get_wr_vref_range(i_data, l_dram, l_wr_vref_range); + + l_composite_vref = mss::dp16::wr_vref::compute_composite_value(l_wr_vref_range, l_wr_vref_val); + + FAPI_DBG("l_composite_vref: 0x%016lx", l_composite_vref); + + o_values.push_back(l_composite_vref); + } +} + +}//ns wr_vref + }//ns nvdimm }//ns mss diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.C b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.C index 609251cb1..aae4cce28 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.C @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2015,2018 */ +/* Contributors Listed Below - COPYRIGHT 2015,2019 */ /* [+] International Business Machines Corp. */ /* */ /* */ @@ -38,6 +38,7 @@ #include <mss.H> #include <lib/dimm/mrs_load.H> #include <lib/dimm/ddr4/mrs_load_ddr4.H> +#include <lib/workarounds/ccs_workarounds.H> using fapi2::TARGET_TYPE_MCBIST; using fapi2::TARGET_TYPE_DIMM; @@ -51,10 +52,12 @@ namespace mss /// /// @brief Perform the mrs_load operations - TARGET_TYPE_MCA specialization /// @param[in] i_target a fapi2::Target<TARGET_TYPE_MCA> +/// @param[in] i_nvdimm_workaround switch to indicate nvdimm workaround. 
Default to false /// @return FAPI2_RC_SUCCESS if and only if ok /// template<> -fapi2::ReturnCode mrs_load<TARGET_TYPE_MCA>( const fapi2::Target<TARGET_TYPE_MCA>& i_target ) +fapi2::ReturnCode mrs_load<TARGET_TYPE_MCA>( const fapi2::Target<TARGET_TYPE_MCA>& i_target, + const bool i_nvdimm_workaround ) { const auto& l_mcbist = mss::find_target<TARGET_TYPE_MCBIST>(i_target); @@ -75,7 +78,17 @@ fapi2::ReturnCode mrs_load<TARGET_TYPE_MCA>( const fapi2::Target<TARGET_TYPE_MCA // We have to configure the CCS engine to let it know which port these instructions are // going out (or whether it's broadcast ...) so lets execute the instructions we presently // have so that we kind of do this by port - FAPI_TRY( ccs::execute(l_mcbist, l_program, i_target) ); + // Run the NVDIMM-specific execute procedure if this is for nvdimm workaround. + // Otherwise, execute as usual. + if (i_nvdimm_workaround) + { + FAPI_TRY( mss::ccs::workarounds::nvdimm::execute(l_mcbist, l_program, i_target), "Failed ccs execute %s", + mss::c_str(i_target) ); + } + else + { + FAPI_TRY( mss::ccs::execute(l_mcbist, l_program, i_target), "Failed ccs execute %s", mss::c_str(i_target) ); + } fapi_try_exit: return fapi2::current_err; @@ -84,10 +97,12 @@ fapi_try_exit: /// /// @brief Perform the mrs_load operations - TARGET_TYPE_MCBIST specialization /// @param[in] i_target a fapi2::Target<TARGET_TYPE_MCBIST> +/// @param[in] i_nvdimm_workaround switch to indicate nvdimm workaround. 
Default to false /// @return FAPI2_RC_SUCCESS if and only if ok /// template<> -fapi2::ReturnCode mrs_load<TARGET_TYPE_MCBIST>( const fapi2::Target<TARGET_TYPE_MCBIST>& i_target ) +fapi2::ReturnCode mrs_load<TARGET_TYPE_MCBIST>( const fapi2::Target<TARGET_TYPE_MCBIST>& i_target, + const bool i_nvdimm_workaround ) { for ( const auto& p : find_targets<TARGET_TYPE_MCA>(i_target) ) { diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.H b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.H index 2b0418cfb..f19679bc7 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/dimm/mrs_load.H @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2015,2018 */ +/* Contributors Listed Below - COPYRIGHT 2015,2019 */ /* [+] International Business Machines Corp. */ /* */ /* */ @@ -111,10 +111,11 @@ struct mrs_data /// @brief Perform the mrs_load operations /// @tparam T, the fapi2::TargetType of i_target /// @param[in] i_target, a fapi2::Target +/// @param[in] i_nvdimm_workaround switch to indicate nvdimm workaround. 
Default to false /// @return FAPI2_RC_SUCCESS if and only if ok /// template< fapi2::TargetType T > -fapi2::ReturnCode mrs_load( const fapi2::Target<T>& i_target ); +fapi2::ReturnCode mrs_load( const fapi2::Target<T>& i_target, const bool i_nvdimm_workaround = false ); // // Implement the polymorphism for mrs_load diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.C b/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.C index 89c00b0b8..2b809d3fa 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.C @@ -4704,6 +4704,47 @@ fapi_try_exit: return fapi2::current_err; } +/// +/// @brief Gets the write vref registers by rank pair +/// @param[in] i_rp - rank pair, to make sure the dram and rp are within bounds +/// @param[out] o_regs - the dp16 wr_vref registers for the rank pair +/// +fapi2::ReturnCode get_wr_vref_regs_by_rp(const uint64_t i_rp, + std::vector<std::pair<uint64_t, uint64_t>>& o_reg) +{ + typedef dp16Traits<fapi2::TARGET_TYPE_MCA> TT; + + switch (i_rp) + { + case 0: + o_reg = TT::WR_VREF_VALUE_RP0_REG; + break; + + case 1: + o_reg = TT::WR_VREF_VALUE_RP1_REG; + break; + + case 2: + o_reg = TT::WR_VREF_VALUE_RP2_REG; + break; + + case 3: + o_reg = TT::WR_VREF_VALUE_RP3_REG; + break; + + default: + FAPI_ASSERT( false, + fapi2::MSS_RP_OUT_OF_RANGE() + .set_RP(i_rp), + "RP out of range %d", + i_rp); + break; + } + +fapi_try_exit: + return fapi2::current_err; +} + } // close namespace wr_vref } // close namespace dp16 diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.H b/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.H index bfde9b4ad..73e011b94 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/phy/dp16.H @@ -2499,6 +2499,14 @@ inline void get_wr_vref_value( const fapi2::buffer<uint64_t> i_data, i_data.extractToRight(o_value, l_value_pos, 
TT::WR_VREF_VALUE_VALUE_DRAM_EVEN_LEN); } +/// +/// @brief Gets the write vref registers by rank pair +/// @param[in] i_rp - rank pair, to make sure the dram and rp are within bounds +/// @param[out] o_regs - the dp16 wr_vref registers for the rank pair +/// +fapi2::ReturnCode get_wr_vref_regs_by_rp(const uint64_t i_rp, + std::vector<std::pair<uint64_t, uint64_t>>& o_reg); + } // close namespace wr_vref /// diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/phy/mss_training.C b/src/import/chips/p9/procedures/hwp/memory/lib/phy/mss_training.C index 5db0e962d..50574d764 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/phy/mss_training.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/phy/mss_training.C @@ -620,6 +620,14 @@ fapi2::ReturnCode write_ctr::post_workaround( const fapi2::Target<fapi2::TARGET_ { // Loops through the DRAMs to check and creates a vector of bad DRAMs and their associated starting delays std::vector<std::pair<uint64_t, uint64_t>> l_bad_drams; + uint8_t l_hybrid[mss::MAX_DIMM_PER_PORT] = {}; + uint8_t l_hybrid_type[mss::MAX_DIMM_PER_PORT] = {}; + + // Get the hybrid type + FAPI_TRY(mss::eff_hybrid(i_target, &l_hybrid[0]), + "%s failed to access DIMM hybrid type", mss::c_str(i_target)); + FAPI_TRY(mss::eff_hybrid_memory_type(i_target, &l_hybrid_type[0]), + "%s failed to access DIMM hybrid memory type", mss::c_str(i_target)); // Checking all of the DRAMs that had been good before WR VREF // If any of them have gone bad, then note it and run the workaround @@ -754,6 +762,22 @@ fapi2::ReturnCode write_ctr::post_workaround( const fapi2::Target<fapi2::TARGET_ false )); } + // If this port has nvdimm, run the workaround. When we do the restore we cannot do the + // pda wr vref latch because it will break the refresh timing with multiple ccs sequences. + // As such, let's run the nvdimm with rank median vref value so later on we can just load + // the vref in 1 ccs sequence. 
+ if ((l_hybrid[0] == fapi2::ENUM_ATTR_EFF_HYBRID_IS_HYBRID) && + (l_hybrid_type[0] == fapi2::ENUM_ATTR_EFF_HYBRID_MEMORY_TYPE_NVDIMM) && !iv_wr_vref) + { + FAPI_TRY(mss::workarounds::wr_vref::nvdimm_workaround(i_target, i_rp, i_abort_on_error)); + + // Rerun the centering with the median vref + FAPI_TRY(run( i_target, + i_rp, + i_abort_on_error, + false )); + } + return fapi2::FAPI2_RC_SUCCESS; fapi_try_exit: return fapi2::current_err; diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.C b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.C index 42374919c..957eac4fd 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.C @@ -37,7 +37,9 @@ #include <lib/dimm/rank.H> #include <p9_mc_scom_addresses.H> #include <generic/memory/lib/utils/scom.H> +#include <generic/memory/lib/utils/pos.H> #include <lib/eff_config/timing.H> +#include <lib/ccs/ccs.H> namespace mss { @@ -212,6 +214,176 @@ fapi_try_exit: return fapi2::current_err; } +namespace nvdimm +{ + +/// +/// @brief Execute the contents of the CCS array with ccs_addr_mux_sel control +/// @param[in] i_target The MCBIST containing the array +/// @param[in] i_program the MCBIST ccs program - to get the polling parameters +/// @param[in] i_port The port target that the array is for +/// @return FAPI2_RC_SUCCESS iff success +/// @note This is the exact same copy of execute_inst_array() in ccs.H with changes +/// to ccs_addr_mux_sel before and after the execute. This is required to ensure +/// CCS can properly drive the bus during the nvdimm post-restore sequence. 
+/// +fapi2::ReturnCode execute_inst_array(const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, + mss::ccs::program<fapi2::TARGET_TYPE_MCBIST>& i_program, + const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_port) +{ + typedef ccsTraits<fapi2::TARGET_TYPE_MCBIST> TT; + + fapi2::buffer<uint64_t> status; + + // Change ccs_add_mux_sel to high to make sure the CCS logic is driving the bus + FAPI_TRY(mss::change_addr_mux_sel(i_port, mss::HIGH)); + + FAPI_TRY(mss::ccs::start_stop(i_target, mss::START), "%s Error in execute_inst_array", mss::c_str(i_port) ); + + mss::poll(i_target, TT::STATQ_REG, i_program.iv_poll, + [&status](const size_t poll_remaining, const fapi2::buffer<uint64_t>& stat_reg) -> bool + { + FAPI_INF("ccs statq 0x%llx, remaining: %d", stat_reg, poll_remaining); + status = stat_reg; + return status.getBit<TT::CCS_IN_PROGRESS>() != 1; + }, + i_program.iv_probes); + + // ccs_add_mux_sel back to low to give control back to mainline + FAPI_TRY(mss::change_addr_mux_sel(i_port, mss::LOW)); + + // Check for done and success. DONE being the only bit set. + if (status == STAT_QUERY_SUCCESS) + { + FAPI_INF("%s CCS Executed Successfully.", mss::c_str(i_port) ); + goto fapi_try_exit; + } + + // So we failed or we're still in progress. Mask off the fail bits + // and run this through the FFDC generator. + // TK: Put the const below into a traits class? 
-- JLH + FAPI_TRY( mss::ccs::fail_type(i_target, status & 0x1C00000000000000, i_port), "Error in execute_inst_array" ); + +fapi_try_exit: + return fapi2::current_err; +} + +/// +/// @brief Execute a set of CCS instructions +/// @param[in] i_target the target to effect +/// @param[in] i_program the vector of instructions +/// @param[in] i_port The port target that the array is for +/// @return FAPI2_RC_SUCCSS iff ok +/// @note This is a copy of execute() with minor tweaks to the namespace and single port only +/// +fapi2::ReturnCode execute( const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, + mss::ccs::program<fapi2::TARGET_TYPE_MCBIST>& i_program, + const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_port) +{ + typedef ccsTraits<fapi2::TARGET_TYPE_MCBIST> TT; + + // Subtract one for the idle we insert at the end + constexpr size_t CCS_INSTRUCTION_DEPTH = 32 - 1; + constexpr uint64_t CCS_ARR0_ZERO = MCBIST_CCS_INST_ARR0_00; + constexpr uint64_t CCS_ARR1_ZERO = MCBIST_CCS_INST_ARR1_00; + + mss::ccs::instruction_t<fapi2::TARGET_TYPE_MCBIST> l_des = ccs::des_command<fapi2::TARGET_TYPE_MCBIST>(); + + FAPI_INF("loading ccs instructions (%d) for %s", i_program.iv_instructions.size(), mss::c_str(i_target)); + + auto l_inst_iter = i_program.iv_instructions.begin(); + + // Stop the CCS engine just for giggles - it might be running ... 
+ FAPI_TRY( mss::ccs::start_stop(i_target, mss::states::STOP), "Error in ccs::execute" ); + + FAPI_ASSERT( mss::poll(i_target, TT::STATQ_REG, poll_parameters(), + [](const size_t poll_remaining, const fapi2::buffer<uint64_t>& stat_reg) -> bool + { + FAPI_INF("ccs statq (stop) 0x%llx, remaining: %d", stat_reg, poll_remaining); + return stat_reg.getBit<TT::CCS_IN_PROGRESS>() != 1; + }), + fapi2::MSS_CCS_HUNG_TRYING_TO_STOP().set_MCBIST_TARGET(i_target) ); + + while (l_inst_iter != i_program.iv_instructions.end()) + { + size_t l_inst_count = 0; + + uint64_t l_total_delay = 0; + uint64_t l_delay = 0; + uint64_t l_repeat = 0; + uint8_t l_current_cke = 0; + + // Shove the instructions into the CCS engine, in 32 instruction chunks, and execute them + for (; l_inst_iter != i_program.iv_instructions.end() + && l_inst_count < CCS_INSTRUCTION_DEPTH; ++l_inst_count, ++l_inst_iter) + { + l_inst_iter->arr0.extractToRight<TT::ARR0_DDR_CKE, TT::ARR0_DDR_CKE_LEN>(l_current_cke); + + // Make sure this instruction leads to the next. Notice this limits this mechanism to pretty + // simple (straight line) CCS programs. Anything with a loop or such will need another mechanism. + l_inst_iter->arr1.insertFromRight<MCBIST_CCS_INST_ARR1_00_GOTO_CMD, + MCBIST_CCS_INST_ARR1_00_GOTO_CMD_LEN>(l_inst_count + 1); + FAPI_TRY( mss::putScom(i_target, CCS_ARR0_ZERO + l_inst_count, l_inst_iter->arr0), "Error in ccs::execute" ); + FAPI_TRY( mss::putScom(i_target, CCS_ARR1_ZERO + l_inst_count, l_inst_iter->arr1), "Error in ccs::execute" ); + + // arr1 contains a specification of the delay and repeat after this instruction, as well + // as a repeat. 
Total up the delays as we go so we know how long to wait before polling + // the CCS engine for completion + l_inst_iter->arr1.extractToRight<MCBIST_CCS_INST_ARR1_00_IDLES, MCBIST_CCS_INST_ARR1_00_IDLES_LEN>(l_delay); + l_inst_iter->arr1.extractToRight<MCBIST_CCS_INST_ARR1_00_REPEAT_CMD_CNT, + MCBIST_CCS_INST_ARR1_00_REPEAT_CMD_CNT_LEN>(l_repeat); + + l_total_delay += l_delay * (l_repeat + 1); + + FAPI_INF("css inst %d: 0x%016lX 0x%016lX (0x%lx, 0x%lx) delay: 0x%x (0x%x) %s", + l_inst_count, l_inst_iter->arr0, l_inst_iter->arr1, + CCS_ARR0_ZERO + l_inst_count, CCS_ARR1_ZERO + l_inst_count, + l_delay, l_total_delay, mss::c_str(i_target)); + } + + // Check our program for any delays. If there isn't a iv_initial_delay configured, then + // we use the delay we just summed from the instructions. + if (i_program.iv_poll.iv_initial_delay == 0) + { + i_program.iv_poll.iv_initial_delay = cycles_to_ns(i_target, l_total_delay); + } + + if (i_program.iv_poll.iv_initial_sim_delay == 0) + { + i_program.iv_poll.iv_initial_sim_delay = cycles_to_simcycles(l_total_delay); + } + + FAPI_INF("executing ccs instructions (%d:%d, %d) for %s", + i_program.iv_instructions.size(), l_inst_count, i_program.iv_poll.iv_initial_delay, mss::c_str(i_target)); + + // Deselect + l_des.arr0.insertFromRight<TT::ARR0_DDR_CKE, TT::ARR0_DDR_CKE_LEN>(l_current_cke); + + // Insert a DES as our last instruction. DES is idle state anyway and having this + // here as an instruction forces the CCS engine to wait the delay specified in + // the last instruction in this array (which it otherwise doesn't do.) 
+ l_des.arr1.setBit<MCBIST_CCS_INST_ARR1_00_END>(); + FAPI_TRY( mss::putScom(i_target, CCS_ARR0_ZERO + l_inst_count, l_des.arr0), "Error in ccs::execute" ); + FAPI_TRY( mss::putScom(i_target, CCS_ARR1_ZERO + l_inst_count, l_des.arr1), "Error in ccs::execute" ); + + FAPI_INF("css inst %d fixup: 0x%016lX 0x%016lX (0x%lx, 0x%lx) %s", + l_inst_count, l_des.arr0, l_des.arr1, + CCS_ARR0_ZERO + l_inst_count, CCS_ARR1_ZERO + l_inst_count, mss::c_str(i_target)); + + // Kick off the CCS engine - per port. No broadcast mode for CCS (per Shelton 9/23/15) + FAPI_INF("executing CCS array for port %d (%s)", mss::relative_pos<fapi2::TARGET_TYPE_MCBIST>(i_port), + mss::c_str(i_port)); + FAPI_TRY( mss::ccs::select_ports( i_target, mss::relative_pos<fapi2::TARGET_TYPE_MCBIST>(i_port)), + "Error in ccs execute" ); + FAPI_TRY( execute_inst_array(i_target, i_program, i_port), "Error in ccs execute" ); + } + +fapi_try_exit: + i_program.iv_instructions.clear(); + return fapi2::current_err; +} + +} // ns nvdimm namespace wr_lvl { diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.H b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.H index 60c7ef23d..067f8481f 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/ccs_workarounds.H @@ -157,6 +157,36 @@ inline void hold_cke_high( ccs::program<fapi2::TARGET_TYPE_MCBIST>& io_program ) fapi2::ReturnCode preload_ccs_for_epow( const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, ccs::program<fapi2::TARGET_TYPE_MCBIST>& i_program); +namespace nvdimm +{ + +/// +/// @brief Execute a set of CCS instructions +/// @param[in] i_target the target to effect +/// @param[in] i_program the vector of instructions +/// @param[in] i_port The port target that the array is for +/// @return FAPI2_RC_SUCCSS iff ok +/// @note This is a copy of execute() with minor tweaks to the namespace and single port 
only +/// +fapi2::ReturnCode execute( const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, + mss::ccs::program<fapi2::TARGET_TYPE_MCBIST>& i_program, + const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_port); + +/// +/// @brief Execute the contents of the CCS array with ccs_addr_mux_sel control +/// @param[in] i_target The MCBIST containing the array +/// @param[in] i_program the MCBIST ccs program - to get the polling parameters +/// @param[in] i_port The port target that the array is for +/// @return FAPI2_RC_SUCCESS iff success +/// @note This is the exact same copy of execute_inst_array() in ccs.H with changes +/// to ccs_addr_mux_sel before and after the execute. This is required to ensure +/// CCS can properly drive the bus during the nvdimm post-restore sequence. +/// +fapi2::ReturnCode execute_inst_array(const fapi2::Target<fapi2::TARGET_TYPE_MCBIST>& i_target, + mss::ccs::program<fapi2::TARGET_TYPE_MCBIST>& i_program, + const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_port); +} + namespace wr_lvl { diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.C b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.C index b9426fe94..3fbc51f9a 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.C +++ b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.C @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2016,2017 */ +/* Contributors Listed Below - COPYRIGHT 2016,2019 */ /* [+] International Business Machines Corp. 
*/ /* */ /* */ @@ -38,6 +38,10 @@ #include <fapi2.H> #include <lib/workarounds/dp16_workarounds.H> #include <lib/workarounds/wr_vref_workarounds.H> +#include <lib/phy/dp16.H> +#include <lib/dimm/ddr4/latch_wr_vref.H> +#include <lib/dimm/ddr4/nvdimm_utils.H> +#include <generic/memory/lib/utils/scom.H> namespace mss { @@ -79,6 +83,92 @@ fapi_try_exit: return fapi2::current_err; } +/// +/// @brief Executes the nvdimm workaround +/// @param[in] i_target - the MCA target on which to operate +/// @param[in] i_rp - the rank pair +/// @param[in] i_abort_on_error - whether or not we are aborting on cal error +/// @return fapi2::ReturnCode fapi2::FAPI2_RC_SUCCESS iff ok +/// +fapi2::ReturnCode nvdimm_workaround( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target, + const uint64_t i_rp, + const uint8_t i_abort_on_error ) +{ + FAPI_INF("nvdimm_workaround() on %s RP%lu ", mss::c_str(i_target), i_rp); + + // traits definition + typedef dp16Traits<fapi2::TARGET_TYPE_MCA> TT; + constexpr uint64_t DRAMS_PER_RP = 18; + std::vector<std::pair<uint64_t, uint64_t>> l_wr_vref_regs; + std::vector<std::pair<fapi2::buffer<uint64_t>, fapi2::buffer<uint64_t>>> l_wr_vref_regs_values; + std::vector<uint64_t> l_composite_values; + + // Get the wr_vref regs by rp and then suck out all the data + FAPI_TRY(mss::dp16::wr_vref::get_wr_vref_regs_by_rp(i_rp, l_wr_vref_regs)); + FAPI_TRY(mss::scom_suckah(i_target, l_wr_vref_regs, l_wr_vref_regs_values)); + + for (const auto l_wr_vref_pair : l_wr_vref_regs_values) + { + // Get the composite values from the first reg. We don't really care which value belongs to which + // dram here since we only need the median value + mss::nvdimm::wr_vref::get_wr_vref_composite_value_helper(l_wr_vref_pair.first, l_composite_values); + + // Abort if we have enough... 
+ // This excludes the non-existent on DP4 + if (l_composite_values.size() == DRAMS_PER_RP) + { + break; + } + + // Repeat for the second reg + mss::nvdimm::wr_vref::get_wr_vref_composite_value_helper(l_wr_vref_pair.second, l_composite_values); + } + + // Get the median + { + // Sort by the "first" of each pair + std::sort(l_composite_values.begin(), l_composite_values.end()); + + // I'm assuming we always have even number of DRAMs here... + const uint64_t MID_POINT = DRAMS_PER_RP / 2; + + // Average the two mid-point values + const uint64_t l_wr_vref_med_avg = ( l_composite_values[MID_POINT] + + l_composite_values[MID_POINT - 1] ) / 2; + + FAPI_INF("nvdimm_workaround() - l_wr_vref_med_avg: 0x%016lx", l_wr_vref_med_avg); + + // Convert to JEDEC language + const uint8_t l_jedec_med_value = mss::dp16::wr_vref::get_value(l_wr_vref_med_avg); + const uint8_t l_jedec_med_range = mss::dp16::wr_vref::get_range(l_wr_vref_med_avg); + fapi2::buffer<uint64_t> l_wr_vref_med_data; + + FAPI_INF("nvdimm_workaround() - median jedec: value = 0x%02x, range = 0x%02x", l_jedec_med_value, l_jedec_med_range); + + // Set up the reg with the median value + l_wr_vref_med_data.insertFromRight<TT::WR_VREF_VALUE_VALUE_DRAM_EVEN, TT::WR_VREF_VALUE_VALUE_DRAM_EVEN_LEN> + (l_jedec_med_value); + l_wr_vref_med_data.insertFromRight<TT::WR_VREF_VALUE_VALUE_DRAM_ODD, TT::WR_VREF_VALUE_VALUE_DRAM_ODD_LEN> + (l_jedec_med_value); + + l_wr_vref_med_data.writeBit<TT::WR_VREF_VALUE_RANGE_DRAM_EVEN>(l_jedec_med_range); + l_wr_vref_med_data.writeBit<TT::WR_VREF_VALUE_RANGE_DRAM_ODD>(l_jedec_med_range); + + // Set the phy with the median value + FAPI_TRY(mss::scom_blastah(i_target, l_wr_vref_regs, l_wr_vref_med_data)); + + // Latches the median wr_vref + FAPI_TRY( mss::ddr4::latch_wr_vref_commands_by_rank_pair(i_target, + i_rp, + l_jedec_med_range, + l_jedec_med_value) ); + } + + return fapi2::FAPI2_RC_SUCCESS; +fapi_try_exit: + return fapi2::current_err; +} + } // close namespace wr_vref } // close namespace 
workarounds } // close namespace mss diff --git a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.H b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.H index 82761e08d..b5c37d2ed 100644 --- a/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.H +++ b/src/import/chips/p9/procedures/hwp/memory/lib/workarounds/wr_vref_workarounds.H @@ -5,7 +5,7 @@ /* */ /* OpenPOWER HostBoot Project */ /* */ -/* Contributors Listed Below - COPYRIGHT 2016,2017 */ +/* Contributors Listed Below - COPYRIGHT 2016,2019 */ /* [+] International Business Machines Corp. */ /* */ /* */ @@ -65,6 +65,17 @@ fapi2::ReturnCode execute( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target uint8_t& o_vrefdq_train_range, uint8_t& o_vrefdq_train_value ); +/// +/// @brief Executes the nvdimm workaround +/// @param[in] i_target - the MCA target on which to operate +/// @param[in] i_rp - the rank pair +/// @param[in] i_abort_on_error - whether or not we are aborting on cal error +/// @return fapi2::ReturnCode fapi2::FAPI2_RC_SUCCESS iff ok +/// +fapi2::ReturnCode nvdimm_workaround( const fapi2::Target<fapi2::TARGET_TYPE_MCA>& i_target, + const uint64_t i_rp, + const uint8_t i_abort_on_error ); + } // close namespace wr_vref } // close namespace workarounds } // close namespace mss diff --git a/src/import/chips/p9/procedures/xml/error_info/p9_memory_mss_draminit_training.xml b/src/import/chips/p9/procedures/xml/error_info/p9_memory_mss_draminit_training.xml index 7ac07623b..d7bb833cd 100644 --- a/src/import/chips/p9/procedures/xml/error_info/p9_memory_mss_draminit_training.xml +++ b/src/import/chips/p9/procedures/xml/error_info/p9_memory_mss_draminit_training.xml @@ -5,7 +5,7 @@ <!-- --> <!-- OpenPOWER HostBoot Project --> <!-- --> -<!-- Contributors Listed Below - COPYRIGHT 2015,2018 --> +<!-- Contributors Listed Below - COPYRIGHT 2015,2019 --> <!-- [+] International Business Machines Corp. 
--> <!-- --> <!-- --> @@ -820,4 +820,12 @@ </callout> </hwpError> +<hwpError> + <rc>RC_MSS_RP_OUT_OF_RANGE</rc> + <description> + An informational callout for rank-pair value out of range + </description> + <ffdc>RP</ffdc> +</hwpError> + </hwpErrors> |