summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSumit Kumar <sumit_kumar@in.ibm.com>2016-01-27 00:57:27 -0600
committerDaniel M. Crowell <dcrowell@us.ibm.com>2017-10-09 11:56:02 -0400
commit58e1baea3881477294f1020eba92218e779990a2 (patch)
treebc43bea56bc498b788778715e3d242d2a9b98af7
parent5e97dc7721eaabc9049b24430c3065acfffcd86e (diff)
downloadtalos-hostboot-58e1baea3881477294f1020eba92218e779990a2.tar.gz
talos-hostboot-58e1baea3881477294f1020eba92218e779990a2.zip
Erepair HWP p9_io_erepair procedure
- Added clock group parameter in api to identify the higher/lower order bus width of XBUS-X0 & X1 - Added AccessorHwpFuncs Change-Id: Ica1f217821ea4045d20407e1cc4a40a6971db860 Original-Change-Id: Ieb9b1072ed73b26b321ec16f0742271f48aff810 Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/10587 Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com> Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com> Tested-by: HWSV CI <hwsv-ci+hostboot@us.ibm.com> Tested-by: Hostboot CI <hostboot-ci+hostboot@us.ibm.com> Reviewed-by: Benjamin J. Weisenbeck <bweisenb@us.ibm.com> Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com> Reviewed-by: Jennifer A. Stofer <stofer@us.ibm.com> Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/47108 Reviewed-by: Zane C. Shelley <zshelle@us.ibm.com> Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com> Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.C1327
-rw-r--r--src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.mk27
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairConsts.H182
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.C708
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.H90
-rw-r--r--src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.mk27
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.C1158
-rwxr-xr-xsrc/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.H88
-rw-r--r--src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.mk27
-rwxr-xr-xsrc/import/chips/p9/procedures/xml/attribute_info/p9_erepair_thresholds.xml106
-rw-r--r--src/import/chips/p9/procedures/xml/error_info/p9_io_erepair_errors.xml191
-rwxr-xr-xsrc/usr/targeting/common/genHwsvMrwXml.pl4
-rwxr-xr-xsrc/usr/targeting/common/xmltohb/attribute_types.xml96
-rw-r--r--src/usr/targeting/common/xmltohb/simics_CUMULUS.system.xml6
-rw-r--r--src/usr/targeting/common/xmltohb/target_types.xml4
15 files changed, 3938 insertions, 103 deletions
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.C b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.C
new file mode 100755
index 000000000..682313e21
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.C
@@ -0,0 +1,1327 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.C $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairAccessorHwpFuncs.C
+/// @brief FW Team utility functions that access fabric and memory eRepair data.
+///
+//----------------------------------------------------------------------------
+#include <fapi2.H>
+#include <p9_io_erepairAccessorHwpFuncs.H>
+#include <string.h>
+#include <p9_io_erepairConsts.H>
+#include <p9_io_erepairSetFailedLanesHwp.H>
+#include <p9_io_erepairGetFailedLanesHwp.H>
+
+using namespace EREPAIR;
+using namespace fapi2;
+
+/** Forward Declarations **/
+
+/**
+ * @brief: This function reads the field VPD data to check if there is any
+ * eRepair data. This function will be called during Mnfg mode IPL
+ * during which we need to make sure that the Field VPD is clear.
+ * The Field VPD needs to be clear to enable customers to have
+ * eRepair capability.
+ *
+ * @param [in] i_endp1_target Target of one end the connecting bus
+ * @param [in] i_endp2_target Target of the other end of the connecting bus
+ * The VPD of the passed targets are read for
+ * checking the VPD contents
+ * @param[in] i_clkGroup Specifies clock group 0:[XOA, X1A,..] 1:[X0B, X1B,..]
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode mnfgCheckFieldVPD(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp1_target,
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp2_target,
+ const uint8_t i_clkGroup);
+
+
+/**
+ * @brief: This Function reads the specified VPD (Mnfg or Field) of the passed
+ * targets and verifies whether there are matching eRepair records.
+ * The matching eRepair lanes are returned in the passed references
+ * for vectors.
+ *
+ * @param [in] i_endp1_target Target of one end the connecting bus
+ * @param [in] i_endp2_target Target of the other end of the connecting
+ * bus
+ * @param[in] i_clkGroup Specifies clock group 0:[XOA, X1A,..] 1:[X0B, X1B,..]
+ * @param [out] o_endp1_txFaillanes Reference to vector which will have fail
+ * lane numbers on Tx side of target passed
+ * as first param
+ * @param [out] o_endp1_rxFaillanes Reference to vector which will have fail
+ * lane numbers on Rx side of target passed
+ * as first param
+ * @param [out] o_endp2_txFaillanes Reference to vector which will have fail
+ * lane numbers on Tx side of target passed
+ * as fourth param
+ * @param [out] o_endp2_rxFaillanes Reference to vector which will have fail
+ * lane numbers on Rx side of target passed
+ * as fourth param
+ * @param [in] i_vpdType Indicates whether to read Mnfg VPD or
+ * Field VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode getVerifiedRepairLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp1_target,
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp2_target,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_endp1_txFaillanes,
+ std::vector<uint8_t>& o_endp1_rxFaillanes,
+ std::vector<uint8_t>& o_endp2_txFaillanes,
+ std::vector<uint8_t>& o_endp2_rxFaillanes,
+ const erepairVpdType i_vpdType);
+
+/**
+ * @brief This function checks to see if the passed vectors have matching
+ * fail lane numbers. If no matching lane number is found, such lane
+ * value will be invalidated in the vector
+ *
+ * @param [in] io_endp1_txFaillanes Reference to vector which has fail
+ * lane numbers of Tx side
+ * @param [in] io_endp2_rxFaillanes Reference to vector which has fail
+ * lane numbers of Rx side
+ * @param [out] o_invalidFails_inTx_Ofendp1 If TRUE, indicates that Tx has fail
+ * lane numbers for which there is no
+ * matching entry on Rx side
+ * @param [out] o_invalidFails_inRx_Ofendp2 If TRUE, indicates that Tx has fail
+ * lane numbers for which there is no
+ * matching entry on Tx side
+ *
+ * @return void
+ */
+void invalidateNonMatchingFailLanes(std::vector<uint8_t>& io_endp1_txFaillanes,
+ std::vector<uint8_t>& io_endp2_rxFaillanes,
+ bool& o_invalidFails_inTx_Ofendp1,
+ bool& o_invalidFails_inRx_Ofendp2);
+
+/**
+ * @brief This function gets the eRepair threshold value of the passed target
+ * for the particular IPL type.
+ *
+ * @param [in] i_endp_target The target for whose type the threshold value
+ * is needed
+ * @param [in] i_mfgModeIPL If TRUE, indicates that this is a MnfgMode IPL
+ * If FALSE, indicates that this is a Normal IPL
+ * @param [out] o_threshold The threshold return value
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode geteRepairThreshold(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp_target,
+ const bool i_mfgModeIPL,
+ uint8_t& o_threshold);
+
+/**
+ * @brief This function determines the lane numbers that needs to be spared
+ * to support Corner testing.
+ *
+ * @param [in] i_tgtType The target type(XBus or OBus or DMIBus) for
+ * which the lanes that need to be spared are
+ * determined
+ * @param [out] o_endp1_txFailLanes The reference to the vector which will
+ * have the Tx side of lanes that need to be
+ * spared for endp1
+ * @param [out] o_endp1_rxFailLanes The reference to the vector which will
+ * have the Rx side of lanes that need to be
+ * spared for endp1
+ * @param [out] o_endp2_txFailLanes The reference to the vector which will
+ * have the Tx side of lanes that need to be
+ * spared for endp2
+ * @param [out] o_endp2_rxFailLanes The reference to the vector which will
+ * have the Rx side of lanes that need to be
+ * spared for endp2
+ *
+ * @return void
+ */
+void getCornerTestingLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_tgtType,
+ std::vector<uint8_t>& o_endp1_txFailLanes,
+ std::vector<uint8_t>& o_endp1_rxFailLanes,
+ std::vector<uint8_t>& o_endp2_txFailLanes,
+ std::vector<uint8_t>& o_endp2_rxFailLanes);
+
+/**
+ * @brief This function combines the eRepair lane numbers read from
+ * Manufacturing VPD and Field VPD
+ *
+ * @param [in] i_mnfgFaillanes The eRepair lane numbers read from the
+ * Manufacturing VPD
+ * @param [in] i_fieldFaillanes The eRepair lane numbers read from the
+ * Field VPD
+ * @param [out] o_allFaillanes The eRepair lane numbers which is the union
+ * of the Field and Manufacturing eRepair lanes
+ * passed as first and second params
+ *
+ * @return void
+ */
+void combineFieldandMnfgLanes(std::vector<uint8_t>& i_mnfgFaillanes,
+ std::vector<uint8_t>& i_fieldFaillanes,
+ std::vector<uint8_t>& o_allFailLanes);
+
+
+/***** Function definitions *****/
+
+fapi2::ReturnCode erepairGetRestoreLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp1_target,
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp2_target,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_endp1_txFaillanes,
+ std::vector<uint8_t>& o_endp1_rxFaillanes,
+ std::vector<uint8_t>& o_endp2_txFaillanes,
+ std::vector<uint8_t>& o_endp2_rxFaillanes)
+{
+ fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+ fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+ std::vector<uint8_t> l_endp1_txFieldFaillanes;
+ std::vector<uint8_t> l_endp1_rxFieldFaillanes;
+ std::vector<uint8_t> l_endp1_txMnfgFaillanes;
+ std::vector<uint8_t> l_endp1_rxMnfgFaillanes;
+
+ std::vector<uint8_t> l_endp2_txFieldFaillanes;
+ std::vector<uint8_t> l_endp2_rxFieldFaillanes;
+ std::vector<uint8_t> l_endp2_txMnfgFaillanes;
+ std::vector<uint8_t> l_endp2_rxMnfgFaillanes;
+
+ bool l_mnfgModeIPL = false;
+ bool l_enableDmiSpares = false;
+ bool l_enableFabricSpares = false;
+ bool l_disableFabricERepair = false;
+ bool l_disableMemoryERepair = false;
+ bool l_thresholdExceed = false;
+ uint8_t l_threshold = 0;
+ uint64_t l_allMnfgFlags = 0;
+ uint32_t l_numTxFailLanes = 0;
+ uint32_t l_numRxFailLanes = 0;
+ fapi2::TargetType l_endp1_tgtType = fapi2::TARGET_TYPE_NONE;
+ fapi2::TargetType l_endp2_tgtType = fapi2::TARGET_TYPE_NONE;
+
+ FAPI_INF(">>erepairGetRestoreLanes");
+
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_MNFG_FLAGS,
+ fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
+ l_allMnfgFlags));
+
+ // Check if MNFG_DISABLE_FABRIC_EREPAIR is enabled
+ l_disableFabricERepair = false;
+
+ if(l_allMnfgFlags &
+ fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_DISABLE_FABRIC_eREPAIR)
+ {
+ l_disableFabricERepair = true;
+ }
+
+ // Check if MNFG_DISABLE_MEMORY_EREPAIR is enabled
+ l_disableMemoryERepair = false;
+
+ if(l_allMnfgFlags &
+ fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_DISABLE_MEMORY_eREPAIR)
+ {
+ l_disableMemoryERepair = true;
+ }
+
+ // Check if this is Manufacturing mode IPL.
+ l_mnfgModeIPL = false;
+
+ if(l_allMnfgFlags & fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_THRESHOLDS)
+ {
+ l_mnfgModeIPL = true;
+ }
+
+ // Get the type of passed targets
+ l_endp1_tgtType = i_endp1_target.getType();
+ l_endp2_tgtType = i_endp2_target.getType();
+
+ // Check if the correct target types are passed
+ if(l_endp1_tgtType == fapi2::TARGET_TYPE_XBUS_ENDPOINT ||
+ l_endp1_tgtType == fapi2::TARGET_TYPE_OBUS)
+ {
+ FAPI_ASSERT(l_endp1_tgtType == l_endp2_tgtType,
+ fapi2::P9_EREPAIR_RESTORE_INVALID_TARGET_PAIR()
+ .set_TARGET1(l_endp1_tgtType)
+ .set_TARGET2(l_endp2_tgtType),
+ "ERROR:erepairGetRestoreLanes: Invalid endpoint target pair");
+
+ // Fabric eRepair has been disabled using the
+ // Manufacturing policy flags
+ FAPI_ASSERT(!(l_mnfgModeIPL && l_disableFabricERepair),
+ fapi2::P9_EREPAIR_RESTORE_FABRIC_DISABLED()
+ .set_VALUE1(l_mnfgModeIPL)
+ .set_VALUE2(l_disableFabricERepair),
+ "ERROR:erepairGetRestoreLanes: Fabric eRepair is disabled");
+ }
+ else if(l_endp1_tgtType == fapi2::TARGET_TYPE_MCS_CHIPLET ||
+ l_endp1_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP)
+ {
+ FAPI_ASSERT( ((l_endp1_tgtType == fapi2::TARGET_TYPE_MCS_CHIPLET) &&
+ (l_endp2_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP)) ||
+ ((l_endp1_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP) &&
+ (l_endp2_tgtType == fapi2::TARGET_TYPE_MCS_CHIPLET)),
+ fapi2::P9_EREPAIR_RESTORE_INVALID_TARGET_PAIR()
+ .set_TARGET1(l_endp1_tgtType)
+ .set_TARGET2(l_endp2_tgtType),
+ "ERROR:erepairGetRestoreLanes: Invalid endpoint target pair");
+
+ // Memory eRepair has been disabled using the
+ // Manufacturing policy flags
+ FAPI_ASSERT(!(l_mnfgModeIPL && l_disableMemoryERepair),
+ fapi2::P9_EREPAIR_RESTORE_MEMORY_DISABLED()
+ .set_VALUE1(l_mnfgModeIPL)
+ .set_VALUE2(l_disableMemoryERepair),
+ "ERROR:erepairGetRestoreLanes: Memory eRepair is disabled");
+ }
+
+ if(l_mnfgModeIPL)
+ {
+ /***** Check Field VPD *****/
+
+ // Do not allow eRepair data in Field VPD during Mfg Mode IPL
+ l_rc = mnfgCheckFieldVPD(i_endp1_target,
+ i_endp2_target,
+ i_clkGroup);
+
+ if(l_rc)
+ {
+ FAPI_DBG("erepairGetRestoreLanes:Error from mnfgCheckFieldVPD");
+ fapi2::current_err = l_rc;
+ goto fapi_try_exit;
+ }
+
+ /***** Read Manufacturing VPD *****/
+ FAPI_TRY( getVerifiedRepairLanes(
+ i_endp1_target,
+ i_endp2_target,
+ i_clkGroup,
+ o_endp1_txFaillanes,
+ o_endp1_rxFaillanes,
+ o_endp2_txFaillanes,
+ o_endp2_rxFaillanes,
+ EREPAIR_VPD_MNFG),
+ "getVerifiedRepairLanes(Mnfg) mnfg mode ipl failed w/rc=0x%x",
+ (uint64_t)current_err );
+ }
+ else
+ {
+ /***** Normal Mode IPL *****/
+ // During Normal mode IPL we read both Mnfg and Field VPD
+ // for restoring eRepair lanes
+
+ /***** Read Manufacturing VPD *****/
+ FAPI_TRY( getVerifiedRepairLanes(
+ i_endp1_target,
+ i_endp2_target,
+ i_clkGroup,
+ l_endp1_txMnfgFaillanes,
+ l_endp1_rxMnfgFaillanes,
+ l_endp2_txMnfgFaillanes,
+ l_endp2_rxMnfgFaillanes,
+ EREPAIR_VPD_MNFG),
+ "getVerifiedRepairLanes(Mnfg) normal mode ipl failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ /***** Read Field VPD *****/
+ FAPI_TRY( getVerifiedRepairLanes(
+ i_endp1_target,
+ i_endp2_target,
+ i_clkGroup,
+ l_endp1_txFieldFaillanes,
+ l_endp1_rxFieldFaillanes,
+ l_endp2_txFieldFaillanes,
+ l_endp2_rxFieldFaillanes,
+ EREPAIR_VPD_FIELD),
+ "getVerifiedRepairLanes(Field) normal mode ipl failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ /***** Combine the Mnfg and Field eRepair lanes *****/
+
+ // Combine the Tx side fail lanes of endp1
+ combineFieldandMnfgLanes(l_endp1_txMnfgFaillanes,
+ l_endp1_txFieldFaillanes,
+ o_endp1_txFaillanes);
+
+ // Combine the Rx side fail lanes of endp1
+ combineFieldandMnfgLanes(l_endp1_rxMnfgFaillanes,
+ l_endp1_rxFieldFaillanes,
+ o_endp1_rxFaillanes);
+
+ // Combine the Tx side fail lanes of endp2
+ combineFieldandMnfgLanes(l_endp2_txMnfgFaillanes,
+ l_endp2_txFieldFaillanes,
+ o_endp2_txFaillanes);
+
+ // Combine the Rx side fail lanes of endp1
+ combineFieldandMnfgLanes(l_endp2_rxMnfgFaillanes,
+ l_endp2_rxFieldFaillanes,
+ o_endp2_rxFaillanes);
+
+ } // end of else block of "if(l_mnfgModeIPL)"
+
+
+ /***** Check for threshold exceed conditions *****/
+
+ // Get the eRepair threshold limit
+ l_threshold = 0;
+ FAPI_TRY( geteRepairThreshold(
+ i_endp1_target,
+ l_mnfgModeIPL,
+ l_threshold),
+ "geteRepairThreshold() failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ // Check if the eRepair threshold has exceeded for Tx side of endp1
+ if(o_endp1_txFaillanes.size() > l_threshold)
+ {
+ l_thresholdExceed = true;
+ l_numTxFailLanes = o_endp1_txFaillanes.size();
+
+ FAPI_DBG("erepairGetRestoreLanes: eRepair threshold exceed error"
+ " seen in Tx of endp1 target. No.of lanes: %d", l_numTxFailLanes);
+ }
+
+ // Check if the eRepair threshold has exceeded for Rx side of endp1
+ if(o_endp1_rxFaillanes.size() > l_threshold)
+ {
+ l_thresholdExceed = true;
+ l_numRxFailLanes = o_endp1_rxFaillanes.size();
+
+ FAPI_DBG("erepairGetRestoreLanes: eRepair threshold exceed error"
+ " seen in Rx of endp1 target. No.of lanes: %d", l_numRxFailLanes);
+ }
+
+ // Check if the eRepair threshold has exceeded for Tx side of endp2
+ if(o_endp2_txFaillanes.size() > l_threshold)
+ {
+ l_thresholdExceed = true;
+ l_numTxFailLanes = o_endp2_txFaillanes.size();
+
+ FAPI_DBG("erepairGetRestoreLanes: eRepair threshold exceed error"
+ " seen in Tx of endp2 target. No.of lanes: %d",
+ l_numTxFailLanes);
+ }
+
+ // Check if the eRepair threshold has exceeded for Rx side of endp2
+ if(o_endp2_rxFaillanes.size() > l_threshold)
+ {
+ l_thresholdExceed = true;
+ l_numRxFailLanes = o_endp2_rxFaillanes.size();
+
+ FAPI_DBG("erepairGetRestoreLanes: eRepair threshold exceed error"
+ " seen in Rx of endp2 target. No.of lanes: %d",
+ l_numRxFailLanes);
+ }
+
+ FAPI_ASSERT(l_thresholdExceed == false,
+ fapi2::P9_EREPAIR_THRESHOLD_EXCEED()
+ .set_TX_NUM_LANES(l_numTxFailLanes)
+ .set_RX_NUM_LANES(l_numTxFailLanes)
+ .set_THRESHOLD(l_threshold),
+ "ERROR:The threshold limit for eRepair has been crossed");
+
+ if(l_mnfgModeIPL)
+ {
+ // Check if MNFG_DMI_DEPLOY_LANE_SPARES is enabled
+ l_enableDmiSpares = false;
+
+ if(l_allMnfgFlags &
+ fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_DMI_DEPLOY_LANE_SPARES)
+ {
+ l_enableDmiSpares = true;
+ }
+
+ // Check if MNFG_FABRIC_DEPLOY_LANE_SPARES is enabled
+ l_enableFabricSpares = false;
+
+ if(l_allMnfgFlags &
+ fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_FABRIC_DEPLOY_LANE_SPARES)
+ {
+ l_enableFabricSpares = true;
+ }
+
+ if(l_enableDmiSpares || l_enableFabricSpares)
+ {
+ // This is a Corner testing IPL.
+ // eRepair Restore the pre-determined memory lanes
+ getCornerTestingLanes(i_endp1_target,
+ o_endp1_txFaillanes,
+ o_endp1_rxFaillanes,
+ o_endp2_txFaillanes,
+ o_endp2_rxFaillanes);
+ }
+ } // end of if(l_mnfgModeIPL)
+
+fapi_try_exit:
+ return fapi2::current_err;
+}
+
///
/// @brief Merge the Manufacturing-VPD and Field-VPD eRepair lane lists
///        into a single sorted list with duplicates removed.
///
/// @param[in]  i_mnfgFaillanes  Fail lanes read from Manufacturing VPD
/// @param[in]  i_fieldFaillanes Fail lanes read from Field VPD
/// @param[out] o_allFaillanes   Union of both lists (appended to any
///                              entries already present), sorted, unique
///
void combineFieldandMnfgLanes(std::vector<uint8_t>& i_mnfgFaillanes,
                              std::vector<uint8_t>& i_fieldFaillanes,
                              std::vector<uint8_t>& o_allFaillanes)
{
    // Gather every lane from both sources into the output vector.
    // A single up-front reserve avoids repeated reallocation.
    o_allFaillanes.reserve(o_allFaillanes.size() +
                           i_mnfgFaillanes.size() +
                           i_fieldFaillanes.size());

    o_allFaillanes.insert(o_allFaillanes.end(),
                          i_mnfgFaillanes.begin(),
                          i_mnfgFaillanes.end());

    o_allFaillanes.insert(o_allFaillanes.end(),
                          i_fieldFaillanes.begin(),
                          i_fieldFaillanes.end());

    // A lane repaired in both Mnfg and Field VPD must appear only once:
    // sort, then apply the classic erase-unique idiom.
    std::sort(o_allFaillanes.begin(), o_allFaillanes.end());

    const auto l_newEnd = std::unique(o_allFaillanes.begin(),
                                      o_allFaillanes.end());
    o_allFaillanes.erase(l_newEnd, o_allFaillanes.end());
}
+
// Determine the pre-defined spare lanes to deploy for Corner testing and
// append any that are not already in the endpoint1 fail-lane lists; the
// resulting lists are then mirrored onto endpoint2.
void getCornerTestingLanes(
    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
    fapi2::TARGET_TYPE_OBUS |
    fapi2::TARGET_TYPE_MEMBUF_CHIP |
    fapi2::TARGET_TYPE_MCS_CHIPLET |
    fapi2::TARGET_TYPE_MCS > &i_tgtType,
    std::vector<uint8_t>& o_endp1_txFaillanes,
    std::vector<uint8_t>& o_endp1_rxFaillanes,
    std::vector<uint8_t>& o_endp2_txFaillanes,
    std::vector<uint8_t>& o_endp2_rxFaillanes)
{
    std::vector<uint8_t>::iterator l_it;
    uint8_t l_deployIndx = 0;
    // Number of deployable spares and the lane table for the bus type,
    // selected by the switch below.
    uint8_t l_maxDeploys = 0;
    uint8_t* l_deployPtr = NULL;

    // Per-bus-type tables of the fixed lane numbers to spare
    // (constants from p9_io_erepairConsts.H).
    uint8_t l_xDeployLanes[XBUS_MAXSPARES_IN_HW] = {XBUS_SPARE_DEPLOY_LANE_1};
    uint8_t l_oDeployLanes[OBUS_MAXSPARES_IN_HW] = {OBUS_SPARE_DEPLOY_LANE_1,
                                                    OBUS_SPARE_DEPLOY_LANE_2
                                                   };

    uint8_t l_dmiDeployLanes[DMIBUS_MAXSPARES_IN_HW] =
    {
        DMIBUS_SPARE_DEPLOY_LANE_1,
        DMIBUS_SPARE_DEPLOY_LANE_2
    };

    // Idea is to push_back the pre-determined lanes into the Tx and Rx
    // vectors of endpoint1 and endpoint2
    switch(i_tgtType.getType())
    {
        case fapi2::TARGET_TYPE_XBUS_ENDPOINT:
            l_maxDeploys = XBUS_MAXSPARES_IN_HW;
            l_deployPtr = l_xDeployLanes;
            break;

        case fapi2::TARGET_TYPE_OBUS:
            l_maxDeploys = OBUS_MAXSPARES_IN_HW;
            l_deployPtr = l_oDeployLanes;
            break;

        case fapi2::TARGET_TYPE_MCS_CHIPLET:
        case fapi2::TARGET_TYPE_MEMBUF_CHIP:
            l_maxDeploys = DMIBUS_MAXSPARES_IN_HW;
            l_deployPtr = l_dmiDeployLanes;
            break;

        default:
            // FAPI_ASSERT jumps to fapi_try_exit on failure; the function
            // returns void, so the error is logged but not propagated.
            FAPI_ASSERT(false,
                        fapi2::P9_EREPAIR_RESTORE_INVALID_TARGET()
                        .set_TARGET(i_tgtType),
                        "ERROR:getCornerTestingLanes: Invalid target type");
            break;
    };

    // Sort so the lists stay in lane-number order after the appends below.
    std::sort(o_endp1_txFaillanes.begin(), o_endp1_txFaillanes.end());

    std::sort(o_endp1_rxFaillanes.begin(), o_endp1_rxFaillanes.end());

    // Append each deploy lane to endp1 Tx unless it is already recorded,
    // never growing the list beyond l_maxDeploys entries.
    for(l_deployIndx = 0;
        ((l_deployIndx < l_maxDeploys) &&
         (o_endp1_txFaillanes.size() < l_maxDeploys));
        l_deployIndx++)
    {
        l_it = std::find(o_endp1_txFaillanes.begin(),
                         o_endp1_txFaillanes.end(),
                         l_deployPtr[l_deployIndx]);

        if(l_it == o_endp1_txFaillanes.end())
        {
            o_endp1_txFaillanes.push_back(l_deployPtr[l_deployIndx]);
        }
    }

    // Same treatment for endp1 Rx.
    for(l_deployIndx = 0;
        ((o_endp1_rxFaillanes.size() < l_maxDeploys) &&
         (l_deployIndx < l_maxDeploys));
        l_deployIndx++)
    {
        l_it = std::find(o_endp1_rxFaillanes.begin(),
                         o_endp1_rxFaillanes.end(),
                         l_deployPtr[l_deployIndx]);

        if(l_it == o_endp1_rxFaillanes.end())
        {
            o_endp1_rxFaillanes.push_back(l_deployPtr[l_deployIndx]);
        }
    }

    // We can assign the lanes of endpoint1 to endpoint2 because any
    // existing faillanes in endpoint2 have already been matched with
    // endpoint1. This means that there cannot be any faillanes in
    // endpoint2 that do not have equivalent lanes in endpoint1.
    o_endp2_txFaillanes = o_endp1_txFaillanes;
    o_endp2_rxFaillanes = o_endp1_rxFaillanes;

fapi_try_exit:
    return;
}
+
// Look up the eRepair threshold for the bus type of the given target.
// Manufacturing-mode IPLs use the (tighter) *_MNFG attributes; normal
// IPLs use the *_FIELD attributes.  All thresholds are system-level
// attributes.
fapi2::ReturnCode geteRepairThreshold(
    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
    fapi2::TARGET_TYPE_OBUS |
    fapi2::TARGET_TYPE_MEMBUF_CHIP |
    fapi2::TARGET_TYPE_MCS_CHIPLET |
    fapi2::TARGET_TYPE_MCS > &i_endp_target,
    const bool i_mfgModeIPL,
    uint8_t& o_threshold)
{
    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
    fapi2::TargetType l_tgtType = fapi2::TARGET_TYPE_NONE;

    // Default to 0 so a failed attribute read yields the most
    // restrictive threshold.
    o_threshold = 0;
    l_tgtType = i_endp_target.getType();

    if(i_mfgModeIPL)
    {
        // Manufacturing-mode thresholds, keyed by bus type.
        switch(l_tgtType)
        {
            case fapi2::TARGET_TYPE_XBUS_ENDPOINT:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_X_EREPAIR_THRESHOLD_MNFG,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            case fapi2::TARGET_TYPE_OBUS:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_O_EREPAIR_THRESHOLD_MNFG,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            case fapi2::TARGET_TYPE_MCS_CHIPLET:
            case fapi2::TARGET_TYPE_MEMBUF_CHIP:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_DMI_EREPAIR_THRESHOLD_MNFG,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            default:
                FAPI_ASSERT(false,
                            fapi2::P9_EREPAIR_RESTORE_INVALID_TARGET()
                            .set_TARGET(l_tgtType),
                            "ERROR:geteRepairThreshold: Invalid target type");
                break;
        };
    }
    else
    {
        // Field (customer) thresholds, keyed by bus type.
        switch(l_tgtType)
        {
            case fapi2::TARGET_TYPE_XBUS_ENDPOINT:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_X_EREPAIR_THRESHOLD_FIELD,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            case fapi2::TARGET_TYPE_OBUS:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_O_EREPAIR_THRESHOLD_FIELD,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            case fapi2::TARGET_TYPE_MCS_CHIPLET:
            case fapi2::TARGET_TYPE_MEMBUF_CHIP:
                FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_DMI_EREPAIR_THRESHOLD_FIELD,
                                       fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
                                       o_threshold));
                break;

            default:
                FAPI_ASSERT(false,
                            fapi2::P9_EREPAIR_RESTORE_INVALID_TARGET()
                            .set_TARGET(l_tgtType),
                            "ERROR:geteRepairThreshold: Invalid target type");
                break;
        };
    }

fapi_try_exit:
    // Trace the resolved threshold even on the error path (it will be 0
    // if no attribute was read).
    FAPI_INF("geteRepairThreshold: o_threshold = %d", o_threshold);
    return fapi2::current_err;
}
+
+fapi2::ReturnCode mnfgCheckFieldVPD(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp1_target,
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_endp2_target,
+ const uint8_t i_clkGroup)
+{
+ fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+ std::vector<uint8_t> l_endp1_txFaillanes;
+ std::vector<uint8_t> l_endp1_rxFaillanes;
+ std::vector<uint8_t> l_endp2_txFaillanes;
+ std::vector<uint8_t> l_endp2_rxFaillanes;
+ bool l_fieldVPDClear = true;
+
+ l_fieldVPDClear = true;
+
+ /***** Read Field VPD *****/
+
+ // During Mfg mode IPL, field VPD need to be clear.
+
+ // Get failed lanes for endp1
+ FAPI_TRY( erepairGetFieldFailedLanes(
+ i_endp1_target,
+ i_clkGroup,
+ l_endp1_txFaillanes,
+ l_endp1_rxFaillanes),
+ "erepairGetFieldFailedLanes endp1 target failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ // If there are fail lanes in Field VPD on endpoint1, create an
+ // error log and return
+ if(l_endp1_txFaillanes.size() ||
+ l_endp1_rxFaillanes.size())
+ {
+ l_fieldVPDClear = false;
+ FAPI_DBG("mnfgCheckFieldVPD: eRepair records found in Field VPD in Tx during Manufacturing mode IPL");
+ }
+
+ // Get failed lanes for endp2
+ FAPI_TRY( erepairGetFieldFailedLanes(
+ i_endp2_target,
+ i_clkGroup,
+ l_endp2_txFaillanes,
+ l_endp2_rxFaillanes),
+ "erepairGetFieldFailedLanes endp2 target failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ // If there are fail lanes in Field VPD on endpoint2, create an
+ // error log and return
+ if(l_endp2_txFaillanes.size() ||
+ l_endp2_rxFaillanes.size())
+ {
+ l_fieldVPDClear = false;
+ FAPI_DBG("mnfgCheckFieldVPD: eRepair records found in Field VPD in Rx during Manufacturing mode IPL");
+ }
+
+ FAPI_ASSERT(l_fieldVPDClear == true,
+ fapi2::P9_EREPAIR_RESTORE_FIELD_VPD_NOT_CLEAR()
+ .set_TARGET1(i_endp1_target)
+ .set_TARGET2(i_endp2_target),
+ "ERROR: mnfgCheckFieldVPD: Field VPD need to be clear during Mnfg mode IPL");
+
+fapi_try_exit:
+ return fapi2::current_err;
+}
+
// Read the requested VPD (Mnfg or Field) for both endpoints, keep only the
// Tx/Rx fail lanes that match across the bus, and write back corrected
// records for any endpoint whose VPD contained unmatched (invalidated)
// lanes.
fapi2::ReturnCode getVerifiedRepairLanes(
    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
    fapi2::TARGET_TYPE_OBUS |
    fapi2::TARGET_TYPE_MEMBUF_CHIP |
    fapi2::TARGET_TYPE_MCS_CHIPLET |
    fapi2::TARGET_TYPE_MCS > &i_endp1_target,
    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
    fapi2::TARGET_TYPE_OBUS |
    fapi2::TARGET_TYPE_MEMBUF_CHIP |
    fapi2::TARGET_TYPE_MCS_CHIPLET |
    fapi2::TARGET_TYPE_MCS > &i_endp2_target,
    const uint8_t i_clkGroup,
    std::vector<uint8_t>& o_endp1_txFaillanes,
    std::vector<uint8_t>& o_endp1_rxFaillanes,
    std::vector<uint8_t>& o_endp2_txFaillanes,
    std::vector<uint8_t>& o_endp2_rxFaillanes,
    const erepairVpdType i_vpdType)
{
    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;

    // Accessor-HWP entry points, chosen below by VPD type
    // (types declared in p9_io_erepairGet/SetFailedLanesHwp.H).
    getLanes_t l_getLanes = NULL;
    setLanes_t l_setLanes = NULL;

    // Index 0 = endp1, index 1 = endp2 throughout this function.
    fapi2::Target < fapi2::TARGET_TYPE_XBUS |
          fapi2::TARGET_TYPE_OBUS |
          fapi2::TARGET_TYPE_MEMBUF_CHIP |
          fapi2::TARGET_TYPE_MCS_CHIPLET |
          fapi2::TARGET_TYPE_MCS > l_target[2] = {i_endp1_target, i_endp2_target};
    // Set when an endpoint's Tx/Rx list contained lanes with no matching
    // lane on the opposite end (those entries get invalidated).
    bool l_invalidFails_inTx_OfTgt[2] = {false, false};
    bool l_invalidFails_inRx_OfTgt[2] = {false, false};
    uint8_t l_tgtIndx = 0;

    std::vector<uint8_t> l_emptyVector;
    std::vector<uint8_t> l_txFaillanes;
    std::vector<uint8_t> l_rxFaillanes;

    FAPI_INF(">> getVerifiedRepairLanes: vpdType: %s",
             i_vpdType == EREPAIR_VPD_FIELD ? "Field" : "Mnfg");

    /***** Read VPD *****/

    // Select the Field or Mnfg accessor pair for this VPD type.
    if(i_vpdType == EREPAIR_VPD_FIELD)
    {
        l_getLanes = &erepairGetFieldFailedLanes;
        l_setLanes = &erepairSetFieldFailedLanes;
    }
    else if(i_vpdType == EREPAIR_VPD_MNFG)
    {
        l_getLanes = &erepairGetMnfgFailedLanes;
        l_setLanes = &erepairSetMnfgFailedLanes;
    }

    for(l_tgtIndx = 0; l_tgtIndx < 2; l_tgtIndx++)
    {
        // Get failed lanes for endp1 and endp2
        FAPI_TRY( l_getLanes(
                      l_target[l_tgtIndx],
                      i_clkGroup,
                      l_txFaillanes,
                      l_rxFaillanes),
                  "getVerifiedRepairLanes() from Accessor HWP failed w/rc=0x%x",
                  (uint64_t)current_err );

        if(l_tgtIndx == 0)
        {
            o_endp1_txFaillanes = l_txFaillanes;
            o_endp1_rxFaillanes = l_rxFaillanes;
        }
        else
        {
            o_endp2_txFaillanes = l_txFaillanes;
            o_endp2_rxFaillanes = l_rxFaillanes;
        }

        // Reuse the scratch vectors for the next endpoint.
        l_txFaillanes.clear();
        l_rxFaillanes.clear();
    } // end of for(l_tgtIndx)

    // Check if matching fail lanes exist on the sub-interfaces
    // connecting the two end points: endp1.Tx pairs with endp2.Rx,
    // and endp2.Tx pairs with endp1.Rx.  Unmatched entries are
    // invalidated in place and the per-endpoint flags are set.
    if(o_endp1_txFaillanes.size() || o_endp2_rxFaillanes.size())
    {
        invalidateNonMatchingFailLanes(o_endp1_txFaillanes,
                                       o_endp2_rxFaillanes,
                                       l_invalidFails_inTx_OfTgt[0],
                                       l_invalidFails_inRx_OfTgt[1]);
    }

    if(o_endp2_txFaillanes.size() || o_endp1_rxFaillanes.size())
    {
        invalidateNonMatchingFailLanes(o_endp2_txFaillanes,
                                       o_endp1_rxFaillanes,
                                       l_invalidFails_inTx_OfTgt[1],
                                       l_invalidFails_inRx_OfTgt[0]);
    }

    /***** Correct eRepair data of endp1 in VPD *****/

    for(l_tgtIndx = 0; l_tgtIndx < 2; l_tgtIndx++)
    {
        if(l_tgtIndx == 0)
        {
            l_txFaillanes = o_endp1_txFaillanes;
            l_rxFaillanes = o_endp1_rxFaillanes;
        }
        else
        {
            l_txFaillanes = o_endp2_txFaillanes;
            l_rxFaillanes = o_endp2_rxFaillanes;
        }

        // Update endp1 and endp2 VPD to invalidate fail lanes that do
        // not have matching fail lanes on the other end.  Only the side(s)
        // that actually contained invalidated entries are written
        // (the empty vector appears to mean "no update for this side" —
        // NOTE(review): confirm against the SetFailedLanes HWP contract).
        if(l_invalidFails_inTx_OfTgt[l_tgtIndx] &&
           l_invalidFails_inRx_OfTgt[l_tgtIndx])
        {
            FAPI_TRY( l_setLanes(
                          l_target[l_tgtIndx],
                          i_clkGroup,
                          l_txFaillanes,
                          l_rxFaillanes),
                      "getVerifiedRepairLanes() tx/rx from Accessor HWP failed w/rc=0x%x",
                      (uint64_t)current_err );
        }
        else if(l_invalidFails_inTx_OfTgt[l_tgtIndx])
        {
            FAPI_TRY( l_setLanes(
                          l_target[l_tgtIndx],
                          i_clkGroup,
                          l_txFaillanes,
                          l_emptyVector),
                      "getVerifiedRepairLanes() tx from Accessor HWP failed w/rc=0x%x",
                      (uint64_t)current_err );
        }
        else if(l_invalidFails_inRx_OfTgt[l_tgtIndx])
        {
            FAPI_TRY( l_setLanes(
                          l_target[l_tgtIndx],
                          i_clkGroup,
                          l_emptyVector,
                          l_rxFaillanes),
                      "getVerifiedRepairLanes() rx from Accessor HWP failed w/rc=0x%x",
                      (uint64_t)current_err );
        }
    } // end of for loop

fapi_try_exit:
    return current_err;
}
+
+/**
+ * @brief Invalidate eRepair fail lanes that do not have a matching fail
+ *        lane on the other side of the bus.
+ *
+ *        A Tx fail lane recorded on one endpoint is only meaningful if the
+ *        connected endpoint recorded the same lane number as an Rx fail.
+ *        Unmatched entries are overwritten in place with
+ *        INVALID_FAIL_LANE_NUMBER; the vectors are sorted but never shrunk.
+ *
+ * @param[in,out] io_endp1_txFaillanes Tx fail lanes of endpoint1; sorted on
+ *                                     return, unmatched entries invalidated
+ * @param[in,out] io_endp2_rxFaillanes Rx fail lanes of endpoint2; sorted on
+ *                                     return, unmatched entries invalidated
+ * @param[out] o_invalidFails_inTx_Ofendp1 Set true if any Tx entry of
+ *                                         endpoint1 was invalidated
+ * @param[out] o_invalidFails_inRx_Ofendp2 Set true if any Rx entry of
+ *                                         endpoint2 was invalidated
+ */
+void invalidateNonMatchingFailLanes(std::vector<uint8_t>& io_endp1_txFaillanes,
+                                    std::vector<uint8_t>& io_endp2_rxFaillanes,
+                                    bool& o_invalidFails_inTx_Ofendp1,
+                                    bool& o_invalidFails_inRx_Ofendp2)
+{
+    std::vector<uint8_t>::iterator l_it;
+    std::vector<uint8_t>::iterator l_itTmp;
+    std::vector<uint8_t>::iterator l_itDrv;
+    std::vector<uint8_t>::iterator l_itRcv;
+
+    o_invalidFails_inTx_Ofendp1 = false;
+    o_invalidFails_inRx_Ofendp2 = false;
+
+    // Sort both sides so std::lower_bound can be used for the matching
+    std::sort(io_endp1_txFaillanes.begin(), io_endp1_txFaillanes.end());
+    std::sort(io_endp2_rxFaillanes.begin(), io_endp2_rxFaillanes.end());
+
+    // Start with drive side fail lanes and check for matching lanes
+    // on the receive side
+    l_itTmp = io_endp2_rxFaillanes.begin();
+
+    for(l_itDrv = io_endp1_txFaillanes.begin();
+        l_itDrv != io_endp1_txFaillanes.end();
+        l_itDrv++)
+    {
+        l_it = std::lower_bound(io_endp2_rxFaillanes.begin(),
+                                io_endp2_rxFaillanes.end(),
+                                *l_itDrv);
+
+        // If matching fail lane is not found on the receive side,
+        // invalidate the drive side fail lane number
+        if((l_it == io_endp2_rxFaillanes.end()) || (*l_it > *l_itDrv))
+        {
+            *l_itDrv = INVALID_FAIL_LANE_NUMBER;
+            o_invalidFails_inTx_Ofendp1 = true;
+        }
+        else
+        {
+            // Save the iterator for the next search; l_itTmp ends up at
+            // the last (largest) Rx entry that matched a Tx entry
+            l_itTmp = l_it;
+        }
+    }
+
+    // Sort again as we might have invalidated some lanes
+    std::sort(io_endp1_txFaillanes.begin(), io_endp1_txFaillanes.end());
+
+    // Now, traverse through the receive side fail lanes and
+    // check for matching lanes on the drive side
+    for(l_itRcv = io_endp2_rxFaillanes.begin();
+        ((l_itRcv <= l_itTmp) && (l_itRcv != io_endp2_rxFaillanes.end()));
+        l_itRcv++)
+    {
+        l_it = std::lower_bound(io_endp1_txFaillanes.begin(),
+                                io_endp1_txFaillanes.end(),
+                                *l_itRcv);
+
+        // If matching lane is not found on the driver side,
+        // invalidate the receive side fail lane number
+        if((l_it == io_endp1_txFaillanes.end()) || (*l_it > *l_itRcv))
+        {
+            *l_itRcv = INVALID_FAIL_LANE_NUMBER;
+            o_invalidFails_inRx_Ofendp2 = true;
+        }
+    }
+
+    // Need to invalidate all the entries beyond the last
+    // lower bound of first search
+    if(l_itTmp != io_endp2_rxFaillanes.end())
+    {
+        for(l_itTmp++; l_itTmp != io_endp2_rxFaillanes.end(); l_itTmp++)
+        {
+            *l_itTmp = INVALID_FAIL_LANE_NUMBER;
+            // BUGFIX: these invalidations must also be reported to the
+            // caller, otherwise the caller (which gates the VPD rewrite on
+            // this flag) may skip persisting them (e.g. tx={3}, rx={3,5}:
+            // lane 5 was invalidated here but the flag stayed false).
+            o_invalidFails_inRx_Ofendp2 = true;
+        }
+    }
+}
+
+
+/**
+ * @brief Retrieve the eRepair fail lanes of a bus endpoint by merging the
+ *        records read from Field VPD with those read from Manufacturing VPD.
+ *
+ * @param[in]  i_endp_target  Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in]  i_clkGroup     Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[out] o_txFailLanes  Tx fail lane numbers (Field followed by Mnfg)
+ * @param[out] o_rxFailLanes  Rx fail lane numbers (Field followed by Mnfg)
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairGetFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_endp_target,
+    const uint8_t i_clkGroup,
+    std::vector<uint8_t>& o_txFailLanes,
+    std::vector<uint8_t>& o_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+    std::vector<uint8_t> l_txFailLanes;
+    std::vector<uint8_t> l_rxFailLanes;
+
+    // BUGFIX: trace used to read ">> erepairGetFailedLaness" (typo)
+    FAPI_INF(">> erepairGetFailedLanes");
+
+    // Get the erepair lanes from Field VPD
+    FAPI_TRY( erepairGetFieldFailedLanes(
+                  i_endp_target,
+                  i_clkGroup,
+                  l_txFailLanes,
+                  l_rxFailLanes),
+              "erepairGetFieldFailedLanes() failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    o_txFailLanes = l_txFailLanes;
+    o_rxFailLanes = l_rxFailLanes;
+
+    // Get the erepair lanes from Manufacturing VPD
+    l_txFailLanes.clear();
+    l_rxFailLanes.clear();
+    FAPI_TRY( erepairGetMnfgFailedLanes(
+                  i_endp_target,
+                  i_clkGroup,
+                  l_txFailLanes,
+                  l_rxFailLanes),
+              "erepairGetMnfgFailedLanes() failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    // Append the Mnfg lanes to the Field lanes
+    o_txFailLanes.insert(o_txFailLanes.end(),
+                         l_txFailLanes.begin(), l_txFailLanes.end());
+    o_rxFailLanes.insert(o_rxFailLanes.end(),
+                         l_rxFailLanes.begin(), l_rxFailLanes.end());
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+/**
+ * @brief Read the eRepair fail lanes recorded in the Field VPD for the
+ *        given bus endpoint (thin wrapper over the accessor HWP).
+ *
+ * @param[in]  i_endp_target  Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in]  i_clkGroup     Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[out] o_txFailLanes  Tx fail lane numbers read from Field VPD
+ * @param[out] o_rxFailLanes  Rx fail lane numbers read from Field VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairGetFieldFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_endp_target,
+    const uint8_t i_clkGroup,
+    std::vector<uint8_t>& o_txFailLanes,
+    std::vector<uint8_t>& o_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    FAPI_DBG(">> erepairGetFieldFailedLanes");
+
+    // Delegate to the accessor HWP, selecting the Field VPD record
+    FAPI_TRY(p9_io_erepairGetFailedLanesHwp(i_endp_target, EREPAIR_VPD_FIELD,
+                                            i_clkGroup, o_txFailLanes,
+                                            o_rxFailLanes),
+             "erepairGetFieldFailedLanes() from Accessor HWP failed w/rc=0x%x",
+             (uint64_t)current_err);
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+
+/**
+ * @brief Read the eRepair fail lanes recorded in the Manufacturing VPD for
+ *        the given bus endpoint (thin wrapper over the accessor HWP).
+ *
+ * @param[in]  i_endp_target  Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in]  i_clkGroup     Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[out] o_txFailLanes  Tx fail lane numbers read from Mnfg VPD
+ * @param[out] o_rxFailLanes  Rx fail lane numbers read from Mnfg VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairGetMnfgFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_endp_target,
+    const uint8_t i_clkGroup,
+    std::vector<uint8_t>& o_txFailLanes,
+    std::vector<uint8_t>& o_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    FAPI_DBG(">> erepairGetMnfgFailedLanes");
+
+    // Delegate to the accessor HWP, selecting the Manufacturing VPD record
+    FAPI_TRY(p9_io_erepairGetFailedLanesHwp(i_endp_target, EREPAIR_VPD_MNFG,
+                                            i_clkGroup, o_txFailLanes,
+                                            o_rxFailLanes),
+             "erepairGetMnfgFailedLanes() from Accessor HWP failed w/rc=0x%x",
+             (uint64_t)current_err);
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+/**
+ * @brief Record newly detected fail lanes in the VPD of both endpoints of
+ *        a bus, unless doing so would exceed the eRepair threshold.
+ *
+ *        The new Rx fail lane list is merged with the lanes already in VPD
+ *        and written to the Rx side of the receiving endpoint and to the Tx
+ *        side of the driving endpoint.  During a Manufacturing-mode IPL the
+ *        Mnfg VPD accessors are used, otherwise the Field VPD accessors.
+ *
+ * @param[in]  i_txEndp_target   Endpoint driving the failed lanes
+ * @param[in]  i_rxEndp_target   Endpoint receiving the failed lanes
+ * @param[in]  i_clkGroup        Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in]  i_rxFailLanes     Newly detected fail lane numbers (Rx view)
+ * @param[out] o_thresholdExceed Set true (and no VPD is written) if the new
+ *                               fails, alone or combined with the lanes
+ *                               already in VPD, exceed the repair threshold
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairSetFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_txEndp_target,
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_rxEndp_target,
+    const uint8_t i_clkGroup,
+    const std::vector<uint8_t>& i_rxFailLanes,
+    bool& o_thresholdExceed)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+    uint64_t l_allMnfgFlags = 0;
+    bool l_mnfgModeIPL = false;
+    uint8_t l_threshold = 0;
+    setLanes_t l_setLanes = NULL;
+    getLanes_t l_getLanes = NULL;
+    std::vector<uint8_t> l_txFaillanes;
+    std::vector<uint8_t> l_rxFaillanes;
+    std::vector<uint8_t> l_emptyVector;
+    std::vector<uint8_t> l_throwAway;
+
+    // Helper: sort a lane vector and drop duplicate entries (used for both
+    // the Rx and Tx merges below)
+    auto l_sortUnique = [](std::vector<uint8_t>& io_lanes)
+    {
+        std::sort(io_lanes.begin(), io_lanes.end());
+        io_lanes.erase(std::unique(io_lanes.begin(), io_lanes.end()),
+                       io_lanes.end());
+    };
+
+    FAPI_INF(">> erepairSetFailedLanes");
+
+    o_thresholdExceed = false;
+
+    // Get the Manufacturing Policy flags
+    FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_MNFG_FLAGS,
+                           fapi2::Target<fapi2::TARGET_TYPE_SYSTEM>(),
+                           l_allMnfgFlags),
+             "erepairSetFailedLanes: Unable to read attribute ATTR_MNFG_FLAGS");
+
+    // Check if this is a Mnfg mode IPL
+    if(l_allMnfgFlags & fapi2::ENUM_ATTR_MNFG_FLAGS_MNFG_THRESHOLDS)
+    {
+        l_mnfgModeIPL = true;
+    }
+
+    // Select the VPD accessors matching the IPL type
+    if(l_mnfgModeIPL)
+    {
+        l_setLanes = &erepairSetMnfgFailedLanes;
+        l_getLanes = &erepairGetMnfgFailedLanes;
+    }
+    else
+    {
+        l_setLanes = &erepairSetFieldFailedLanes;
+        l_getLanes = &erepairGetFieldFailedLanes;
+    }
+
+    /*** Check if we have crossed the repair threshold ***/
+    // Get the eRepair threshold limit
+    l_threshold = 0;
+    FAPI_TRY( geteRepairThreshold(
+                  i_rxEndp_target,
+                  l_mnfgModeIPL,
+                  l_threshold),
+              "geteRepairThreshold() from Accessor HWP failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    // Check if the new fails have crossed the threshold
+    if(i_rxFailLanes.size() > l_threshold)
+    {
+        o_thresholdExceed = true;
+        goto fapi_try_exit;
+    }
+
+    // Get existing fail lanes that are in the VPD of rx endpoint
+    FAPI_TRY( l_getLanes(
+                  i_rxEndp_target,
+                  i_clkGroup,
+                  l_throwAway,
+                  l_rxFaillanes),
+              "rx l_getLanes() from Accessor HWP failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    // Get existing fail lanes that are in the VPD of tx endpoint
+    FAPI_TRY( l_getLanes(
+                  i_txEndp_target,
+                  i_clkGroup,
+                  l_txFaillanes,
+                  l_throwAway),
+              "tx l_getLanes() from Accessor HWP failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    // Combine the new fails with the fails already recorded on the Rx side
+    // and remove duplicates
+    l_rxFaillanes.insert(l_rxFaillanes.end(),
+                         i_rxFailLanes.begin(),
+                         i_rxFailLanes.end());
+    l_sortUnique(l_rxFaillanes);
+
+    // Combine the new fails with the fails already recorded on the Tx side
+    // and remove duplicates (the new Rx fail lane numbers are applied to
+    // the Tx endpoint's VPD as well)
+    l_txFaillanes.insert(l_txFaillanes.end(),
+                         i_rxFailLanes.begin(),
+                         i_rxFailLanes.end());
+    l_sortUnique(l_txFaillanes);
+
+    // Check if the sum of old and new fail lanes have crossed the threshold
+    if((l_txFaillanes.size() > l_threshold) ||
+       (l_rxFaillanes.size() > l_threshold))
+    {
+        o_thresholdExceed = true;
+        goto fapi_try_exit;
+    }
+
+    /*** Update the VPD ***/
+
+    // Write the VPD of the Rx endpoint with the fail lanes on the Rx side
+    FAPI_TRY( l_setLanes(
+                  i_rxEndp_target,
+                  i_clkGroup,
+                  l_emptyVector,
+                  l_rxFaillanes),
+              "rx l_setLanes() from Accessor HWP failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    // Write the VPD of the Tx endpoint with the fail lanes on the Tx side
+    FAPI_TRY( l_setLanes(
+                  i_txEndp_target,
+                  i_clkGroup,
+                  l_txFaillanes,
+                  l_emptyVector),
+              "tx l_setLanes() from Accessor HWP failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+fapi_try_exit:
+    FAPI_INF("<< erepairSetFailedLanes");
+    return fapi2::current_err;
+}
+
+/**
+ * @brief Write the given eRepair fail lanes to the Field VPD of the given
+ *        bus endpoint (thin wrapper over the accessor HWP).
+ *
+ * @param[in] i_endp_target  Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in] i_clkGroup     Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_txFailLanes  Tx fail lane numbers to record
+ * @param[in] i_rxFailLanes  Rx fail lane numbers to record
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairSetFieldFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_endp_target,
+    const uint8_t i_clkGroup,
+    const std::vector<uint8_t>& i_txFailLanes,
+    const std::vector<uint8_t>& i_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    // Delegate to the accessor HWP, selecting the Field VPD record
+    FAPI_TRY(p9_io_erepairSetFailedLanesHwp(i_endp_target, EREPAIR_VPD_FIELD,
+                                            i_clkGroup, i_txFailLanes,
+                                            i_rxFailLanes),
+             "erepairSetFieldFailedLanes() from Accessor HWP failed w/rc=0x%x",
+             (uint64_t)current_err);
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+/**
+ * @brief Write the given eRepair fail lanes to the Manufacturing VPD of the
+ *        given bus endpoint (thin wrapper over the accessor HWP).
+ *
+ * @param[in] i_endp_target  Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in] i_clkGroup     Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_txFailLanes  Tx fail lane numbers to record
+ * @param[in] i_rxFailLanes  Rx fail lane numbers to record
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode erepairSetMnfgFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_endp_target,
+    const uint8_t i_clkGroup,
+    const std::vector<uint8_t>& i_txFailLanes,
+    const std::vector<uint8_t>& i_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    // Delegate to the accessor HWP, selecting the Manufacturing VPD record
+    FAPI_TRY(p9_io_erepairSetFailedLanesHwp(i_endp_target, EREPAIR_VPD_MNFG,
+                                            i_clkGroup, i_txFailLanes,
+                                            i_rxFailLanes),
+             "erepairSetMnfgFailedLanes() from Accessor HWP failed w/rc=0x%x",
+             (uint64_t)current_err);
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.mk b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.mk
new file mode 100644
index 000000000..af439b453
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.mk
@@ -0,0 +1,27 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairAccessorHwpFuncs.mk $
+#
+# OpenPOWER HostBoot Project
+#
+# Contributors Listed Below - COPYRIGHT 2015,2017
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+PROCEDURE=p9_io_erepairAccessorHwpFuncs
+$(call BUILD_PROCEDURE)
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairConsts.H b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairConsts.H
new file mode 100755
index 000000000..108d3d2f4
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairConsts.H
@@ -0,0 +1,182 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairConsts.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairConsts.H
+/// @brief eRepair Constants
+///
+//----------------------------------------------------------------------------
+// *HWP HWP Owner : Chris Steffen <cwsteffen@us.ibm.com>
+// *HWP HWP Backup Owner: Gary Peterson <garyp@us.ibm.com>
+// *HWP FW Owner : Sumit Kumar <sumit_kumar@in.ibm.com>
+// *HWP Team : IO
+// *HWP Level : 2
+// *HWP Consumed by : FSP:HB
+//----------------------------------------------------------------------------
+
+#ifndef P9_IO_EREPAIRCONSTS_H_
+#define P9_IO_EREPAIRCONSTS_H_
+
+/******************************************************************************
+ * Erepair constants
+ *****************************************************************************/
+
+namespace EREPAIR
+{
+// Sentinel written over fail-lane entries that are found to be invalid
+// (e.g. an entry with no matching fail lane on the other end of the bus).
+// BUGFIX: this was previously 0, which collides with valid lane number 0
+// (all buses below number their lanes from 0) and sorts *before* the valid
+// lane numbers, breaking the std::lower_bound matching in the accessor
+// functions.  0xFF is above the widest bus (21+2 lanes) and sorts last.
+const uint8_t INVALID_FAIL_LANE_NUMBER = 0xFF;
+
+// X-Bus is 16+1 lanes wide in 2 byte mode
+// Data lanes numbering: 0 - 15 in 2 byte mode
+// Spare lanes numbering: 16 in 2 byte mode
+const uint8_t XBUS_2_ACTIVE_LANE_START = 0;
+const uint8_t XBUS_2_ACTIVE_LANE_END = 15;
+
+const uint8_t XBUS_SPARE_DEPLOY_LANE_1 = 0;
+const uint8_t XBUS_MAXSPARES_IN_HW = 1;
+const uint8_t XBUS_MAX_LANE_WIDTH = 16;
+
+// O-Bus is 12+2 lanes wide.
+// Data lanes numbering: 0 - 11
+// Spare lane numbering: 12,13
+const uint8_t OBUS_ACTIVE_LANE_START = 0;
+const uint8_t OBUS_ACTIVE_LANE_END = 11;
+
+const uint8_t OBUS_SPARE_DEPLOY_LANE_1 = 0;
+const uint8_t OBUS_SPARE_DEPLOY_LANE_2 = 1;
+const uint8_t OBUS_MAXSPARES_IN_HW = 2;
+const uint8_t OBUS_MAX_LANE_WIDTH = 12;
+
+// UpStream DMI-Bus is 21+2 lanes wide.
+// Data lanes numbering: 0 - 20
+// Spare lanes numbering: 21, 22
+const uint8_t DMIBUS_UPSTREAM_ACTIVE_LANE_START = 0;
+const uint8_t DMIBUS_UPSTREAM_ACTIVE_LANE_END = 20;
+
+// DownStream DMI-Bus is 14+2 lanes wide.
+// Data lanes numbering: 0 - 13
+// Spare lanes numbering: 14, 15
+const uint8_t DMIBUS_DOWNSTREAM_ACTIVE_LANE_START = 0;
+const uint8_t DMIBUS_DOWNSTREAM_ACTIVE_LANE_END = 13;
+
+const uint8_t DMIBUS_SPARE_DEPLOY_LANE_1 = 0;
+const uint8_t DMIBUS_SPARE_DEPLOY_LANE_2 = 1;
+const uint8_t DMIBUS_MAXSPARES_IN_HW = 2;
+const uint8_t DMIBUS_UPSTREAM_MAX_LANE_WIDTH = 21;
+const uint8_t DMIBUS_DNSTREAM_MAX_LANE_WIDTH = 14;
+
+// Maximum sizes of the eRepair records held in the VPD, per chip flavor
+#ifdef P9_CUMULUS
+    const uint32_t EREPAIR_P9_MODULE_VPD_FIELD_SIZE = 0x10E; // 270 bytes
+    const uint32_t EREPAIR_P9_MODULE_VPD_MNFG_SIZE = 0x10E; // 270 bytes
+    const uint32_t EREPAIR_MEM_FIELD_VPD_SIZE_PER_CENTAUR = 0x36; // 54 bytes
+    const uint32_t EREPAIR_MEM_MNFG_VPD_SIZE_PER_CENTAUR = 0x36; // 54 bytes
+#else // Nimbus
+    const uint32_t EREPAIR_P9_MODULE_VPD_FIELD_SIZE = 0x66; // 102 bytes
+    const uint32_t EREPAIR_P9_MODULE_VPD_MNFG_SIZE = 0x66; // 102 bytes
+#endif
+
+// Type of bus recorded in an eRepair VPD record
+enum busType
+{
+    UNKNOWN_BUS_TYPE = 0,
+    PROCESSOR_OPT = 1,
+    PROCESSOR_EDIP = 2,
+    MEMORY_EDIP = 3
+};
+
+enum interfaceType
+{
+    UNKNOWN_INT_TYPE = 0,
+    PBUS_DRIVER = 1, // X-Bus, O-Bus transmit
+    PBUS_RECEIVER = 2, // X-Bus, O-Bus receive
+    DMI_MCS_RECEIVE = 3, // MCS receive
+    DMI_MCS_DRIVE = 4, // MCS transmit
+    DMI_MEMBUF_RECEIVE = 5, // Centaur receive
+    DMI_MEMBUF_DRIVE = 6, // Centaur transmit
+    DRIVE = 7, // Tx
+    RECEIVE = 8 // Rx
+};
+
+// VPD Type to read-write
+enum erepairVpdType
+{
+    EREPAIR_VPD_UNKNOWN = 0,
+    EREPAIR_VPD_MNFG = 1,
+    EREPAIR_VPD_FIELD = 2,
+};
+
+}// end of EREPAIR namespace
+
+/******************************************************************************
+ * VPD Structures.
+ *****************************************************************************/
+
+// eRepair Header
+struct eRepairHeader
+{
+    struct
+    {
+        uint8_t eye1;
+        uint8_t eye2;
+    } eyeCatcher;
+
+    uint8_t version;
+    uint8_t sizeOfRecord;
+    uint8_t maxNumRecord;
+    uint8_t availNumRecord;
+};
+
+// Device info structure of the P9 Processor
+struct eRepairProcDevInfo
+{
+    uint8_t processor_id;// Range:0x00-0xFF. Value:Processor MRU IDs
+    uint8_t fabricBus; // Range:0x00-0xFF. Value: FabricBus(ATTR_CHIP_UNIT_POS)
+};
+
+// eRepair structure for failing lanes on Power Bus
+struct eRepairPowerBus
+{
+    eRepairProcDevInfo device; // Device info of P9
+    uint8_t type : 4; // Range:0x0-0xF. Value:PROCESSOR_EDIP
+    uint8_t interface : 4; // Range:0x0-0xF. Value:[PBUS_DRIVER|PBUS_RECEIVER]
+    uint32_t failBit : 24; // Bit stream value: Bit 0:Lane 0; Bit 1:Lane 1 ...
+};
+
+
+// Device info structure of the endpoints of the Memory Channel
+struct eRepairMemDevInfo
+{
+    uint8_t proc_centaur_id;// Range:0x00-0xFF.Value:Processor or Centaur MRU ID
+    uint8_t memChannel; // Range:0x00-0xFF.Value: MemoryBus(ATTR_CHIP_UNIT_POS)
+};
+
+// eRepair structure of failing lanes on Memory Channel
+struct eRepairMemBus
+{
+    eRepairMemDevInfo device; // Device info of P9 and Centaur
+    uint8_t type : 4; // Range:0x0-0xF. Value:MEMORY_EDIP
+    uint8_t interface : 4; // Range:0x0-0xF. Value:[MCS_Receive|MCS_Drive|memBuf_Receive|memBuf_Drive]
+    uint32_t failBit : 24; // Bit stream value: Bit 0:Lane 0; Bit 1:Lane 1 ...
+};
+
+#endif
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.C b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.C
new file mode 100755
index 000000000..68938da65
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.C
@@ -0,0 +1,708 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.C $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairGetFailedLanesHwp.C
+/// @brief FW Team HWP that accesses the fail lanes of Fabric and Memory buses.
+///
+//----------------------------------------------------------------------------
+// *HWP HWP Owner : Chris Steffen <cwsteffen@us.ibm.com>
+// *HWP HWP Backup Owner: Gary Peterson <garyp@us.ibm.com>
+// *HWP FW Owner : Sumit Kumar <sumit_kumar@in.ibm.com>
+// *HWP Team : IO
+// *HWP Level : 2
+// *HWP Consumed by : FSP:HB
+//----------------------------------------------------------------------------
+#include <fapi2.H>
+#include <p9_io_erepairConsts.H>
+#include <p9_io_erepairGetFailedLanesHwp.H>
+#include <mvpd_access.H>
+
+using namespace EREPAIR;
+using namespace fapi2;
+
+/******************************************************************************
+ * Forward Declarations
+ *****************************************************************************/
+
+/**
+ * @brief Function called by the FW Team HWP that reads the data from Field VPD.
+ * This function makes the actual calls to read the VPD
+ * It determines the size of the buffer to be read, allocates memory
+ * of the determined size, calls fapiGetMvpdField to read the eRepair
+ * records. This buffer is further passed to another routine for
+ * parsing.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_vpdType Specifies which VPD (MNFG or Field) to access.
+ * @param[in] i_clkGroup Specifies clock group 0:[XOA, X1A,..] 1:[X0B, X1B,..]
+ * @param[o] o_txFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Tx sub-interface.
+ * @param[o] o_rxFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Rx sub-interface.
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode retrieveRepairData(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_txFailLanes,
+ std::vector<uint8_t>& o_rxFailLanes);
+
+/**
+ * @brief Function called by the FW Team HWP that parses the data read from
+ * Field VPD. This function matches each eRepair record read from the VPD
+ * and matches it against the attributes of the passed target.
+ * If a match is found, the corresponding eRepair record is copied into
+ * the respective failLane vectors to be returned to the caller.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_buf This is the buffer that has the eRepair records
+ * read from the VPD
+ * @param[in] i_bufSz This is the size of passed buffer in terms of bytes
+ * @param[in] i_clkGroup Specifies clock group 0:[XOA, X1A,..] 1:[X0B, X1B,..]
+ * @param[o] o_txFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Tx sub-interface.
+ * @param[o] o_rxFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Rx sub-interface.
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode determineRepairLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t* i_buf,
+ uint32_t i_bufSz,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_txFailLanes,
+ std::vector<uint8_t>& o_rxFailLanes);
+
+
+/**
+ * @brief Function to check if the system has Custom DIMM type (CDIMM).
+ * Attribute ATTR_EFF_CUSTOM_DIMM is read to determine the type.
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[o] o_customDimm Return value - ENUM_ATTR_EFF_CUSTOM_DIMM_NO
+ * or ENUM_ATTR_EFF_CUSTOM_DIMM_YES
+ * @return ReturnCode
+ */
+fapi2::ReturnCode getDimmType(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t& o_customDimm);
+
+
+/**
+ * @brief Function called by the HWP that parses the data read from VPD.
+ * This function scans through failBit field bit pattern and checks
+ * for all bits that are set. For bits SET the corresponding bit positions
+ * marks the failed lane number and is copied into
+ * the respective failLane vectors to be returned to the caller.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target type
+ * @param[in] i_busInterface Reference to target sub interface
+ * @param[in] i_failBit This is the failBit field from the eRepair records
+ * read from the VPD
+ * @param[o] o_FailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Rx/Tx sub-interface.
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode decodeFailedLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t i_busInterface,
+ uint32_t i_failBit,
+ std::vector<uint8_t>& o_FailLanes);
+
+/******************************************************************************
+ * Accessor HWP
+ *****************************************************************************/
+
+/**
+ * @brief Accessor HWP entry point: return the fail lanes recorded for the
+ *        given endpoint in the requested VPD (Field or Manufacturing).
+ *
+ * @param[in]  i_target      Reference to X-Bus/O-Bus/MEMBUF/MCS target
+ * @param[in]  i_vpdType     Which VPD (EREPAIR_VPD_MNFG or EREPAIR_VPD_FIELD)
+ * @param[in]  i_clkGroup    Clock group 0:[XOA, X1A,..] 1:[X0B, X1B,..]
+ * @param[out] o_txFailLanes Tx fail lane numbers found in the VPD
+ * @param[out] o_rxFailLanes Rx fail lane numbers found in the VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode p9_io_erepairGetFailedLanesHwp(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    erepairVpdType i_vpdType,
+    const uint8_t i_clkGroup,
+    std::vector<uint8_t>& o_txFailLanes,
+    std::vector<uint8_t>& o_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    FAPI_INF(">> erepairGetFailedLanesHwp");
+
+    // Start from empty vectors; retrieveRepairData() appends the fail
+    // lanes it finds in the requested VPD
+    o_txFailLanes.clear();
+    o_rxFailLanes.clear();
+
+    FAPI_TRY(retrieveRepairData(i_target, i_vpdType, i_clkGroup,
+                                o_txFailLanes, o_rxFailLanes),
+             "p9_io_erepairGetFailedLanesHwp() failed w/rc=0x%x",
+             (uint64_t)current_err);
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+// Reads the raw eRepair record buffer for the given endpoint from the
+// requested VPD (Mnfg or Field) and parses it into Tx/Rx fail lane lists.
+// See the doxygen on the prototype above for the parameter description.
+fapi2::ReturnCode retrieveRepairData(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    erepairVpdType i_vpdType,
+    const uint8_t i_clkGroup,
+    std::vector<uint8_t>& o_txFailLanes,
+    std::vector<uint8_t>& o_rxFailLanes)
+{
+    fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+    uint8_t* l_retBuf = NULL;
+    uint32_t l_bufSize = 0;
+    fapi2::Target<fapi2::TARGET_TYPE_PROC_CHIP> l_procTarget;
+#ifdef P9_CUMULUS
+    uint8_t l_customDimm;
+#endif
+
+    FAPI_DBG(">> retrieveRepairData");
+
+    if(i_target.getType() == fapi2::TARGET_TYPE_MEMBUF_CHIP)
+    {
+#ifdef P9_CUMULUS
+        fapi2::MBvpdRecord l_vpdRecord = fapi2::MBVPD_RECORD_VEIR;
+
+        if(i_vpdType == EREPAIR_VPD_MNFG)
+        {
+            l_vpdRecord = fapi2::MBVPD_RECORD_MER0;
+        }
+
+        // Determine the size of the eRepair data in the VPD
+        FAPI_TRY( getMBvpdField(
+                      l_vpdRecord,
+                      fapi2::MBVPD_KEYWORD_PDI,
+                      i_target,
+                      NULL,
+                      l_bufSize),
+                  "VPD size read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        // Check whether we have Memory on a CDIMM
+        l_rc = getDimmType(i_target, l_customDimm);
+
+        FAPI_ASSERT((uint64_t)l_rc == 0x0,
+                    fapi2::P9_EREPAIR_DIMM_TYPE_CHECK_ERR()
+                    .set_ERROR(l_rc),
+                    "ERROR: DIMM type check");
+
+        if( (l_customDimm == fapi2::ENUM_ATTR_SPD_CUSTOM_YES) || (l_bufSize == 0) )
+        {
+            if((l_bufSize == 0) ||
+               ((i_vpdType == EREPAIR_VPD_FIELD) &&
+                (l_bufSize > EREPAIR_MEM_FIELD_VPD_SIZE_PER_CENTAUR)) ||
+               ((i_vpdType == EREPAIR_VPD_MNFG) &&
+                (l_bufSize > EREPAIR_MEM_MNFG_VPD_SIZE_PER_CENTAUR)))
+            {
+                FAPI_ASSERT(false,
+                            fapi2::P9_EREPAIR_ACCESSOR_HWP_INVALID_MEM_VPD_SIZE_ERR()
+                            .set_ERROR(l_rc),
+                            "ERROR: Invalid MEM VPD size");
+            }
+        }
+
+        // Allocate memory for buffer
+        l_retBuf = new uint8_t[l_bufSize];
+
+        // NOTE(review): only meaningful if the platform new returns NULL
+        // on failure rather than throwing - confirm platform behavior
+        FAPI_ASSERT(l_retBuf != NULL,
+                    fapi2::P9_EREPAIR_ACCESSOR_HWP_MEMORY_ALLOC_FAIL_ERR()
+                    .set_BUF_SIZE(l_bufSize),
+                    "ERROR: Failed to allocate memory size");
+
+        // Retrieve the Field eRepair data from the PNOR
+        FAPI_TRY( getMBvpdField(
+                      l_vpdRecord,
+                      fapi2::MBVPD_KEYWORD_PDI,
+                      i_target,
+                      l_retBuf,
+                      l_bufSize),
+                  "VPD read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+#endif
+    }
+    else
+    {
+        // Determine the Processor target
+        l_procTarget = i_target.getParent<fapi2::TARGET_TYPE_PROC_CHIP>();
+
+        fapi2::MvpdRecord l_vpdRecord = fapi2::MVPD_RECORD_VWML;
+
+        if(i_vpdType == EREPAIR_VPD_MNFG)
+        {
+            l_vpdRecord = fapi2::MVPD_RECORD_MER0;
+        }
+
+        // Determine the size of the eRepair data in the VPD
+        FAPI_TRY( getMvpdField(
+                      l_vpdRecord,
+                      fapi2::MVPD_KEYWORD_PDI,
+                      l_procTarget,
+                      NULL,
+                      l_bufSize),
+                  "VPD size read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        if((l_bufSize == 0) ||
+           ((i_vpdType == EREPAIR_VPD_FIELD) &&
+            (l_bufSize > EREPAIR_P9_MODULE_VPD_FIELD_SIZE)) ||
+           ((i_vpdType == EREPAIR_VPD_MNFG) &&
+            (l_bufSize > EREPAIR_P9_MODULE_VPD_MNFG_SIZE)))
+        {
+            FAPI_ASSERT(false,
+                        fapi2::P9_EREPAIR_ACCESSOR_HWP_INVALID_FABRIC_VPD_SIZE_ERR()
+                        .set_ERROR(current_err),
+                        "ERROR: Invalid Fabric VPD size");
+        }
+
+        // Allocate memory for buffer
+        l_retBuf = new uint8_t[l_bufSize];
+
+        // NOTE(review): only meaningful if the platform new returns NULL
+        // on failure rather than throwing - confirm platform behavior
+        FAPI_ASSERT(l_retBuf != NULL,
+                    fapi2::P9_EREPAIR_ACCESSOR_HWP_MEMORY_ALLOC_FAIL_ERR()
+                    .set_BUF_SIZE(l_bufSize),
+                    "ERROR: Failed to allocate memory size");
+
+        // Retrieve the Field eRepair data from the PNOR
+        FAPI_TRY( getMvpdField(
+                      l_vpdRecord,
+                      fapi2::MVPD_KEYWORD_PDI,
+                      l_procTarget,
+                      l_retBuf,
+                      l_bufSize),
+                  "VPD read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+    }
+
+    // Parse the buffer to determine eRepair lanes and copy the
+    // fail lane numbers to the return vector
+    FAPI_TRY( determineRepairLanes(
+                  i_target,
+                  l_retBuf,
+                  l_bufSize,
+                  i_clkGroup,
+                  o_txFailLanes,
+                  o_rxFailLanes),
+              "Call to determineRepairLanes failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+    FAPI_DBG("<< retrieveRepairData");
+
+fapi_try_exit:
+    // BUGFIX: free the record buffer on the error paths as well.  The
+    // delete[] used to sit before this label, so any FAPI_TRY/FAPI_ASSERT
+    // failure after the allocation leaked the buffer.  delete[] of NULL
+    // (jump taken before the allocation) is a safe no-op.
+    delete[] l_retBuf;
+    return fapi2::current_err;
+}
+
+fapi2::ReturnCode determineRepairLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t* i_buf,
+ uint32_t i_bufSz,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_txFailLanes,
+ std::vector<uint8_t>& o_rxFailLanes)
+{
+ uint32_t l_numRepairs = 0;
+ uint8_t* l_vpdPtr = NULL;
+ eRepairHeader* l_vpdHeadPtr = NULL;
+ uint32_t l_loop = 0;
+ uint32_t l_bytesParsed = 0;
+ const uint32_t l_fabricRepairDataSz = sizeof(eRepairPowerBus);
+#ifdef P9_CUMULUS
+ const uint32_t l_memRepairDataSz = sizeof(eRepairMemBus);
+ uint8_t l_customDimm;
+#endif
+ fapi2::TargetType l_tgtType = fapi2::TARGET_TYPE_NONE;
+ fapi2::Target<fapi2::TARGET_TYPE_MCS> l_mcsTarget;
+ fapi2::Target<fapi2::TARGET_TYPE_PROC_CHIP> l_chipTarget;
+ fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+ fapi2::ATTR_CHIP_UNIT_POS_Type l_busNum;
+ bool l_bClkGroupFound = false;
+ fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+ FAPI_DBG(">> determineRepairLanes");
+
+ l_tgtType = i_target.getType();
+
+ // Get the parent chip target
+ l_chipTarget = i_target.getParent<fapi2::TARGET_TYPE_PROC_CHIP>();
+
+ // Get the chip position
+ uint32_t l_chipPosition;
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_POS,
+ l_chipTarget,
+ l_chipPosition));
+
+ // Read the header and count information
+ l_vpdPtr = i_buf; // point to the start of header data
+ l_vpdHeadPtr = reinterpret_cast<eRepairHeader*> (l_vpdPtr);
+
+ l_numRepairs = l_vpdHeadPtr->availNumRecord;
+
+ l_bytesParsed = sizeof(eRepairHeader); // we've read the header data
+ l_vpdPtr += sizeof(eRepairHeader); // point to the start of repair data
+
+ // Parse for Power bus data
+ if((l_tgtType == fapi2::TARGET_TYPE_XBUS) ||
+ (l_tgtType == fapi2::TARGET_TYPE_OBUS))
+ {
+ eRepairPowerBus* l_fabricBus;
+
+ // Read Power bus eRepair data and get the failed lane numbers
+ for(l_loop = 0;
+ l_loop < l_numRepairs;
+ l_loop++, (l_vpdPtr += l_fabricRepairDataSz))
+ {
+ // Make sure we are not parsing more data than the passed size
+ l_bytesParsed += l_fabricRepairDataSz;
+
+ if(l_bytesParsed > i_bufSz)
+ {
+ break;
+ }
+
+ l_fabricBus = reinterpret_cast<eRepairPowerBus*>(l_vpdPtr);
+
+#ifndef _BIG_ENDIAN
+ // We are on a Little Endian system.
+ // Need to swap the nibbles of the structure - eRepairPowerBus
+
+ uint8_t l_temp = l_vpdPtr[2];
+ l_fabricBus->type = (l_temp >> 4);
+ l_fabricBus->interface = (l_temp & 0x0F);
+#endif
+
+ // We do not need the check of processor ID because
+ // a MVPD read is specific to a Processor
+
+ // Check if we have the matching the Fabric Bus types
+ if((l_tgtType == fapi2::TARGET_TYPE_OBUS) &&
+ (l_fabricBus->type != PROCESSOR_OPT))
+ {
+ continue;
+ }
+
+ if((l_tgtType == fapi2::TARGET_TYPE_XBUS) &&
+ (l_fabricBus->type != PROCESSOR_EDIP))
+ {
+ continue;
+ }
+
+ // Check if we have the matching fabric bus interface
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_CHIP_UNIT_POS,
+ i_target,
+ l_busNum));
+
+ if(l_fabricBus->device.fabricBus != l_busNum)
+ {
+ continue;
+ }
+
+ if(i_clkGroup > 0 && !l_bClkGroupFound)
+ {
+ l_vpdPtr += l_fabricRepairDataSz;
+ l_bClkGroupFound = true;
+ continue;
+ }
+
+ // Copy the fail lane numbers in the vectors
+ if(l_fabricBus->interface == PBUS_DRIVER)
+ {
+ decodeFailedLanes(i_target, l_fabricBus->interface,
+ l_fabricBus->failBit, o_txFailLanes);
+ }
+ else if(l_fabricBus->interface == PBUS_RECEIVER)
+ {
+ decodeFailedLanes(i_target, l_fabricBus->interface,
+ l_fabricBus->failBit, o_rxFailLanes);
+ }
+ } // end of for loop
+ } // end of if(l_tgtType is XBus or OBus)
+ else if((l_tgtType == fapi2::TARGET_TYPE_MCS_CHIPLET) ||
+ (l_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP))
+ {
+#ifdef P9_CUMULUS
+ // Parse for Memory bus data
+ eRepairMemBus* l_memBus;
+
+ if(l_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP)
+ {
+ l_rc = fapiGetOtherSideOfMemChannel(
+ i_target,
+ l_mcsTarget,
+ fapi2::TARGET_STATE_FUNCTIONAL);
+
+ FAPI_ASSERT((uint64_t)l_rc == 0x0,
+ fapi2::P9_EREPAIR_UNABLE_CONNECT_MCS_TARGET_ERR()
+ .set_ERROR(l_rc),
+ "ERROR: determineRepairLanes: Unable to get the connected to MCS target");
+
+ // Check whether we have Memory on a CDIMM
+ l_rc = getDimmType(i_target, l_customDimm);
+
+ FAPI_ASSERT((uint64_t)l_rc == 0x0,
+ fapi2::P9_EREPAIR_DIMM_TYPE_CHECK_ERR()
+ .set_ERROR(l_rc),
+ "ERROR: DIMM type check");
+ }
+
+ // Read Power bus eRepair data and get the failed lane numbers
+ for(l_loop = 0;
+ l_loop < l_numRepairs;
+ l_loop++, (l_vpdPtr += l_memRepairDataSz))
+ {
+ // Make sure we are not parsing more data than the passed size
+ l_bytesParsed += l_memRepairDataSz;
+
+ if(l_bytesParsed > i_bufSz)
+ {
+ break;
+ }
+
+ l_memBus = reinterpret_cast<eRepairMemBus*>(l_vpdPtr);
+
+#ifndef _BIG_ENDIAN
+ // We are on a Little Endian system.
+ // Need to swap the nibbles of the structure - eRepairMemBus
+
+ uint8_t l_temp = l_vpdPtr[2];
+ l_memBus->type = (l_temp >> 4);
+ l_memBus->interface = (l_temp & 0x0F);
+#endif
+
+ // Check if we have the correct Centaur ID
+ // NOTE: We do not prefer to make the check of Centaur ID if the
+ // system is known to have CDIMMs. This check is applicable
+ // only for systems with ISDIMM because in the ISDIMM systems
+ // the Lane eRepair data for multiple Centaurs is maintained in
+ // a common VPD.
+
+ if((l_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP) &&
+ (l_customDimm != fapi2::ENUM_ATTR_SPD_CUSTOM_YES) &&
+ (l_chipPosition != l_memBus->device.proc_centaur_id))
+ {
+ continue;
+ }
+
+ // Check if we have the matching the Memory Bus types
+ if(l_memBus->type != MEMORY_EDIP)
+ {
+ continue;
+ }
+
+ // Check if we have the matching memory bus interface
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_CHIP_UNIT_POS,
+ l_mcsTarget,
+ l_busNum));
+
+ if(l_memBus->device.memChannel != l_busNum)
+ {
+ continue;
+ }
+
+ // Copy the fail lane numbers in the vectors
+ if(l_tgtType == fapi2::TARGET_TYPE_MCS_CHIPLET)
+ {
+ if(l_memBus->interface == DMI_MCS_DRIVE)
+ {
+ decodeFailedLanes(i_target, l_memBus->interface,
+ l_memBus->failBit, o_txFailLanes);
+ }
+ else if(l_memBus->interface == DMI_MCS_RECEIVE)
+ {
+ decodeFailedLanes(i_target, l_memBus->interface,
+ l_memBus->failBit, o_rxFailLanes);
+ }
+ }
+ else if(l_tgtType == fapi2::TARGET_TYPE_MEMBUF_CHIP)
+ {
+ if(l_memBus->interface == DMI_MEMBUF_DRIVE)
+ {
+ decodeFailedLanes(i_target, l_memBus->interface,
+ l_memBus->failBit, o_txFailLanes);
+ }
+ else if(l_memBus->interface == DMI_MEMBUF_RECEIVE)
+ {
+ decodeFailedLanes(i_target, l_memBus->interface,
+ l_memBus->failBit, o_rxFailLanes);
+ }
+ }
+ } // end of for loop
+
+#endif
+ } // end of if(l_tgtType is MCS)
+
+ FAPI_INF("<< No.of Fail Lanes: tx: %zd, rx: %zd",
+ o_txFailLanes.size(), o_rxFailLanes.size());
+
+fapi_try_exit:
+ return fapi2::current_err;
+}
+
+/**
+ * @brief Determine the DIMM type (custom CDIMM vs. ISDIMM) behind a target.
+ *
+ * Looks up the first functional child MBA chiplet of i_target, fetches the
+ * DIMMs associated with it, and reads ATTR_SPD_CUSTOM from the first DIMM.
+ *
+ * @param[in]  i_target      X-Bus/O-Bus/MCS/memBuf target to query
+ * @param[out] o_customDimm  ENUM_ATTR_SPD_CUSTOM_YES or _NO; defaults to
+ *                           _NO when no DIMMs are found.  NOTE: left
+ *                           unmodified on non-P9_CUMULUS builds.
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode getDimmType(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    uint8_t& o_customDimm)
+{
+    fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+#ifdef P9_CUMULUS
+    std::vector<fapi2::Target<fapi2::TARGET_TYPE_MBA_CHIPLET>> l_mbaChiplets;
+    fapi2::Target<fapi2::TARGET_TYPE_MBA> l_mbaTarget;
+    std::vector<fapi2::Target<fapi2::TARGET_TYPE_DIMM>> l_target_dimm_array;
+
+    // Default to "not custom" until proven otherwise
+    o_customDimm = fapi2::ENUM_ATTR_SPD_CUSTOM_NO;
+
+    // Get the connected MBA chiplet and determine whether we have CDIMM
+    l_rc = fapiGetChildChiplets(i_target,
+                                fapi2::TARGET_TYPE_MBA_CHIPLET,
+                                l_mbaChiplets,
+                                fapi2::TARGET_STATE_FUNCTIONAL);
+
+    // NOTE(review): this condition passes when EITHER the rc is good OR the
+    // list is non-empty; if rc==0 but the list is empty, l_mbaChiplets[0]
+    // below indexes an empty vector — confirm whether '&&' was intended.
+    FAPI_ASSERT( ((uint64_t)l_rc == 0x0) || (0 != l_mbaChiplets.size()),
+                 fapi2::P9_EREPAIR_CHILD_MBA_TARGETS_ERR()
+                 .set_ERROR(l_rc),
+                 "ERROR: During get child MBA targets");
+
+    // Only the first functional MBA chiplet is consulted
+    l_mbaTarget = l_mbaChiplets[0];
+    l_rc = fapiGetAssociatedDimms(l_mbaTarget, l_target_dimm_array);
+
+    FAPI_ASSERT( (uint64_t)l_rc == 0x0,
+                 fapi2::P9_EREPAIR_GET_ASSOCIATE_DIMMS_ERR()
+                 .set_ERROR(l_rc),
+                 "ERROR: from fapiGetAssociatedDimms");
+
+    if(0 != l_target_dimm_array.size())
+    {
+        // Read the custom-DIMM flag from the first associated DIMM
+        FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_SPD_CUSTOM,
+                               l_target_dimm_array[0],
+                               o_customDimm));
+    }
+    else
+    {
+        // No DIMMs present: report "not custom"
+        o_customDimm = fapi2::ENUM_ATTR_SPD_CUSTOM_NO;
+    }
+
+fapi_try_exit:
+#endif
+    return l_rc;
+}
+
+/**
+ * @brief Expand a 32-bit fail-bit mask into a list of failed lane numbers.
+ *
+ * The mask is MSB-first: lane N has failed when bit N counted from the
+ * most-significant bit (0x80000000) is set.  Only the first maxBusLanes
+ * bits are examined; the width depends on the target type and, for DMI,
+ * on the sub-interface direction.
+ *
+ * @param[in]  i_target       X-Bus/O-Bus/MCS/memBuf target
+ * @param[in]  i_busInterface Sub-interface type (selects DMI up/down width)
+ * @param[in]  i_failBit      Fail-bit mask from the eRepair VPD record
+ * @param[out] o_FailLanes    Failed lane numbers are appended to this vector
+ *
+ * @return ReturnCode (always FAPI2_RC_SUCCESS)
+ */
+fapi2::ReturnCode decodeFailedLanes(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    uint8_t i_busInterface,
+    uint32_t i_failBit,
+    std::vector<uint8_t>& o_FailLanes)
+{
+    fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+    uint8_t loop;
+    uint8_t maxBusLanes = 0;
+    uint32_t checkBitPosition = (0x80000000);  // MSB == lane 0
+
+    FAPI_DBG(">> decodeFailedLanes");
+
+    // Check for target type and corresponding sub interface
+    // to get max lanes supported per interface
+    if(i_target.getType() == fapi2::TARGET_TYPE_OBUS)
+    {
+        maxBusLanes = OBUS_MAX_LANE_WIDTH; //OBUS
+    }
+    else if(i_target.getType() == fapi2::TARGET_TYPE_XBUS)
+    {
+        maxBusLanes = XBUS_MAX_LANE_WIDTH; //XBUS
+    }
+    else if((i_target.getType() == fapi2::TARGET_TYPE_MEMBUF_CHIP) ||
+            (i_target.getType() == fapi2::TARGET_TYPE_MCS_CHIPLET) ||
+            (i_target.getType() == fapi2::TARGET_TYPE_MCS)) //DMI
+    {
+        // Downstream (MCS rx / memBuf tx) and upstream (MCS tx / memBuf rx)
+        // halves of the DMI channel have different lane widths
+        if( (i_busInterface == DMI_MCS_RECEIVE) ||
+            (i_busInterface == DMI_MEMBUF_DRIVE) )
+        {
+            maxBusLanes = DMIBUS_DNSTREAM_MAX_LANE_WIDTH;
+        }
+        else if( (i_busInterface == DMI_MCS_DRIVE) ||
+                 (i_busInterface == DMI_MEMBUF_RECEIVE) )
+        {
+            maxBusLanes = DMIBUS_UPSTREAM_MAX_LANE_WIDTH;
+        }
+    }
+
+    // NOTE(review): for any other target/interface combination maxBusLanes
+    // stays 0 and the loop below decodes nothing — silently empty output.
+
+    //Check for all the failed bit SET in the bit stream and update the vector
+    //And print the failed lanes (the label below precedes a per-lane list,
+    //not a count)
+    FAPI_INF("No. of Failed Lanes:");
+
+    for( loop = 0;
+         loop < maxBusLanes;
+         loop++ )
+    {
+        if( i_failBit & ( checkBitPosition >> loop ) )
+        {
+            o_FailLanes.push_back(loop);
+            FAPI_INF("%d", loop);
+        }
+    }
+
+    FAPI_DBG("<< decodeFailedLanes");
+    return l_rc;
+}
+
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.H b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.H
new file mode 100755
index 000000000..c63575379
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.H
@@ -0,0 +1,90 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairGetFailedLanesHwp.H
+/// @brief FW Team HWP that accesses the fail lanes of Fabric and Memory buses.
+///
+//----------------------------------------------------------------------------
+// *HWP HWP Owner : Chris Steffen <cwsteffen@us.ibm.com>
+// *HWP HWP Backup Owner: Gary Peterson <garyp@us.ibm.com>
+// *HWP FW Owner : Sumit Kumar <sumit_kumar@in.ibm.com>
+// *HWP Team : IO
+// *HWP Level : 2
+// *HWP Consumed by : FSP:HB
+//----------------------------------------------------------------------------
+
+#ifndef P9_IO_EREPAIRGETFAILEDLANESHWP_H_
+#define P9_IO_EREPAIRGETFAILEDLANESHWP_H_
+
+#include <fapi2.H>
+#include <p9_io_erepairConsts.H>
+
+
+typedef fapi2::ReturnCode (*p9_io_erepairGetFailedLanesHwp_FP_t)(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ EREPAIR::erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_txFailLanes,
+ std::vector<uint8_t>& o_rxFailLanes);
+
+extern "C"
+{
+
+ /**
+ * @brief FW Team HWP that retrieves the eRepair fail lanes.
+ * It retrieves the eRepair data from the P9 MVPD and the Centaur FRU
+ * VPD sections depending on the passed target type. It then parses the
+ * eRepair data to determine the fail lane numbers on the sub-interfaces
+ * (Tx and Rx) of the passed bus target.
+ *
+     * @param[in] i_target Reference to X-Bus or O-Bus or MCS or memBuf Target
+     * @param[in] i_vpdType Specifies which VPD (MNFG or Field) to access.
+     * @param[in] i_clkGroup Specifies clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[o] o_txFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Tx sub-interface.
+ * @param[o] o_rxFailLanes Reference to a vector that will hold eRepair fail
+ * lane numbers of the Rx sub-interface.
+ *
+ * @return ReturnCode
+ *
+ */
+ fapi2::ReturnCode p9_io_erepairGetFailedLanesHwp(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ EREPAIR::erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ std::vector<uint8_t>& o_txFailLanes,
+ std::vector<uint8_t>& o_rxFailLanes);
+
+}// end of extern C
+
+#endif
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.mk b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.mk
new file mode 100644
index 000000000..554d5ecd0
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.mk
@@ -0,0 +1,27 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairGetFailedLanesHwp.mk $
+#
+# OpenPOWER HostBoot Project
+#
+# Contributors Listed Below - COPYRIGHT 2015,2017
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+PROCEDURE=p9_io_erepairGetFailedLanesHwp
+$(call BUILD_PROCEDURE)
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.C b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.C
new file mode 100755
index 000000000..1632ddb3c
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.C
@@ -0,0 +1,1158 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.C $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairSetFailedLanesHwp.C
+/// @brief FW Team HWP that accesses the fail lanes of Fabric and Memory buses.
+///
+//----------------------------------------------------------------------------
+// *HWP HWP Owner : Chris Steffen <cwsteffen@us.ibm.com>
+// *HWP HWP Backup Owner: Gary Peterson <garyp@us.ibm.com>
+// *HWP FW Owner : Sumit Kumar <sumit_kumar@in.ibm.com>
+// *HWP Team : IO
+// *HWP Level : 2
+// *HWP Consumed by : FSP:HB
+//----------------------------------------------------------------------------
+
+#include <fapi2.H>
+#include <p9_io_erepairConsts.H>
+#include <p9_io_erepairSetFailedLanesHwp.H>
+#include <mvpd_access.H>
+
+using namespace EREPAIR;
+using namespace fapi2;
+
+/******************************************************************************
+ * Forward Declarations
+ *****************************************************************************/
+
+/**
+ * @brief Function called by the FW Team HWP that writes the data to Field VPD.
+ * This function calls fapiSetMvpdField to write the VPD.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_vpdType Specifies which VPD (MNFG or Field) to access.
+ * @param[in] i_clkGroup Specifies clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_txFailLanes Reference to a vector that has eRepair fail
+ * lane numbers of the Tx sub-interface.
+ * @param[in] i_rxFailLanes Reference to a vector that has eRepair fail
+ * lane numbers of the Rx sub-interface.
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode writeRepairDataToVPD(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ const std::vector<uint8_t>& i_txFailLanes,
+ const std::vector<uint8_t>& i_rxFailLanes);
+
+/**
+ * @brief Function called by the FW Team HWP that updates the passed buffer
+ * with the eRepair faillane numbers.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_txFailLanes Reference to a vector that has the Tx side faillane
+ * numbers that need to be updated to the o_buf buffer
+ * @param[in] i_rxFailLanes Reference to a vector that has the Rx side faillane
+ * numbers that need to be updated to the o_buf buffer
+ * @param[in] i_bufSz This is the size of passed buffer in terms of bytes
+ * @param[in] i_clkGroup Specifies clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[o] o_buf This is the buffer that has the eRepair records
+ * that needs to be written to the VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode writeRepairLanesToBuf(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ const std::vector<uint8_t>& i_txFailLanes,
+ const std::vector<uint8_t>& i_rxFailLanes,
+ const uint32_t i_bufSz,
+ const uint8_t i_clkGroup,
+ uint8_t* o_buf);
+
+/**
+ * @brief Function called by the FW Team HWP that updates the passed buffer
+ * with the eRepair faillane numbers of a specified interface.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_interface This indicates the sub-interface type the passed
+ * faillane vector represents
+ * @param[in] i_bufSz This is the size of passed buffer in terms of bytes
+ * @param[in] i_clkGroup Specifies clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_failLanes Reference to a vector that has the faillane numbers
+ * that need to be updated to the o_buf buffer
+ * @param[o] o_buf This is the buffer that has the eRepair records
+ * that needs to be written to the VPD
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode updateRepairLanesToBuf(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ const interfaceType i_interface,
+ const uint32_t i_bufSz,
+ const uint8_t i_clkGroup,
+ const std::vector<uint8_t>& i_failLanes,
+ uint8_t* o_buf);
+
+/**
+ * @brief Function called by the FW Team HWP that updates the passed buffer
+ * with the eRepair faillane numbers of a specified interface.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS target
+ * @param[in] i_busInterface This indicates the sub-interface type the passed
+ * faillane vector represents
+ * @param[in] i_repairLane Reference to the faillane number
+ * that need to be updated to fail bits field
+ * @param[o] o_failBit This is the failed lanes data that maintains the
+ * eRepair record that needs to be updated with fail
+ * lane number
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode gatherRepairLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t i_busInterface,
+ uint8_t i_repairLane,
+ uint32_t* o_failBit);
+
+
+/******************************************************************************
+ * Accessor HWP
+ *****************************************************************************/
+
+/**
+ * @brief FW Team HWP that persists eRepair fail lane numbers to VPD.
+ *        Validates the input vectors and delegates the actual read/merge/
+ *        write of the VPD buffer to writeRepairDataToVPD().  See the
+ *        header file for the full interface contract.
+ */
+fapi2::ReturnCode p9_io_erepairSetFailedLanesHwp(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    erepairVpdType i_vpdType,
+    const uint8_t i_clkGroup,
+    const std::vector<uint8_t>& i_txFailLanes,
+    const std::vector<uint8_t>& i_rxFailLanes)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+    // NOTE(review): l_mcsTgt is declared but never used in this function
+    fapi2::Target<fapi2::TARGET_TYPE_MCS> l_mcsTgt;
+
+    FAPI_INF(">> erepairSetFailedLanesHwp");
+
+    // Guard: errors out unless BOTH vectors are non-empty.
+    // NOTE(review): the message says "No Tx/Rx fail lanes were provided",
+    // which suggests the intent may have been (tx || rx) — i.e. fail only
+    // when both are empty.  writeRepairLanesToBuf() tolerates one empty
+    // vector, so confirm whether '&&' is intended here.
+    FAPI_ASSERT(( (i_txFailLanes.size() != 0) && (i_rxFailLanes.size() != 0) ),
+                fapi2::P9_EREPAIR_NO_RX_TX_FAILED_LANES_ERR()
+                .set_TX_LANE(i_txFailLanes.size()).set_RX_LANE(i_rxFailLanes.size()),
+                "ERROR: No Tx/Rx fail lanes were provided");
+
+    // Merge the fail lanes into the appropriate VPD image and write it back
+    FAPI_TRY( writeRepairDataToVPD(
+                  i_target,
+                  i_vpdType,
+                  i_clkGroup,
+                  i_txFailLanes,
+                  i_rxFailLanes),
+              "p9_io_erepairSetFailedLanesHwp() failed w/rc=0x%x",
+              (uint64_t)current_err );
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+
+/**
+ * @brief Read the existing eRepair buffer from VPD, merge in the new Tx/Rx
+ *        fail lanes, and write the updated buffer back.
+ *
+ * Memory-buffer (Centaur) targets use the FRU VPD (VEIR record, or MER0 for
+ * manufacturing VPD) — P9_CUMULUS builds only; all other targets use the
+ * parent processor's module VPD (VWML record, or MER0 for manufacturing).
+ *
+ * @param[in] i_target      X-Bus/O-Bus/MCS/memBuf target
+ * @param[in] i_vpdType     EREPAIR_VPD_FIELD or EREPAIR_VPD_MNFG
+ * @param[in] i_clkGroup    Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_txFailLanes Tx-side fail lane numbers to persist
+ * @param[in] i_rxFailLanes Rx-side fail lane numbers to persist
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode writeRepairDataToVPD(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+    erepairVpdType i_vpdType,
+    const uint8_t i_clkGroup,
+    const std::vector<uint8_t>& i_txFailLanes,
+    const std::vector<uint8_t>& i_rxFailLanes)
+{
+    fapi2::ReturnCode l_rc = fapi2::FAPI2_RC_SUCCESS;
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+    fapi2::Target<fapi2::TARGET_TYPE_PROC_CHIP> l_procTarget;
+    std::vector<fapi2::Target<fapi2::TARGET_TYPE_MBA_CHIPLET>> l_mbaChiplets;
+    fapi2::Target<fapi2::TARGET_TYPE_MBA> l_mbaTarget;
+
+    uint8_t* l_retBuf = NULL;
+    uint32_t l_bufSize = 0;
+#ifdef P9_CUMULUS
+    uint8_t l_customDimm;
+#endif
+    FAPI_DBG(">> writeRepairDataToVPD");
+
+    if(i_target.getType() == TARGET_TYPE_MEMBUF_CHIP)
+    {
+#ifdef P9_CUMULUS
+        // Field VPD lives in the VEIR record; MNFG VPD in MER0
+        fapi2::MBvpdRecord l_vpdRecord = MBVPD_RECORD_VEIR;
+
+        if(i_vpdType == EREPAIR_VPD_MNFG)
+        {
+            l_vpdRecord = MBVPD_RECORD_MER0;
+        }
+
+        /*** Read the data from the FRU VPD ***/
+
+        // Determine the size of the eRepair data in the Centaur VPD
+        // (a NULL buffer pointer makes getMBvpdField return the size only)
+        FAPI_TRY( getMBvpdField(
+                      l_vpdRecord,
+                      MBVPD_KEYWORD_PDI,
+                      i_target,
+                      NULL,
+                      l_bufSize),
+                  "VPD size read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        // Get the connected MBA chiplet and determine whether we have CDIMM
+        l_rc = fapiGetChildChiplets(i_target,
+                                    fapi2::TARGET_TYPE_MBA_CHIPLET,
+                                    l_mbaChiplets,
+                                    fapi2::TARGET_STATE_FUNCTIONAL);
+
+        // NOTE(review): condition passes when EITHER rc is good OR the list
+        // is non-empty; l_mbaChiplets[0] below assumes a non-empty list —
+        // confirm whether '&&' was intended (same pattern as getDimmType).
+        FAPI_ASSERT( ((uint64_t)l_rc == 0x0) || (0 != l_mbaChiplets.size()),
+                     fapi2::P9_EREPAIR_CHILD_MBA_TARGETS_ERR()
+                     .set_ERROR(l_rc),
+                     "ERROR: During get child MBA targets");
+
+        l_mbaTarget = l_mbaChiplets[0];
+        std::vector<fapi2::Target<fapi2::TARGET_TYPE_DIMM>> l_target_dimm_array;
+
+        FAPI_TRY( fapiGetAssociatedDimms(
+                      l_mbaTarget,
+                      l_target_dimm_array),
+                  "fapiGetAssociatedDimms() failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        if(0 != l_target_dimm_array.size())
+        {
+            // Read the custom-DIMM flag from the first associated DIMM
+            FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_SPD_CUSTOM,
+                                   l_target_dimm_array[0],
+                                   l_customDimm));
+        }
+        else
+        {
+            l_customDimm = fapi2::ENUM_ATTR_SPD_CUSTOM_NO;
+        }
+
+        // Sanity-check the reported VPD size against the per-Centaur limits
+        if( (l_customDimm == fapi2::ENUM_ATTR_SPD_CUSTOM_YES) || (l_bufSize == 0) )
+        {
+            if((l_bufSize == 0) ||
+               ((i_vpdType == EREPAIR_VPD_FIELD) &&
+                (l_bufSize > EREPAIR_MEM_FIELD_VPD_SIZE_PER_CENTAUR)) ||
+               ((i_vpdType == EREPAIR_VPD_MNFG) &&
+                (l_bufSize > EREPAIR_MEM_MNFG_VPD_SIZE_PER_CENTAUR)))
+            {
+                FAPI_ASSERT(false,
+                            fapi2::P9_EREPAIR_ACCESSOR_HWP_INVALID_MEM_VPD_SIZE_ERR()
+                            .set_ERROR(current_err),
+                            "ERROR: Invalid MEM VPD size");
+            }
+        }
+
+        // Allocate memory for buffer
+        l_retBuf = new uint8_t[l_bufSize];
+
+        // NOTE(review): array 'new' throws std::bad_alloc rather than
+        // returning NULL (unless built without exceptions) — confirm this
+        // check can actually fire with the firmware build flags.
+        FAPI_ASSERT(l_retBuf != NULL,
+                    fapi2::P9_EREPAIR_ACCESSOR_HWP_MEMORY_ALLOC_FAIL_ERR()
+                    .set_BUF_SIZE(l_bufSize),
+                    "ERROR: Failed to allocate memory size");
+
+        // Retrieve the Field eRepair data from the Centaur FRU VPD
+        FAPI_TRY( getMBvpdField(
+                      l_vpdRecord,
+                      MBVPD_KEYWORD_PDI,
+                      i_target,
+                      l_retBuf,
+                      l_bufSize),
+                  "Centaur FRU VPD read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        /*** Update the new eRepair data to the buffer ***/
+        FAPI_TRY( writeRepairLanesToBuf(
+                      i_target,
+                      i_txFailLanes,
+                      i_rxFailLanes,
+                      l_bufSize,
+                      i_clkGroup,
+                      l_retBuf),
+                  "Update erepair data to buffer failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        /*** Write the updated eRepair buffer back to Centaur FRU VPD ***/
+        FAPI_TRY( setMBvpdField(
+                      l_vpdRecord,
+                      MBVPD_KEYWORD_PDI,
+                      i_target,
+                      l_retBuf,
+                      l_bufSize),
+                  "Update erepair data to VPD failed w/rc=0x%x",
+                  (uint64_t)current_err );
+#endif
+    } // end of(targetType == MEMBUF)
+    else
+    {
+        // Determine the Processor target
+        l_procTarget = i_target.getParent<fapi2::TARGET_TYPE_PROC_CHIP>();
+
+        // Field VPD lives in the VWML record; MNFG VPD in MER0
+        fapi2::MvpdRecord l_vpdRecord = MVPD_RECORD_VWML;
+
+        if(i_vpdType == EREPAIR_VPD_MNFG)
+        {
+            l_vpdRecord = MVPD_RECORD_MER0;
+        }
+
+        /*** Read the data from the Module VPD ***/
+
+        // Determine the size of the eRepair data in the VPD
+        // (a NULL buffer pointer makes getMvpdField return the size only)
+        FAPI_TRY( getMvpdField(
+                      l_vpdRecord,
+                      MVPD_KEYWORD_PDI,
+                      l_procTarget,
+                      NULL,
+                      l_bufSize),
+                  "VPD size read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        // Sanity-check the reported VPD size against the module VPD limits
+        if((l_bufSize == 0) ||
+           ((i_vpdType == EREPAIR_VPD_FIELD) &&
+            (l_bufSize > EREPAIR_P9_MODULE_VPD_FIELD_SIZE)) ||
+           ((i_vpdType == EREPAIR_VPD_MNFG) &&
+            (l_bufSize > EREPAIR_P9_MODULE_VPD_MNFG_SIZE)))
+        {
+            FAPI_ASSERT(false,
+                        fapi2::P9_EREPAIR_ACCESSOR_HWP_INVALID_FABRIC_VPD_SIZE_ERR()
+                        .set_ERROR(current_err),
+                        "ERROR: Invalid Fabric VPD size");
+        }
+
+        // Allocate memory for buffer
+        l_retBuf = new uint8_t[l_bufSize];
+
+        // NOTE(review): see the matching check above — 'new[]' normally
+        // throws on failure instead of returning NULL.
+        FAPI_ASSERT(l_retBuf != NULL,
+                    fapi2::P9_EREPAIR_ACCESSOR_HWP_MEMORY_ALLOC_FAIL_ERR()
+                    .set_BUF_SIZE(l_bufSize),
+                    "ERROR: Failed to allocate memory size");
+
+        // Retrieve the Field eRepair data from the MVPD
+        FAPI_TRY( getMvpdField(
+                      l_vpdRecord,
+                      MVPD_KEYWORD_PDI,
+                      l_procTarget,
+                      l_retBuf,
+                      l_bufSize),
+                  "VPD read failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        /*** Update the new eRepair data to the buffer ***/
+        FAPI_TRY( writeRepairLanesToBuf(
+                      i_target,
+                      i_txFailLanes,
+                      i_rxFailLanes,
+                      l_bufSize,
+                      i_clkGroup,
+                      l_retBuf),
+                  "writeRepairLanesToBuf() failed w/rc=0x%x",
+                  (uint64_t)current_err );
+
+        /*** Write the updated eRepair buffer back to MVPD ***/
+        FAPI_TRY( setMvpdField(
+                      l_vpdRecord,
+                      MVPD_KEYWORD_PDI,
+                      l_procTarget,
+                      l_retBuf,
+                      l_bufSize),
+                  "setMvpdField()-Update erepair data to VPD failed w/rc=0x%x",
+                  (uint64_t)current_err );
+    }
+
+    // Delete the buffer which has Field eRepair data
+    // NOTE(review): this delete[] is only reached on the success path; any
+    // FAPI_TRY/FAPI_ASSERT failure after the allocation jumps straight to
+    // fapi_try_exit and leaks l_retBuf — consider freeing in the exit path.
+    delete[] l_retBuf;
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+
+/**
+ * @brief Merge the Tx and Rx fail-lane vectors into the in-memory VPD
+ *        eRepair buffer.
+ *
+ * Dispatches to updateRepairLanesToBuf() once per non-empty direction
+ * (DRIVE for Tx, RECEIVE for Rx); an empty vector is simply skipped, so a
+ * caller may update only one direction.
+ *
+ * @param[in]     i_target      X-Bus/O-Bus/MCS/memBuf target
+ * @param[in]     i_txFailLanes Tx-side fail lane numbers
+ * @param[in]     i_rxFailLanes Rx-side fail lane numbers
+ * @param[in]     i_bufSz       Size of o_buf in bytes
+ * @param[in]     i_clkGroup    Clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in,out] o_buf         VPD eRepair record buffer to update
+ *
+ * @return ReturnCode
+ */
+fapi2::ReturnCode writeRepairLanesToBuf(
+    const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+    fapi2::TARGET_TYPE_OBUS |
+    fapi2::TARGET_TYPE_MEMBUF_CHIP |
+    fapi2::TARGET_TYPE_MCS_CHIPLET |
+    fapi2::TARGET_TYPE_MCS > &i_target,
+
+    const std::vector<uint8_t>& i_txFailLanes,
+    const std::vector<uint8_t>& i_rxFailLanes,
+    const uint32_t i_bufSz,
+    const uint8_t i_clkGroup,
+    uint8_t* o_buf)
+{
+    fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+
+    FAPI_DBG(">> writeRepairLanesToBuf");
+
+    if(i_txFailLanes.size())
+    {
+        /*** Lets update the tx side fail lane vector to the VPD ***/
+        FAPI_TRY( updateRepairLanesToBuf(
+                      i_target,
+                      DRIVE,
+                      i_bufSz,
+                      i_clkGroup,
+                      i_txFailLanes,
+                      o_buf),
+                  "updateRepairLanesToBuf(DRIVE) failed w/rc=0x%x",
+                  (uint64_t)current_err );
+    }
+
+    if(i_rxFailLanes.size())
+    {
+        /*** Lets update the rx side fail lane vector to the VPD ***/
+        FAPI_TRY( updateRepairLanesToBuf(
+                      i_target,
+                      RECEIVE,
+                      i_bufSz,
+                      i_clkGroup,
+                      i_rxFailLanes,
+                      o_buf),
+                  "updateRepairLanesToBuf(RECEIVE) failed w/rc=0x%x",
+                  (uint64_t)current_err );
+    }
+
+fapi_try_exit:
+    return fapi2::current_err;
+}
+
+fapi2::ReturnCode updateRepairLanesToBuf(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+
+ const interfaceType i_interface,
+ const uint32_t i_bufSz,
+ const uint8_t i_clkGroup,
+ const std::vector<uint8_t>& i_failLanes,
+ uint8_t* o_buf)
+{
+ fapi2::current_err = fapi2::FAPI2_RC_SUCCESS;
+ uint32_t l_numRepairs = 0;
+ uint32_t l_newNumRepairs = 0;
+ uint32_t l_repairCnt = 0;
+ uint32_t l_bytesParsed = 0;
+ uint8_t l_repairLane = 0;
+ uint32_t l_repairDataSz = 0;
+ uint8_t* l_vpdPtr = NULL;
+ uint8_t* l_vpdDataPtr = NULL;
+ uint8_t* l_vpdWritePtr = NULL;
+ eRepairHeader* l_vpdHeadPtr = NULL;
+ eRepairPowerBus* l_overWritePtr = NULL;
+ bool l_overWrite = false;
+ uint8_t l_chipNum = 0;
+ uint32_t l_chipPosition = 0;
+ bool l_bClkGroupFound = false;
+#ifdef P9_CUMULUS
+ fapi2::Target<fapi2::TARGET_TYPE_MCS> l_mcsTarget;
+#endif
+ std::vector<uint8_t>::const_iterator l_it;
+ ATTR_CHIP_UNIT_POS_Type l_busNum;
+
+ FAPI_DBG(">> updateRepairLanesToBuf, interface: %s",
+ i_interface == DRIVE ? "Drive" : "Receive");
+
+ {
+ l_repairDataSz = sizeof(eRepairPowerBus); // Size of memory Bus and
+ // fabric Bus eRepair data
+ // is same.
+ // Read the header and count information
+ l_vpdPtr = o_buf; // point to the start of header data
+ l_vpdHeadPtr = reinterpret_cast<eRepairHeader*> (l_vpdPtr);
+
+ l_numRepairs = l_newNumRepairs = l_vpdHeadPtr->availNumRecord;
+
+ // We've read the header data, increment bytes parsed
+ l_bytesParsed = sizeof(eRepairHeader);
+
+ // Get a pointer to the start of repair data
+ l_vpdPtr += sizeof(eRepairHeader);
+
+ if(i_target.getType() == fapi2::TARGET_TYPE_MEMBUF_CHIP)
+ {
+#ifdef P9_CUMULUS
+ FAPI_TRY( fapiGetOtherSideOfMemChannel(
+ i_target,
+ l_mcsTarget,
+ fapi2::TARGET_STATE_FUNCTIONAL),
+ "fapiGetOtherSideOfMemChannel() failed w/rc=0x%x",
+ (uint64_t)current_err );
+
+ // Get the bus number
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_CHIP_UNIT_POS,
+ l_mcsTarget,
+ l_busNum));
+#endif
+ }
+ else
+ {
+ // Get the bus number
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_CHIP_UNIT_POS,
+ i_target,
+ l_busNum));
+ }
+
+ // Get the chip target
+ fapi2::Target<fapi2::TARGET_TYPE_PROC_CHIP> l_chipTarget;
+ l_chipTarget = i_target.getParent<fapi2::TARGET_TYPE_PROC_CHIP>();
+
+ // Get the chip number
+ FAPI_TRY(FAPI_ATTR_GET(fapi2::ATTR_POS,
+ l_chipTarget,
+ l_chipPosition));
+
+
+ // This is needed because we can only store and compare a uint8_t
+ // value. For our purpose the value in l_chipPosition (Proc Position and
+ // Centaur Position) will always be within the range of uint8_t
+ l_chipNum = l_chipPosition;
+
+ /*** Lets update the fail lane vector to the Buffer ***/
+ // Create a structure of eRepair data that we will be matching
+ // in the buffer.
+ struct erepairDataMatch
+ {
+ interfaceType intType;
+ fapi2::TargetType tgtType;
+ union repairData
+ {
+ eRepairPowerBus fabBus;
+ eRepairMemBus memBus;
+ } bus;
+ };
+
+ // Create an array of the above match structure to have all the
+ // combinations of Fabric and Memory repair data
+ erepairDataMatch l_repairMatch[14] =
+ {
+ {
+ // index 0 - X0A (clock group 0)
+ DRIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_DRIVER, // interface
+ },
+ },
+ },
+ {
+ // index 1 - X0A (clock group 0)
+ RECEIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_RECEIVER, // interface
+ },
+ },
+ },
+ {
+ // index 2 - X0A (clock group 1)
+ DRIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_DRIVER, // interface
+ },
+ },
+ },
+ {
+ // index 3 - X0A (clock group 1)
+ RECEIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_RECEIVER, // interface
+ },
+ },
+ },
+ {
+ // index 4 - X1A (clock group 0)
+ DRIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_DRIVER, // interface
+ },
+ },
+ },
+ {
+ // index 5 - X1A (clock group 0)
+ RECEIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_RECEIVER, // interface
+ },
+ },
+ },
+ {
+ // index 6 - X1A (clock group 1)
+ DRIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_DRIVER, // interface
+ },
+ },
+ },
+ {
+ // index 7 - X1A (clock group 1)
+ RECEIVE,
+ TARGET_TYPE_XBUS_ENDPOINT,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_EDIP, // type
+ PBUS_RECEIVER, // interface
+ },
+ },
+ },
+ {
+ // index 8
+ DRIVE,
+ TARGET_TYPE_OBUS,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_OPT, // type
+ PBUS_DRIVER, // interface
+ },
+ },
+ },
+ {
+ // index 9
+ RECEIVE,
+ TARGET_TYPE_OBUS,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// processor_id
+ l_busNum, // fabricBus
+ },
+ PROCESSOR_OPT, // type
+ PBUS_RECEIVER, // interface
+ },
+ },
+ },
+ {
+ // index 10
+ DRIVE,
+ TARGET_TYPE_MCS_CHIPLET,
+ {
+ // repairData
+ {
+ // fabBus
+ {
+ // device
+ l_chipNum,// proc_centaur_id
+ l_busNum, // memChannel
+ },
+ MEMORY_EDIP, // type
+ DMI_MCS_DRIVE,// interface
+ },
+ },
+ },
+ {
+ // index 11
+ DRIVE,
+ TARGET_TYPE_MEMBUF_CHIP,
+ {
+ // repairData
+ {
+ // memBus
+ {
+ // device
+ l_chipNum,// proc_centaur_id
+ l_busNum, // memChannel
+ },
+ MEMORY_EDIP, // type
+ DMI_MEMBUF_DRIVE,// interface
+ },
+ },
+ },
+ {
+ // index 12
+ RECEIVE,
+ TARGET_TYPE_MCS_CHIPLET,
+ {
+ // repairData
+ {
+ // memBus
+ {
+ // device
+ l_chipNum,// proc_centaur_id
+ l_busNum, // memChannel
+ },
+ MEMORY_EDIP, // type
+ DMI_MCS_RECEIVE, // interface
+ },
+ },
+ },
+ {
+ // index 13
+ RECEIVE,
+ TARGET_TYPE_MEMBUF_CHIP,
+ {
+ // repairData
+ {
+ // memBus
+ {
+ // device
+ l_chipNum,// proc_centaur_id
+ l_busNum, // memChannel
+ },
+ MEMORY_EDIP, // type
+ DMI_MEMBUF_RECEIVE, // interface
+ },
+ },
+ }
+ };
+
+ l_vpdDataPtr = l_vpdPtr;
+ l_repairCnt = 0;
+
+ // Pick each faillane for copying into buffer
+ for(l_it = i_failLanes.begin();
+ l_it != i_failLanes.end();
+ l_it++, (l_vpdDataPtr += l_repairDataSz))
+ {
+ l_repairLane = *l_it;
+ l_overWrite = false;
+ l_vpdWritePtr = NULL;
+
+ // Parse the VPD for fabric and memory eRepair records
+ for(;
+ (l_repairCnt < l_numRepairs) && (l_bytesParsed <= i_bufSz);
+ l_repairCnt++, (l_vpdDataPtr += l_repairDataSz))
+ {
+ l_overWritePtr =
+ reinterpret_cast<eRepairPowerBus*> (l_vpdDataPtr);
+
+ // Lets find the matching fabric
+ for(uint8_t l_loop = 0; l_loop < 14; l_loop++)
+ {
+ if((i_target.getType() == TARGET_TYPE_XBUS) ||
+ (i_target.getType() == TARGET_TYPE_OBUS))
+ {
+ if((i_interface == l_repairMatch[l_loop].intType) &&
+ (i_target.getType() == l_repairMatch[l_loop].tgtType) &&
+ ((l_overWritePtr->device).processor_id ==
+ l_repairMatch[l_loop].bus.fabBus.device.processor_id) &&
+ (l_overWritePtr->type ==
+ l_repairMatch[l_loop].bus.fabBus.type) &&
+ (l_overWritePtr->interface ==
+ l_repairMatch[l_loop].bus.fabBus.interface) &&
+ (l_overWritePtr->device.fabricBus ==
+ l_repairMatch[l_loop].bus.fabBus.device.fabricBus))
+ {
+ if(i_clkGroup > 0 && !l_bClkGroupFound)
+ {
+ l_bClkGroupFound = true;
+ continue;
+ }
+
+ // update the failBit number
+ {
+ uint32_t temp = (uint32_t)(l_overWritePtr->failBit);
+ uint32_t* tptr = &temp;
+ FAPI_TRY( gatherRepairLanes(
+ i_target,
+ l_overWritePtr->interface,
+ l_repairLane,
+ tptr),
+ "gatherRepairLanes() failed w/rc=0x%x",
+ (uint64_t)current_err );
+ l_overWritePtr->failBit = temp;
+ }
+
+ // Increment the count of parsed bytes
+ l_bytesParsed += l_repairDataSz;
+
+ l_repairCnt++;
+ l_overWrite = true;
+
+ break;
+ }
+ }
+ else if((i_target.getType() == TARGET_TYPE_MCS_CHIPLET) ||
+ (i_target.getType() == TARGET_TYPE_MEMBUF_CHIP) )
+ {
+ if((i_interface == l_repairMatch[l_loop].intType) &&
+ (i_target.getType() == l_repairMatch[l_loop].tgtType) &&
+ ((l_overWritePtr->device).processor_id ==
+ l_repairMatch[l_loop].bus.memBus.device.proc_centaur_id) &&
+ (l_overWritePtr->type ==
+ l_repairMatch[l_loop].bus.memBus.type) &&
+ (l_overWritePtr->interface ==
+ l_repairMatch[l_loop].bus.memBus.interface) &&
+ (l_overWritePtr->device.fabricBus ==
+ l_repairMatch[l_loop].bus.memBus.device.memChannel))
+ {
+ // update the failBit number
+ {
+ uint32_t temp = (uint32_t)(l_overWritePtr->failBit);
+ uint32_t* tptr = &temp;
+ FAPI_TRY( gatherRepairLanes(
+ i_target,
+ l_overWritePtr->interface,
+ l_repairLane,
+ tptr),
+ "gatherRepairLanes() failed w/rc=0x%x",
+ (uint64_t)current_err );
+ l_overWritePtr->failBit = temp;
+ }
+
+ // Increment the count of parsed bytes
+ l_bytesParsed += l_repairDataSz;
+
+ l_repairCnt++;
+ l_overWrite = true;
+
+ break;
+ }
+ }
+ } // end of for(l_loop < 14)
+
+ if(l_overWrite == true)
+ {
+ // Go for the next repairLane
+ break;
+ }
+ } // end of for(vpd Parsing)
+
+ // Check if we have parsed more bytes than the passed size
+ if((l_vpdWritePtr == NULL) &&
+ (l_bytesParsed > i_bufSz) &&
+ (l_repairCnt < l_numRepairs))
+ {
+ FAPI_ASSERT(false,
+ fapi2::P9_EREPAIR_MVPD_FULL_ERR()
+ .set_VAL_BYTE_PARSED(l_bytesParsed)
+ .set_VAL_BUF_SIZE(i_bufSz)
+ .set_VAL_REPAIR_CNT(l_repairCnt)
+ .set_VAL_NUM_REPAIR(l_numRepairs),
+ "ERROR: from updateRepairLanesToBuf - MVPD full");
+ }
+
+ // Add at the end
+ if(l_overWrite == false)
+ {
+ if(l_vpdWritePtr == NULL)
+ {
+ // We are writing at the end
+ l_vpdWritePtr = l_vpdDataPtr;
+ }
+
+ if((i_target.getType() == TARGET_TYPE_XBUS) ||
+ (i_target.getType() == TARGET_TYPE_OBUS))
+ {
+ // Make sure we are not writing more records than the size
+ // allocated in the VPD
+ FAPI_ASSERT(l_bytesParsed <= i_bufSz,
+ fapi2::P9_EREPAIR_MVPD_FULL_ERR()
+ .set_VAL_BYTE_PARSED(l_bytesParsed)
+ .set_VAL_BUF_SIZE(i_bufSz)
+ .set_VAL_REPAIR_CNT(l_repairCnt)
+ .set_VAL_NUM_REPAIR(l_numRepairs),
+ "ERROR: from updateRepairLanesToBuf - MVPD full");
+
+ eRepairPowerBus* l_fabricBus =
+ reinterpret_cast<eRepairPowerBus*>(l_vpdWritePtr);
+
+ l_fabricBus->device.processor_id = l_chipNum;
+ l_fabricBus->device.fabricBus = l_busNum;
+
+ if(i_interface == DRIVE)
+ {
+ l_fabricBus->interface = PBUS_DRIVER;
+ }
+ else if(i_interface == RECEIVE)
+ {
+ l_fabricBus->interface = PBUS_RECEIVER;
+ }
+
+ if(i_target.getType() == TARGET_TYPE_XBUS)
+ {
+ l_fabricBus->type = PROCESSOR_EDIP;
+ }
+ else if(i_target.getType() == TARGET_TYPE_OBUS)
+ {
+ l_fabricBus->type = PROCESSOR_OPT;
+ }
+
+ {
+ uint32_t temp = (uint32_t)(l_fabricBus->failBit);
+ uint32_t* tptr = &temp;
+ FAPI_TRY( gatherRepairLanes(
+ i_target,
+ l_fabricBus->interface,
+ l_repairLane,
+ tptr),
+ "gatherRepairLanes() failed w/rc=0x%x",
+ (uint64_t)current_err );
+ l_fabricBus->failBit = temp;
+ }
+
+ l_newNumRepairs++;
+
+ // Increment the count of parsed bytes
+ l_bytesParsed += l_repairDataSz;
+#ifndef _BIG_ENDIAN
+ // We are on a Little Endian system.
+ // Need to swap the nibbles of structure - eRepairPowerBus
+
+ l_vpdWritePtr[2] = ((l_vpdWritePtr[2] >> 4) |
+ (l_vpdWritePtr[2] << 4));
+#endif
+ }
+ else if((i_target.getType() == TARGET_TYPE_MCS_CHIPLET) ||
+ (i_target.getType() == TARGET_TYPE_MEMBUF_CHIP) )
+ {
+ // Make sure we are not writing more records than the size
+ // allocated in the VPD
+ FAPI_ASSERT(l_bytesParsed == i_bufSz,
+ fapi2::P9_EREPAIR_MBVPD_FULL_ERR()
+ .set_ERROR(l_bytesParsed),
+ "ERROR: from updateRepairLanesToBuf - MBVPD full");
+
+ eRepairMemBus* l_memBus =
+ reinterpret_cast<eRepairMemBus*>(l_vpdWritePtr);
+
+ l_memBus->device.proc_centaur_id = l_chipNum;
+ l_memBus->device.memChannel = l_busNum;
+ l_memBus->type = MEMORY_EDIP;
+
+ if(i_interface == DRIVE)
+ {
+ if(i_target.getType() == TARGET_TYPE_MCS_CHIPLET)
+ {
+ l_memBus->interface = DMI_MCS_DRIVE;
+ }
+ else if(i_target.getType() == TARGET_TYPE_MEMBUF_CHIP)
+ {
+ l_memBus->interface = DMI_MEMBUF_DRIVE;
+ }
+ }
+ else if(i_interface == RECEIVE)
+ {
+ if(i_target.getType() == TARGET_TYPE_MCS_CHIPLET)
+ {
+ l_memBus->interface = DMI_MCS_RECEIVE;
+ }
+ else if(i_target.getType() == TARGET_TYPE_MEMBUF_CHIP)
+ {
+ l_memBus->interface = DMI_MEMBUF_RECEIVE;
+ }
+ }
+
+ {
+ uint32_t temp = (uint32_t)(l_memBus->failBit);
+ uint32_t* tptr = &temp;
+ FAPI_TRY( gatherRepairLanes(
+ i_target,
+ l_memBus->interface,
+ l_repairLane,
+ tptr),
+ "gatherRepairLanes() failed w/rc=0x%x",
+ (uint64_t)current_err );
+ l_memBus->failBit = temp;
+ }
+
+ l_newNumRepairs++;
+
+ // Increment the count of parsed bytes
+ l_bytesParsed += l_repairDataSz;
+#ifndef _BIG_ENDIAN
+ // We are on a Little Endian system.
+ // Need to swap the nibbles of structure - eRepairMemBus
+
+ l_vpdWritePtr[2] = ((l_vpdWritePtr[2] >> 4) |
+ (l_vpdWritePtr[2] << 4));
+#endif
+ }
+ } // end of if(l_overWrite == false)
+ } // end of for(failLanes)
+ }
+ // Update the eRepair count
+ l_vpdHeadPtr->availNumRecord = l_newNumRepairs;
+
+fapi_try_exit:
+ return fapi2::current_err;
+}
+
+
+fapi2::ReturnCode gatherRepairLanes(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ uint8_t i_busInterface,
+ uint8_t i_repairLane,
+ uint32_t* o_failBit)
+{
+ fapi2::ReturnCode l_rc;
+ uint8_t maxBusLanes = 0;
+ uint32_t setBitPosition = (0x80000000);
+
+ FAPI_DBG(">> setRepairLanes");
+
+ // Check for target type and corresponding sub interface
+ // to get max lanes supported per interface
+ if(i_target.getType() == fapi2::TARGET_TYPE_OBUS)
+ {
+ maxBusLanes = OBUS_MAX_LANE_WIDTH; //OBUS
+ }
+ else if(i_target.getType() == fapi2::TARGET_TYPE_XBUS)
+ {
+ maxBusLanes = XBUS_MAX_LANE_WIDTH; //XBUS
+ }
+ else if((i_target.getType() == fapi2::TARGET_TYPE_MEMBUF_CHIP) ||
+ (i_target.getType() == fapi2::TARGET_TYPE_MCS_CHIPLET)) //DMI
+ {
+ if( (i_busInterface == DMI_MCS_RECEIVE) ||
+ (i_busInterface == DMI_MEMBUF_DRIVE) )
+ {
+ maxBusLanes = DMIBUS_DNSTREAM_MAX_LANE_WIDTH;
+ }
+ else if( (i_busInterface == DMI_MCS_DRIVE) ||
+ (i_busInterface == DMI_MEMBUF_RECEIVE) )
+ {
+ maxBusLanes = DMIBUS_UPSTREAM_MAX_LANE_WIDTH;
+ }
+ }
+
+ // Make sure repair lane value passed is within valid range as per the target type
+ FAPI_ASSERT(i_repairLane < maxBusLanes,
+ fapi2::P9_EREPAIR_INVALID_LANE_VALUE_ERR()
+ .set_ERROR(i_repairLane)
+ .set_TARGET(i_target),
+ "ERROR: Invalid erepair lane value");
+
+ // Update the fail bits data with the repair lane number failed
+ *o_failBit |= (setBitPosition >> i_repairLane);
+
+ // Get the failed lanes
+ FAPI_INF("Updated Fail Lanes:%x", *o_failBit);
+
+ FAPI_DBG("<< setRepairLanes");
+
+fapi_try_exit:
+ return l_rc;
+}
+
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.H b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.H
new file mode 100755
index 000000000..45c58f10d
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.H
@@ -0,0 +1,88 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2017 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p9_io_erepairSetFailedLanesHwp.H
+/// @brief FW Team HWP that accesses the fail lanes of Fabric and Memory buses.
+///
+//----------------------------------------------------------------------------
+// *HWP HWP Owner : Chris Steffen <cwsteffen@us.ibm.com>
+// *HWP HWP Backup Owner: Gary Peterson <garyp@us.ibm.com>
+// *HWP FW Owner : Sumit Kumar <sumit_kumar@in.ibm.com>
+// *HWP Team : IO
+// *HWP Level : 2
+// *HWP Consumed by : FSP:HB
+//----------------------------------------------------------------------------
+
+#ifndef P9_IO_EREPAIRSETFAILEDLANESHWP_H_
+#define P9_IO_EREPAIRSETFAILEDLANESHWP_H_
+
+#include <fapi2.H>
+#include <p9_io_erepairConsts.H>
+
+typedef fapi2::ReturnCode (*p9_io_erepairSetFailedLanesHwp_FP_t)(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ EREPAIR::erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ const std::vector<uint8_t>& i_txFailLanes,
+ const std::vector<uint8_t>& i_rxFailLanes);
+
+extern "C"
+{
+
+ /**
+ * @brief FW Team HWP that writes the eRepair fail lanes to the VPD.
+ * The fail lanes will be written to either the P9 MVPD or the
+ * Centaur FRU VPD depending on the passed target type.
+ *
+ * @param[in] i_target Reference to X-Bus or O-Bus or MCS or memBuf Target
+ * @param[in] i_vpdType Specifies which VPD (MNFG or Field) to access.
+ * @param[in] i_clkGroup Specifies clock group 0:[X0A, X1A,..] 1:[X0B, X1B,..]
+ * @param[in] i_txFailLanes Reference to a vector that has the Tx side
+ * (of i_tgtHandle) fail lane numbers that need
+ * to be written to the VPD
+ * @param[in] i_rxFailLanes Reference to a vector that has the Rx side
+ * (of i_tgtHandle) fail lane numbers that need
+ * to be written to the VPD
+ *
+ * @return ReturnCode
+ *
+ */
+ fapi2::ReturnCode p9_io_erepairSetFailedLanesHwp(
+ const fapi2::Target < fapi2::TARGET_TYPE_XBUS |
+ fapi2::TARGET_TYPE_OBUS |
+ fapi2::TARGET_TYPE_MEMBUF_CHIP |
+ fapi2::TARGET_TYPE_MCS_CHIPLET |
+ fapi2::TARGET_TYPE_MCS > &i_target,
+ EREPAIR::erepairVpdType i_vpdType,
+ const uint8_t i_clkGroup,
+ const std::vector<uint8_t>& i_txFailLanes,
+ const std::vector<uint8_t>& i_rxFailLanes);
+}// end of extern C
+
+#endif
diff --git a/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.mk b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.mk
new file mode 100644
index 000000000..03988f13d
--- /dev/null
+++ b/src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.mk
@@ -0,0 +1,27 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: src/import/chips/p9/procedures/hwp/io/p9_io_erepairSetFailedLanesHwp.mk $
+#
+# OpenPOWER HostBoot Project
+#
+# Contributors Listed Below - COPYRIGHT 2015,2017
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+PROCEDURE=p9_io_erepairSetFailedLanesHwp
+$(call BUILD_PROCEDURE)
diff --git a/src/import/chips/p9/procedures/xml/attribute_info/p9_erepair_thresholds.xml b/src/import/chips/p9/procedures/xml/attribute_info/p9_erepair_thresholds.xml
new file mode 100755
index 000000000..2999717c6
--- /dev/null
+++ b/src/import/chips/p9/procedures/xml/attribute_info/p9_erepair_thresholds.xml
@@ -0,0 +1,106 @@
+<!-- IBM_PROLOG_BEGIN_TAG -->
+<!-- This is an automatically generated prolog. -->
+<!-- -->
+<!-- $Source: src/import/chips/p9/procedures/xml/attribute_info/p9_erepair_thresholds.xml $ -->
+<!-- -->
+<!-- OpenPOWER HostBoot Project -->
+<!-- -->
+<!-- Contributors Listed Below - COPYRIGHT 2015,2017 -->
+<!-- [+] International Business Machines Corp. -->
+<!-- -->
+<!-- -->
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); -->
+<!-- you may not use this file except in compliance with the License. -->
+<!-- You may obtain a copy of the License at -->
+<!-- -->
+<!-- http://www.apache.org/licenses/LICENSE-2.0 -->
+<!-- -->
+<!-- Unless required by applicable law or agreed to in writing, software -->
+<!-- distributed under the License is distributed on an "AS IS" BASIS, -->
+<!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -->
+<!-- implied. See the License for the specific language governing -->
+<!-- permissions and limitations under the License. -->
+<!-- -->
+<!-- IBM_PROLOG_END_TAG -->
+<!--
+ XML file specifying eRepair threshold attributes.
+ These are platInit attributes associated with the system.
+ These attributes are not associated with particular targets.
+ Each execution platform must initialize.
+-->
+
+<attributes>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_X_EREPAIR_THRESHOLD_FIELD</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of X Bus for Field usage
+ This value must be initialized by platforms by reading the value
+ from System Model - x_threshold_field of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_X_EREPAIR_THRESHOLD_MNFG</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of X Bus for MNFG usage
+ This value must be initialized by platforms by reading the value
+ from System Model - x_threshold_mnfg of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_O_EREPAIR_THRESHOLD_FIELD</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of O Bus for Field usage
+ This value must be initialized by platforms by reading the value
+ from System Model - a_threshold_field of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_O_EREPAIR_THRESHOLD_MNFG</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of O Bus for MNFG usage
+ This value must be initialized by platforms by reading the value
+ from System Model - a_threshold_mnfg of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_DMI_EREPAIR_THRESHOLD_FIELD</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of Memory Bus for Field usage
+ This value must be initialized by platforms by reading the value
+ from System Model - dmi_threshold_field of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+ <attribute>
+ <id>ATTR_DMI_EREPAIR_THRESHOLD_MNFG</id>
+ <targetType>TARGET_TYPE_SYSTEM</targetType>
+ <description>
+ Firmware specified eRepair threshold limit of Memory Bus for MNFG usage
+ This value must be initialized by platforms by reading the value
+ from System Model - dmi_threshold_mnfg of system_policy_table
+ </description>
+ <valueType>uint8</valueType>
+ <platInit/>
+ </attribute>
+ <!-- ********************************************************************* -->
+</attributes>
diff --git a/src/import/chips/p9/procedures/xml/error_info/p9_io_erepair_errors.xml b/src/import/chips/p9/procedures/xml/error_info/p9_io_erepair_errors.xml
new file mode 100644
index 000000000..dc16eed89
--- /dev/null
+++ b/src/import/chips/p9/procedures/xml/error_info/p9_io_erepair_errors.xml
@@ -0,0 +1,191 @@
+<!-- IBM_PROLOG_BEGIN_TAG -->
+<!-- This is an automatically generated prolog. -->
+<!-- -->
+<!-- $Source: src/import/chips/p9/procedures/xml/error_info/p9_io_erepair_errors.xml $ -->
+<!-- -->
+<!-- OpenPOWER HostBoot Project -->
+<!-- -->
+<!-- Contributors Listed Below - COPYRIGHT 2016,2017 -->
+<!-- [+] International Business Machines Corp. -->
+<!-- -->
+<!-- -->
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); -->
+<!-- you may not use this file except in compliance with the License. -->
+<!-- You may obtain a copy of the License at -->
+<!-- -->
+<!-- http://www.apache.org/licenses/LICENSE-2.0 -->
+<!-- -->
+<!-- Unless required by applicable law or agreed to in writing, software -->
+<!-- distributed under the License is distributed on an "AS IS" BASIS, -->
+<!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -->
+<!-- implied. See the License for the specific language governing -->
+<!-- permissions and limitations under the License. -->
+<!-- -->
+<!-- IBM_PROLOG_END_TAG -->
+<!-- $Id: p9_io_erepair_errors.xml,v1.0 2015/15/01 14:00:44 sumit56 Exp $ -->
+<!-- Error definitions for p9_io_erepair_errors procedure -->
+<hwpErrors>
+ <!-- ********************************************************************* -->
+ <hwpError>
+ <rc>RC_P9_EREPAIR_DIMM_TYPE_CHECK_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <description>Error during DIMM type check.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_ACCESSOR_HWP_INVALID_MEM_VPD_SIZE_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <description>Invalid MEM VPD size has been returned by platform.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_ACCESSOR_HWP_MEMORY_ALLOC_FAIL_ERR</rc>
+ <ffdc>BUF_SIZE</ffdc>
+ <description>Failed to allocate run time memory from the heap.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_ACCESSOR_HWP_INVALID_FABRIC_VPD_SIZE_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <description>Invalid Fabric VPD size has been returned by platform.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_CHILD_MBA_TARGETS_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <description>Error during get child MBA targets.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_NO_RX_TX_FAILED_LANES_ERR</rc>
+ <ffdc>TX_LANE</ffdc>
+ <ffdc>RX_LANE</ffdc>
+ <description>ERROR: No Tx/Rx fail lanes were provided
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_MBVPD_FULL_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <description>ERROR: eRepair data limit in the Memory Buffer FRU VPD has been reached.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_MVPD_FULL_ERR</rc>
+ <ffdc>VAL_BYTE_PARSED</ffdc>
+ <ffdc>VAL_BUF_SIZE</ffdc>
+ <ffdc>VAL_REPAIR_CNT</ffdc>
+ <ffdc>VAL_NUM_REPAIR</ffdc>
+ <description>ERROR: eRepair data limit in the Processor Module VPD has been reached.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_INVALID_LANE_VALUE_ERR</rc>
+ <ffdc>ERROR</ffdc>
+ <ffdc>TARGET</ffdc>
+ <description>ERROR: Invalid erepair lane value
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_RESTORE_FIELD_VPD_NOT_CLEAR</rc>
+ <ffdc>TARGET1</ffdc>
+ <ffdc>TARGET2</ffdc>
+ <description>ERROR: mnfgCheckFieldVPD: Field VPD need to be clear during Mnfg mode IPL
+ Invalid input parameter: Valid target pairs are: XBus-XBus, OBus-OBus, MCS-MEMBUF
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_RESTORE_INVALID_TARGET</rc>
+ <ffdc>TARGET</ffdc>
+ <description>ERROR:geteRepairThreshold: Invalid target type
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_THRESHOLD_EXCEED</rc>
+ <ffdc>TX_NUM_LANES</ffdc>
+ <ffdc>RX_NUM_LANES</ffdc>
+ <ffdc>THRESHOLD</ffdc>
+ <description>ERROR:The threshold limit for eRepair has been crossed.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_RESTORE_FABRIC_DISABLED</rc>
+ <ffdc>VALUE1</ffdc>
+ <ffdc>VALUE2</ffdc>
+ <description>ERROR:erepairGetRestoreLanes: Fabric eRepair is disabled.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_RESTORE_INVALID_TARGET_PAIR</rc>
+ <ffdc>TARGET1</ffdc>
+ <ffdc>TARGET2</ffdc>
+ <description>ERROR:geteRepairThreshold: Invalid target pair
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+ <hwpError>
+ <rc>RC_P9_EREPAIR_RESTORE_MEMORY_DISABLED</rc>
+ <ffdc>VALUE1</ffdc>
+ <ffdc>VALUE2</ffdc>
+ <description>ERROR:erepairGetRestoreLanes: Memory eRepair is disabled.
+ </description>
+ <callout>
+ <procedure>CODE</procedure>
+ <priority>HIGH</priority>
+ </callout>
+ </hwpError>
+</hwpErrors>
diff --git a/src/usr/targeting/common/genHwsvMrwXml.pl b/src/usr/targeting/common/genHwsvMrwXml.pl
index 62060a156..281f47948 100755
--- a/src/usr/targeting/common/genHwsvMrwXml.pl
+++ b/src/usr/targeting/common/genHwsvMrwXml.pl
@@ -268,10 +268,10 @@ push @systemAttr,
"PROC_FABRIC_SMP_OPTICS_MODE", $reqPol->{'proc_fabric_smp_optics_mode'},
"PROC_FABRIC_CAPI_MODE", $reqPol->{'proc_fabric_capi_mode'},
"X_EREPAIR_THRESHOLD_FIELD", $reqPol->{'x-erepair-threshold-field'},
- "A_EREPAIR_THRESHOLD_FIELD", $reqPol->{'a-erepair-threshold-field'},
+ "O_EREPAIR_THRESHOLD_FIELD", $reqPol->{'a-erepair-threshold-field'},
"DMI_EREPAIR_THRESHOLD_FIELD", $reqPol->{'dmi-erepair-threshold-field'},
"X_EREPAIR_THRESHOLD_MNFG", $reqPol->{'x-erepair-threshold-mnfg'},
- "A_EREPAIR_THRESHOLD_MNFG", $reqPol->{'a-erepair-threshold-mnfg'},
+ "O_EREPAIR_THRESHOLD_MNFG", $reqPol->{'a-erepair-threshold-mnfg'},
"DMI_EREPAIR_THRESHOLD_MNFG", $reqPol->{'dmi-erepair-threshold-mnfg'},
"MSS_MBA_ADDR_INTERLEAVE_BIT", $reqPol->{'mss_mba_addr_interleave_bit'},
"EXTERNAL_VRM_STEPSIZE", $reqPol->{'pm_external_vrm_stepsize'},
diff --git a/src/usr/targeting/common/xmltohb/attribute_types.xml b/src/usr/targeting/common/xmltohb/attribute_types.xml
index b8573c7e4..31098656f 100755
--- a/src/usr/targeting/common/xmltohb/attribute_types.xml
+++ b/src/usr/targeting/common/xmltohb/attribute_types.xml
@@ -7004,102 +7004,6 @@ Selects which voltage level to place the Core and ECO domain PFETs upon Winkle e
</attribute>
<attribute>
- <id>X_EREPAIR_THRESHOLD_FIELD</id>
- <description>
- This attribute represents the eRepair threshold value of X-Bus used
- in the field.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
- <id>A_EREPAIR_THRESHOLD_FIELD</id>
- <description>
- This attribute represents the eRepair threshold value of A-Bus used
- in the field.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
- <id>DMI_EREPAIR_THRESHOLD_FIELD</id>
- <description>
- This attribute represents the eRepair threshold value of DMI-Bus used
- in the field.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
- <id>X_EREPAIR_THRESHOLD_MNFG</id>
- <description>
- This attribute represents the eRepair threshold value of X-Bus used
- by Manufacturing.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
- <id>A_EREPAIR_THRESHOLD_MNFG</id>
- <description>
- This attribute represents the eRepair threshold value of A-Bus used
- by Manufacturing.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
- <id>DMI_EREPAIR_THRESHOLD_MNFG</id>
- <description>
- This attribute represents the eRepair threshold value of DMI-Bus used
- by Manufacturing.
- creator: platform (generated based on MRW data)
- See defintion in erepair_thresholds.xml for more information.
- </description>
- <simpleType>
- <uint8_t>
- </uint8_t>
- </simpleType>
- <persistency>non-volatile</persistency>
- <readable/>
-</attribute>
-
-<attribute>
<id>MSS_MBA_ADDR_INTERLEAVE_BIT</id>
<description>sets the Centaur address bits used to interleave addresses between MBA01 and MBA23. valid values are 23 through 32.</description>
<simpleType>
diff --git a/src/usr/targeting/common/xmltohb/simics_CUMULUS.system.xml b/src/usr/targeting/common/xmltohb/simics_CUMULUS.system.xml
index f793380aa..411ef82bb 100644
--- a/src/usr/targeting/common/xmltohb/simics_CUMULUS.system.xml
+++ b/src/usr/targeting/common/xmltohb/simics_CUMULUS.system.xml
@@ -5,7 +5,7 @@
<!-- -->
<!-- OpenPOWER HostBoot Project -->
<!-- -->
-<!-- Contributors Listed Below - COPYRIGHT 2017 -->
+<!-- Contributors Listed Below - COPYRIGHT 2016,2017 -->
<!-- [+] International Business Machines Corp. -->
<!-- -->
<!-- -->
@@ -129,11 +129,11 @@
</attribute>
<attribute>
- <id>A_EREPAIR_THRESHOLD_FIELD</id>
+ <id>O_EREPAIR_THRESHOLD_FIELD</id>
<default>1</default>
</attribute>
<attribute>
- <id>A_EREPAIR_THRESHOLD_MNFG</id>
+ <id>O_EREPAIR_THRESHOLD_MNFG</id>
<default>0</default>
</attribute>
<attribute>
diff --git a/src/usr/targeting/common/xmltohb/target_types.xml b/src/usr/targeting/common/xmltohb/target_types.xml
index 62a236cf4..8bb056751 100644
--- a/src/usr/targeting/common/xmltohb/target_types.xml
+++ b/src/usr/targeting/common/xmltohb/target_types.xml
@@ -881,10 +881,10 @@
<attribute>
<id>X_EREPAIR_THRESHOLD_FIELD</id>
</attribute>
- <attribute><id>A_EREPAIR_THRESHOLD_FIELD</id></attribute>
+ <attribute><id>O_EREPAIR_THRESHOLD_FIELD</id></attribute>
<attribute><id>DMI_EREPAIR_THRESHOLD_FIELD</id></attribute>
<attribute><id>X_EREPAIR_THRESHOLD_MNFG</id></attribute>
- <attribute><id>A_EREPAIR_THRESHOLD_MNFG</id></attribute>
+ <attribute><id>O_EREPAIR_THRESHOLD_MNFG</id></attribute>
<attribute><id>DMI_EREPAIR_THRESHOLD_MNFG</id></attribute>
<attribute><id>MSS_MBA_ADDR_INTERLEAVE_BIT</id></attribute>
<attribute><id>MSS_MBA_CACHELINE_INTERLEAVE_MODE</id></attribute>
OpenPOWER on IntegriCloud