diff options
author | Roland Veloz <rveloz@us.ibm.com> | 2019-08-23 20:00:36 -0500 |
---|---|---|
committer | Christian R Geddes <crgeddes@us.ibm.com> | 2019-09-03 09:49:31 -0500 |
commit | c3d8cfd066998862656706bb00d15ad986470fe5 (patch) | |
tree | f267927cbfb821659b4bd25107dcf1155a0b3ba2 /src/usr/util/runtime/rt_cmds.C | |
parent | e22e362f3cd9b96b845f8c51acd6da97c78554a4 (diff) | |
download | talos-hostboot-c3d8cfd066998862656706bb00d15ad986470fe5.tar.gz talos-hostboot-c3d8cfd066998862656706bb00d15ad986470fe5.zip |
Added code to support doing an NVM health check
Added a method that will do an NVM (non-volatile memory)
health check. In particular, this method checks the
flash error counts and does a predictive callout
if the number of flash errors exceeds the maximum
allowed. This method also checks the bad block percentage
and does a predictive callout if the bad block
percentage exceeds the maximum allowed. A predictive
callout is done if either or both fail the check.
Added support in the runtime commands to make the nvm health check
call as well.
Also, when I did the ES (energy source) health check method, I was
not very explicit in the method that it was doing an ES health
check. So I updated the verbiage in the nvDimmCheckHealthStatus
to add ES wherever appropriate, so as to make these two methods
explicit as to what health check is being performed.
Change-Id: Ib9925fd2bb8430cf2121108329247d96072beb1b
CQ: 473220
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/82843
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Glenn Miles <milesg@ibm.com>
Reviewed-by: Zachary Clark <zach@ibm.com>
Reviewed-by: Christian R Geddes <crgeddes@us.ibm.com>
Diffstat (limited to 'src/usr/util/runtime/rt_cmds.C')
-rw-r--r-- | src/usr/util/runtime/rt_cmds.C | 74 |
1 files changed, 62 insertions, 12 deletions
diff --git a/src/usr/util/runtime/rt_cmds.C b/src/usr/util/runtime/rt_cmds.C index c669aae4f..bf0c51749 100644 --- a/src/usr/util/runtime/rt_cmds.C +++ b/src/usr/util/runtime/rt_cmds.C @@ -1179,25 +1179,59 @@ void cmd_nvdimm_protection_msg( char* &o_output, uint32_t i_huid, } } -void cmd_nvdimmCheckHealthStatus( char* &o_output) +/** + * @brief Check the ES (energy source) health status of all NVDIMMs in the + * system. If check fails, see HBRT traces for further details. + * @param[out] o_output Output display buffer, memory allocated here. + * Will inform caller if ES health check passes or fails. + */ +void cmd_nvDimmEsCheckHealthStatus( char* &o_output) +{ + o_output = new char[500]; + if (NVDIMM::nvDimmEsCheckHealthStatusOnSystem()) + { + sprintf( o_output, "cmd_nvDimmEsCheckHealthStatus: " + "ES (energy source) health status check passed."); + + } + else + { + sprintf( o_output, "cmd_nvDimmEsCheckHealthStatus: " + "ES (energy source) health status check failed. " + "Inspect HBRT traces for further details."); + + } + + return; +} // end cmd_nvDimmEsCheckHealthStatus + +/** + * @brief Check the NVM (non-volatile memory) health status of all NVDIMMs in + * the system. If check fails, see HBRT traces for further details. + * @param[out] o_output Output display buffer, memory allocated here. + * Will inform caller if NVM health check passes or fails. + */ + +void cmd_nvdDmmNvmCheckHealthStatus( char* &o_output) { o_output = new char[500]; - if (NVDIMM::nvDimmCheckHealthStatusOnSystem()) + if (NVDIMM::nvDimmNvmCheckHealthStatusOnSystem()) { - sprintf( o_output, "cmd_doNvDimmCheckHealthStatus: " - "health status check passed."); + sprintf( o_output, "cmd_nvdDmmNvmCheckHealthStatus: " + "NVM (non-volatile memory) health status check passed."); } else { - sprintf( o_output, "cmd_doNvDimmCheckHealthStatus: " - "health status check failed. Inspect HBRT traces " - "for further details."); + sprintf( o_output, "cmd_nvdDmmNvmCheckHealthStatus: " + "NVM (non-volatile memory) health status check failed. " + "Inspect HBRT traces for further details."); } return; -} // end cmd_nvdimmCheckHealthStatus +} // end cmd_nvdDmmNvmCheckHealthStatus + #endif @@ -1535,18 +1569,31 @@ int hbrtCommand( int argc, sprintf(*l_output, "ERROR: nvdimm_protection <huid> <0 or 1>"); } } - else if( !strcmp( argv[0], "nvdimm_check_status" ) ) + else if( !strcmp( argv[0], "nvdimm_es_check_status" ) ) { if (argc == 1) { - cmd_nvdimmCheckHealthStatus( *l_output ); + cmd_nvDimmEsCheckHealthStatus( *l_output ); } else { *l_output = new char[100]; - sprintf(*l_output, "Usage: nvdimm_check_status"); + sprintf(*l_output, "Usage: nvdimm_es_check_status"); } } + else if( !strcmp( argv[0], "nvdimm_nvm_check_status" ) ) + { + if (argc == 1) + { + cmd_nvdDmmNvmCheckHealthStatus( *l_output ); + } + else + { + *l_output = new char[100]; + sprintf(*l_output, "Usage: nvdimm_nvm_check_status"); + } + } + #endif else { @@ -1587,8 +1634,11 @@ int hbrtCommand( int argc, #ifdef CONFIG_NVDIMM sprintf( l_tmpstr, "nvdimm_protection <huid> <0 or 1>\n"); strcat( *l_output, l_tmpstr ); - sprintf( l_tmpstr, "nvdimm_check_status\n"); + sprintf( l_tmpstr, "nvdimm_es_check_status\n"); + strcat( *l_output, l_tmpstr ); + sprintf( l_tmpstr, "nvdimm_nvm_check_status\n"); strcat( *l_output, l_tmpstr ); + #endif } |