-rw-r--r--  import/chips/p9/common/include/p9_frequency_buckets.H | 26
-rw-r--r--  import/chips/p9/common/pmlib/include/pstate_pgpe_occ_api.h | 22
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H | 10
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_occ_sram.H | 5
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_hcode_image_defines.H | 2
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_pm_hcd_flags.h | 5
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_pstates_cmeqm.h | 47
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_pstates_common.h | 15
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_pstates_occ.h | 4
-rw-r--r--  import/chips/p9/procedures/hwp/lib/p9_pstates_pgpe.h | 4
-rw-r--r--  import/chips/p9/procedures/ppe/iota/iota_uih.c | 81
-rw-r--r--  import/chips/p9/procedures/ppe/pk/ppe42/pk_panic_codes.h | 4
-rw-r--r--  import/chips/p9/procedures/ppe/pk/ppe42/ppe42_exceptions.S | 47
-rw-r--r--  import/chips/p9/procedures/ppe/pk/ppe42/ppe42_irq_core.c | 44
-rwxr-xr-x  import/chips/p9/procedures/ppe/pk/ppe42/ppe42_scom.h | 6
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/cme_p9a10.mk | 85
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/p9_cme_iota_main.c | 8
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/p9_cme_irq.h | 15
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/p9_cme_main.c | 4
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/pk_app_cfg.h | 6
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_intercme.c | 8
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_thread_db.c | 109
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_img_edit.c | 13
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop.h | 11
-rwxr-xr-x  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_entry.c | 133
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_exit.c | 397
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_init.c | 20
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_irq_handlers.c | 16
-rw-r--r--  import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_hcd_core_scominit.c | 6
-rw-r--r--  import/chips/p9/procedures/ppe_closed/ippe/ioa/p9_abus_main.c | 60
-rw-r--r--  import/chips/p9/procedures/ppe_closed/lib/hcodelibfiles.mk | 3
-rwxr-xr-x  import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.c | 605
-rwxr-xr-x  import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.h | 142
-rw-r--r--  import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errldefs.h | 106
-rw-r--r--  import/chips/p9/procedures/ppe_closed/lib/p9_hcd_occ_errldefs.h | 232
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.c | 105
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.h | 18
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_fit.c | 42
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_gppb.c | 12
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.c | 11
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.h | 8
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_irq_handlers.c | 17
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_main.c | 9
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.c | 4
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.h | 20
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.c | 227
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.h | 6
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_actuate_pstates.c | 9
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_process_requests.c | 21
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pgpe_panic_codes.h | 2
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pk_app_cfg.h | 7
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_common.mk | 3
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe.mk | 2
-rw-r--r--  import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe_p9a10.mk | 70
-rw-r--r--  import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_main.C | 9
-rw-r--r--  import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_stop_irq_handlers.c | 5
-rw-r--r--  import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/pk_app_cfg.h | 7
-rw-r--r--  import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/stop_gpe_p9a10.mk | 96
-rwxr-xr-x  import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S | 856
-rw-r--r--  import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.s | 860
-rwxr-xr-x  import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H | 5
-rwxr-xr-x  import/chips/p9/procedures/utils/stopreg/p9_stop_api.C | 715
-rwxr-xr-x  import/chips/p9/procedures/utils/stopreg/p9_stop_api.H | 139
-rwxr-xr-x  import/chips/p9/procedures/utils/stopreg/selfRest.bin | bin 9016 -> 9016 bytes
-rw-r--r--  import/chips/p9/procedures/utils/stopreg/selfRest.list | 86
-rw-r--r--  import/chips/p9/procedures/utils/stopreg/selfRest.map | 12
-rw-r--r--  import/chips/p9/xip/p9_xip_image.h | 3
-rw-r--r--  rings/p9a.hw.overlays.bin | bin 0 -> 1000 bytes
-rw-r--r--  rings/p9a.hw.rings.bin | bin 0 -> 13960 bytes
-rw-r--r--  rings/p9n.hw.rings.bin | bin 95932 -> 95996 bytes
-rw-r--r--  tools/build/release_tag.txt | 2
-rw-r--r--  tools/build/rules.dir/chips.env.mk | 3
-rw-r--r--  tools/imageProcs/hw_image.mk | 7
73 files changed, 4910 insertions, 789 deletions
diff --git a/import/chips/p9/common/include/p9_frequency_buckets.H b/import/chips/p9/common/include/p9_frequency_buckets.H
index 9695681d..280c4679 100644
--- a/import/chips/p9/common/include/p9_frequency_buckets.H
+++ b/import/chips/p9/common/include/p9_frequency_buckets.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -65,6 +65,30 @@ const uint32_t MEM_PLL_FREQ_LIST[MEM_PLL_FREQ_BUCKETS] =
2666
};
+// OMI bucket descriptor
+struct OmiBucketDescriptor_t
+{
+ uint32_t omifreq; // OMI Frequency in MHz
+ uint32_t vco; // VCO selector
+
+ uint32_t mcafreq; // MCA Frequency in MHz
+};
+
+//MC PLL frequency in MHz for Axone
+// index is bucket number
+// OMI -> ATTR_FREQ_OMI_MHZ
+// VCO -> ATTR_OMI_PLL_VCO
+// MCA -> ATTR_FREQ_MCA_MHZ
+const OmiBucketDescriptor_t OMI_PLL_FREQ_LIST[MEM_PLL_FREQ_BUCKETS] =
+{
+ // OMI VCO MCA Data rate
+ { 19200, 0, 1200 }, // ->DDR4-2400
+ { 21330, 0, 1333 }, // ->DDR4-2667
+ { 23460, 0, 1466 }, // ->DDR4-2933
+ { 23460, 1, 1466 }, // ->DDR4-2933
+ { 25600, 1, 1600 } // ->DDR4-3200
+};
+
// constant defining the number of OBUS PLL frequency options ('buckets')
// to be built into unsigned HW image
const uint8_t OBUS_PLL_FREQ_BUCKETS = 3;
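For reference, the OMI_PLL_FREQ_LIST entries added above appear to follow a roughly 16:1 ratio between the OMI frequency and the MCA frequency, with the DDR4 data rate being about twice the MCA frequency. The following standalone sketch is not part of the HCODE sources; the helper name and the 16:1 assumption are illustrative only.

#include <stdint.h>
#include <stdio.h>

// Illustrative helper: derive the MCA frequency from the OMI frequency,
// assuming the ~16:1 ratio implied by the OMI_PLL_FREQ_LIST table above.
static uint32_t omi_to_mca_mhz(uint32_t omifreq_mhz)
{
    return omifreq_mhz / 16;    // e.g. 19200 / 16 = 1200 -> DDR4-2400
}

int main(void)
{
    const uint32_t omi_mhz[] = { 19200, 21330, 23460, 25600 };
    unsigned i;

    for (i = 0; i < sizeof(omi_mhz) / sizeof(omi_mhz[0]); i++)
    {
        // Data rate is roughly 2x the MCA frequency (2400/2666/2932/3200).
        printf("OMI %u MHz -> MCA ~%u MHz -> ~DDR4-%u\n",
               omi_mhz[i], omi_to_mca_mhz(omi_mhz[i]),
               2 * omi_to_mca_mhz(omi_mhz[i]));
    }

    return 0;
}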
diff --git a/import/chips/p9/common/pmlib/include/pstate_pgpe_occ_api.h b/import/chips/p9/common/pmlib/include/pstate_pgpe_occ_api.h
index cdaff84c..0df2ea1e 100644
--- a/import/chips/p9/common/pmlib/include/pstate_pgpe_occ_api.h
+++ b/import/chips/p9/common/pmlib/include/pstate_pgpe_occ_api.h
@@ -41,7 +41,8 @@
extern "C" {
#endif
-#define HCODE_OCC_SHARED_MAGIC_NUMBER 0x4F505330 //OPS0
+#define HCODE_OCC_SHARED_MAGIC_NUMBER_OPS0 0x4F505330 //OPS0
+#define HCODE_OCC_SHARED_MAGIC_NUMBER_OPS1 0x4F505331 //OPS1
//---------------
// IPC from 405
@@ -409,17 +410,9 @@ typedef struct
// -----------------------------------------------------------------------------
// Start Error Log Table
-/// Maximum number of error log entries available
-#define MAX_HCODE_ELOG_ENTRIES 4
-
-/// Index into the array of error log entries
-enum elog_entry_index
-{
- ELOG_PGPE_CRITICAL = 0,
- ELOG_PGPE_INFO = 1,
- ELOG_SGPE_CRITICAL = 2,
- ELOG_SGPE_INFO = 3,
-};
+/// Maximum number of error log entries available, 1 UE each for SGPE and PGPE
+#define MAX_HCODE_ELOG_ENTRIES 2
+#define HCODE_ELOG_TABLE_MAGIC_WORD 0x454C5443 // "ELTC"
/// Structure of an individual error log entry
typedef struct
@@ -461,7 +454,7 @@ typedef struct hcode_error_table
} fields;
} dw0;
- /// Array of error log entries (index with enum elog_entry_index)
+ /// Array of error log entries
hcode_elog_entry_t elog[MAX_HCODE_ELOG_ENTRIES];
} hcode_error_table_t;
@@ -491,9 +484,6 @@ typedef struct
//PGPE WOF Values
pgpe_wof_values_t pgpe_wof_values;
- //Reserved
- uint64_t reserved1;
-
/// Hcode Error Log Index
hcode_error_table_t errlog_idx;
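The new magic words in this header are ASCII strings packed big-endian into 32-bit values (0x4F505331 is "OPS1", 0x454C5443 is "ELTC"). A minimal standalone sketch, for illustration only and not part of the HCODE sources, that unpacks such a magic word:

#include <stdint.h>
#include <stdio.h>

// Unpack a big-endian 32-bit magic word into a printable 4-character string.
static void magic_to_ascii(uint32_t magic, char out[5])
{
    out[0] = (char)(magic >> 24);
    out[1] = (char)(magic >> 16);
    out[2] = (char)(magic >> 8);
    out[3] = (char)(magic);
    out[4] = '\0';
}

int main(void)
{
    char buf[5];

    magic_to_ascii(0x4F505331, buf);    // HCODE_OCC_SHARED_MAGIC_NUMBER_OPS1
    printf("%s\n", buf);                // prints "OPS1"

    magic_to_ascii(0x454C5443, buf);    // HCODE_ELOG_TABLE_MAGIC_WORD
    printf("%s\n", buf);                // prints "ELTC"

    return 0;
}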
diff --git a/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H b/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H
index 2e6f5c5a..968d8184 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H
+++ b/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H
@@ -46,7 +46,7 @@
/// Image Magic Numbers
-HCD_CONST64(CPMR_MAGIC_NUMBER, ULL(0x43504d525f312e30)) // CPMR_1.0
+HCD_CONST64(CPMR_MAGIC_NUMBER, ULL(0x43504d525f322e30)) // CPMR_2.0
HCD_CONST64(CME_MAGIC_NUMBER , ULL(0x434d455f5f312e30)) // CME__1.0
HCD_CONST64(QPMR_MAGIC_NUMBER, ULL(0x51504d525f312e30)) // QPMR_1.0
@@ -434,6 +434,7 @@ HCD_CONST(CME_QM_FLAG_SYS_WOF_ENABLE, 0x1000)
HCD_CONST(CME_QM_FLAG_SYS_DYN_FMIN_ENABLE, 0x0800)
HCD_CONST(CME_QM_FLAG_SYS_DYN_FMAX_ENABLE, 0x0400)
HCD_CONST(CME_QM_FLAG_SYS_JUMP_PROTECT, 0x0200)
+HCD_CONST(CME_QM_FLAG_PER_QUAD_VDM_ENABLE, 0x0100)
HCD_CONST(CME_QM_FLAG_PSTATE_PHANTOM_HALT_EN, 0x0001)
/// CME Hcode
@@ -460,6 +461,13 @@ HCD_CONST(CME_QUAD_PSTATE_SIZE, HALF_KB)
HCD_CONST(CME_REGION_SIZE, (64 * ONE_KB))
+
+// HOMER compatibility
+
+HCD_CONST(STOP_API_CPU_SAVE_VER, 0x02)
+HCD_CONST(SELF_SAVE_RESTORE_VER, 0x02)
+HCD_CONST(SMF_SUPPORT_SIGNATURE_OFFSET, 0x1300)
+HCD_CONST(SMF_SELF_SIGNATURE, (0x5f534d46))
// Debug
HCD_CONST(CPMR_TRACE_REGION_OFFSET, (512 * ONE_KB))
diff --git a/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_occ_sram.H b/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_occ_sram.H
index 8a4fc997..844bc715 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_occ_sram.H
+++ b/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_occ_sram.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -76,6 +76,7 @@ HCD_CONST(OCC_SRAM_OCC_REGION_SIZE, (512 * ONE_KB))
HCD_CONST(OCC_SRAM_BEFORE_PGPE_REGION_SIZE_TOTAL,
(OCC_SRAM_IPC_REGION_SIZE + OCC_SRAM_GPE0_REGION_SIZE + OCC_SRAM_GPE1_REGION_SIZE))
+
//--------------------------------------------------------------------------------------
/// PGPE Base
@@ -192,5 +193,7 @@ HCD_CONST( OCC_SRAM_PGPE_TRACE_START,
(OCC_SRAM_PGPE_HEADER_ADDR + PGPE_HEADER_SIZE));
+HCD_CONST(OCC_SRAM_SHARED_DATA_BASE_ADDR,
+ (OCC_SRAM_PGPE_BASE_ADDR + OCC_SRAM_PGPE_REGION_SIZE - PGPE_OCC_SHARED_SRAM_SIZE))
#endif /* __P9_HCD_MEMMAP_OCC_SRAM_H__ */
diff --git a/import/chips/p9/procedures/hwp/lib/p9_hcode_image_defines.H b/import/chips/p9/procedures/hwp/lib/p9_hcode_image_defines.H
index eeae5161..69acacb0 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_hcode_image_defines.H
+++ b/import/chips/p9/procedures/hwp/lib/p9_hcode_image_defines.H
@@ -374,8 +374,10 @@ HCD_HDR_UINT32(g_wof_table_length, 0 ); // WOF Table length
HCD_HDR_UINT32(g_pgpe_core_throttle_assert_cnt, 0 ); // Core throttle assert count
HCD_HDR_UINT32(g_pgpe_core_throttle_deassert_cnt, 0 ); // Core throttle de-assert count
HCD_HDR_UINT32(g_pgpe_aux_controls, 0 ); // Auxiliary Controls
+HCD_HDR_UINT32(g_pgpe_optrace_pointer, 0 ); // Operational Trace OCC SRAM Pointer
HCD_HDR_UINT32(g_pgpe_doptrace_offset, 0 ); // Deep Operational Trace Main Memory Buffer Offset
HCD_HDR_UINT32(g_pgpe_doptrace_length, 0 ); // Deep Operational Trace Main Memory Buffer Length
+HCD_HDR_UINT32(g_pgpe_wof_values_address, 0 ); // SRAM address where PGPE Produced WOF values are located
#ifdef __ASSEMBLER__
.endm
#else
diff --git a/import/chips/p9/procedures/hwp/lib/p9_pm_hcd_flags.h b/import/chips/p9/procedures/hwp/lib/p9_pm_hcd_flags.h
index 13509e17..1b940919 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_pm_hcd_flags.h
+++ b/import/chips/p9/procedures/hwp/lib/p9_pm_hcd_flags.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -65,6 +65,7 @@ enum PM_GPE_OCCFLG_DEFS
PIB_I2C_MASTER_ENGINE_2_LOCK_BIT1 = 19, //BIT0 ored BIT1 gives the field
PIB_I2C_MASTER_ENGINE_3_LOCK_BIT0 = 20, //BIT0 ored BIT1 gives the field
PIB_I2C_MASTER_ENGINE_3_LOCK_BIT1 = 21, //BIT0 ored BIT1 gives the field
+ PGPE_OCS_DIRTY = 26,
PGPE_PM_RESET_SUPPRESS = 27,
WOF_HCODE_MODE_BIT0 = 28,
WOF_HCODE_MODE_BIT1 = 29,
@@ -77,6 +78,7 @@ enum PM_GPE_OCCFLG2_DEFS
{
OCCFLG2_DEAD_CORES_START = 0,
OCCFLG2_DEAD_CORES_LENGTH = 24,
+ OCCFLG2_ENABLE_PRODUCE_WOF_VALUES = 24,
OCCFLG2_PGPE_HCODE_FIT_ERR_INJ = 27,
PM_CALLOUT_ACTIVE = 28,
STOP_RECOVERY_TRIGGER_ENABLE = 29,
@@ -127,6 +129,7 @@ enum PM_CME_FLAGS_DEFS
CME_FLAGS_DROOP_SUSPEND_ENTRY = 14,
CME_FLAGS_SAFE_MODE = 16,
CME_FLAGS_PSTATES_SUSPENDED = 17,
+ CME_FLAGS_DB0_COMM_RECV_STARVATION_CNT_ENABLED = 18,
CME_FLAGS_SPWU_CHECK_ENABLE = 22,
CME_FLAGS_BLOCK_ENTRY_STOP11 = 23,
CME_FLAGS_PSTATES_ENABLED = 24,
diff --git a/import/chips/p9/procedures/hwp/lib/p9_pstates_cmeqm.h b/import/chips/p9/procedures/hwp/lib/p9_pstates_cmeqm.h
index 1958a801..4c7bef05 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_pstates_cmeqm.h
+++ b/import/chips/p9/procedures/hwp/lib/p9_pstates_cmeqm.h
@@ -35,6 +35,7 @@
#define __P9_PSTATES_CME_H__
#include <p9_pstates_common.h>
+#include <p9_hcd_memmap_base.H>
/// @}
@@ -161,6 +162,23 @@ typedef struct
uint16_t r_core_header;
} resistance_entry_t;
+typedef struct __attribute__((packed))
+{
+ uint16_t r_package_common;
+ uint16_t r_quad;
+ uint16_t r_core;
+ uint16_t r_quad_header;
+ uint16_t r_core_header;
+ uint8_t r_vdm_cal_version;
+ uint8_t r_avg_min_scale_fact;
+ uint16_t r_undervolt_vmin_floor_limit;
+ uint8_t r_min_bin_protect_pc_adder;
+ uint8_t r_min_bin_protect_bin_adder;
+ uint8_t r_undervolt_allowed;
+ uint8_t reserve[10];
+}
+resistance_entry_per_quad_t;
+
typedef struct
{
poundw_entry_t poundw[NUM_OP_POINTS];
@@ -178,6 +196,35 @@ typedef struct
PoundW_data vpd_w_data;
} LP_VDMParmBlock;
+typedef struct __attribute__((packed))
+{
+ uint16_t ivdd_tdp_ac_current_10ma;
+ uint16_t ivdd_tdp_dc_current_10ma;
+ uint8_t vdm_overvolt_small_thresholds;
+ uint8_t vdm_large_extreme_thresholds;
+ uint8_t vdm_normal_freq_drop; // N_S and N_L Drop
+ uint8_t vdm_normal_freq_return; // L_S and S_N Return
+ uint8_t vdm_vid_compare_per_quad[MAX_QUADS_PER_CHIP];
+ uint8_t vdm_cal_state_avg_min_per_quad[MAX_QUADS_PER_CHIP];
+ uint16_t vdm_cal_state_vmin;
+ uint8_t vdm_cal_state_avg_core_dts;
+ uint16_t vdm_cal_state_avg_core_current;
+ uint16_t vdm_spare;
+}
+poundw_entry_per_quad_t;
+
+typedef struct __attribute__((packed))
+{
+ poundw_entry_per_quad_t poundw[NUM_OP_POINTS];
+ resistance_entry_per_quad_t resistance_data;
+}
+PoundW_data_per_quad;
+
+
+typedef struct
+{
+ PoundW_data_per_quad vpd_w_data;
+} LP_VDMParmBlock_PerQuad;
/// The layout of the data created by the Pstate table creation firmware for
/// consumption by the Pstate GPE. This data will reside in the Quad
diff --git a/import/chips/p9/procedures/hwp/lib/p9_pstates_common.h b/import/chips/p9/procedures/hwp/lib/p9_pstates_common.h
index f97ea82f..5d3c1dbe 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_pstates_common.h
+++ b/import/chips/p9/procedures/hwp/lib/p9_pstates_common.h
@@ -240,6 +240,21 @@ typedef struct
} SysPowerDistParms;
+/// AVSBUS Topology
+///
+/// AVS Bus and Rail numbers for VDD, VDN, VCS, and VIO
+///
+typedef struct
+{
+ uint8_t vdd_avsbus_num;
+ uint8_t vdd_avsbus_rail;
+ uint8_t vdn_avsbus_num;
+ uint8_t vdn_avsbus_rail;
+ uint8_t vcs_avsbus_num;
+ uint8_t vcs_avsbus_rail;
+ uint8_t vio_avsbus_num;
+ uint8_t vio_avsbus_rail;
+} AvsBusTopology_t;
//
// WOF Voltage, Frequency Ratio Tables
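A hypothetical example of how a platform might fill in the AvsBusTopology_t structure added above. The bus/rail numbers are illustrative only and do not come from any shipped VPD, and the include path is assumed.

#include "p9_pstates_common.h"   // for AvsBusTopology_t (path assumed)

// Illustrative values only: VDD and VCS on AVS bus 0, VDN and VIO on AVS bus 1,
// each on rail 0.
static const AvsBusTopology_t example_avs_topology =
{
    .vdd_avsbus_num  = 0,
    .vdd_avsbus_rail = 0,
    .vdn_avsbus_num  = 1,
    .vdn_avsbus_rail = 0,
    .vcs_avsbus_num  = 0,
    .vcs_avsbus_rail = 0,
    .vio_avsbus_num  = 1,
    .vio_avsbus_rail = 0,
};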
diff --git a/import/chips/p9/procedures/hwp/lib/p9_pstates_occ.h b/import/chips/p9/procedures/hwp/lib/p9_pstates_occ.h
index 086776e3..053f6d95 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_pstates_occ.h
+++ b/import/chips/p9/procedures/hwp/lib/p9_pstates_occ.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -205,6 +205,8 @@ typedef struct
// AC tdp vdd nominal
uint16_t lac_tdp_vdd_nominal_10ma;
+ AvsBusTopology_t avs_bus_topology;
+
} __attribute__((aligned(128))) OCCPstateParmBlock;
#ifdef __cplusplus
diff --git a/import/chips/p9/procedures/hwp/lib/p9_pstates_pgpe.h b/import/chips/p9/procedures/hwp/lib/p9_pstates_pgpe.h
index ebddeaed..93972404 100644
--- a/import/chips/p9/procedures/hwp/lib/p9_pstates_pgpe.h
+++ b/import/chips/p9/procedures/hwp/lib/p9_pstates_pgpe.h
@@ -350,6 +350,10 @@ typedef struct
//Jump-value slopes
int16_t PsVDMJumpSlopes[VPD_NUM_SLOPES_REGION][NUM_JUMP_VALUES];
+ uint8_t pad2[2];
+
+ //AvsBusTopology
+ AvsBusTopology_t avs_bus_topology;
// @todo DPLL Droop Settings. These need communication to SGPE for STOP
diff --git a/import/chips/p9/procedures/ppe/iota/iota_uih.c b/import/chips/p9/procedures/ppe/iota/iota_uih.c
index 546def67..28272661 100644
--- a/import/chips/p9/procedures/ppe/iota/iota_uih.c
+++ b/import/chips/p9/procedures/ppe/iota/iota_uih.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2017 */
+/* COPYRIGHT 2017,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -32,6 +32,9 @@ int g_eimr_stack_ctr = -1;
uint64_t g_eimr_override_stack[IOTA_NUM_EXT_IRQ_PRIORITIES];
uint64_t g_eimr_override = 0x0000000000000000;
uint64_t g_ext_irq_vector = 0;
+uint32_t g_db0_pending_fit_tick_count = 0;
+uint32_t g_comm_recv_pending_fit_tick_count = 0;
+uint32_t g_intercme_in0_pending_tick_count = 0;
// Unified IRQ priority and masking handler.
// - Locates the highest priority IRQ task vector that has at least one of its
@@ -51,13 +54,50 @@ uint32_t iota_uih(void)
do
{
- if(ext_irq_vectors_cme[iPrtyLvl][IDX_PRTY_VEC] & g_ext_irq_vector)
+ //Note: Special handling of DB0/COMM_RECV to handle the DB0/COMM_RECV
+ //starvation case.
+ //
+ //Reason: DB0 (Quad Manager CME) and COMM_RECV (Sibling CME) are lower priority
+ //than the STOP related interrupts,
+ //and can stay pending for a very long time (~ms scale) on systems with a
+ //high frequency of STOP requests. This can then prevent the PGPE from
+ //completing OCC directed IPC operations within the expected
+ //time bounds (< 8ms).
+ //
+ //Mechanism:
+ //1) In FIT: Every FIT tick, we check if DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME)
+ //is pending. If DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME) is seen pending for
+ //more than DB0_FIT_TICK_THRESHOLD/COMM_RECV_FIT_TICK_THRESHOLD FIT ticks,
+ //then we take action in the UIH.
+ //
+ //2) In UIH: We set the priority level to IDX_PRTY_LVL_DB0/IDX_PRTY_LVL_COMM_RECVD, and mask
+ //everything except Priority 0 (xstop, exceptions, etc). This then allows the
+ //pending DB0 to complete.
+ if(g_db0_pending_fit_tick_count > DB0_FIT_TICK_THRESHOLD)
+ {
+ bFound = 1;
+ iPrtyLvl = IDX_PRTY_LVL_DB0;
+ break;
+ }
+ else if(g_comm_recv_pending_fit_tick_count > COMM_RECV_FIT_TICK_THRESHOLD)
+ {
+ bFound = 1;
+ iPrtyLvl = IDX_PRTY_LVL_COMM_RECVD;
+ break;
+ }
+ else if(g_intercme_in0_pending_tick_count > INTERCME_IN0_FIT_TICK_THRESHOLD)
+ {
+ bFound = 1;
+ iPrtyLvl = IDX_PRTY_LVL_INTERCME_IN0;
+ break;
+ }
+ else if(ext_irq_vectors_cme[iPrtyLvl][IDX_PRTY_VEC] & g_ext_irq_vector)
{
bFound = 1;
break;
}
}
- while(++iPrtyLvl < (IOTA_NUM_EXT_IRQ_PRIORITIES - 1)); //No need to check DISABLED.
+ while(++iPrtyLvl < (IOTA_NUM_EXT_IRQ_PRIORITIES - 1)); //No need to check DISABLED.
// Only manipulate EIMR masks for task level prty levels.
// Let shared non-task IRQs (iPrtyLvl=0) be processed by
@@ -82,9 +122,38 @@ uint32_t iota_uih(void)
}
// 3. Write the new mask for this priority level.
- out64(CME_LCL_EIMR, ext_irq_vectors_cme[iPrtyLvl][IDX_MASK_VEC] |
- g_eimr_override);
-
+ //Note: Special handling of DB0/COMM_RECV to handle the DB0/COMM_RECV
+ //starvation case.
+ //
+ //Reason: DB0 (Quad Manager CME) and COMM_RECV (Sibling CME) are lower priority
+ //than the STOP related interrupts,
+ //and can stay pending for a very long time (~ms scale) on systems with a
+ //high frequency of STOP requests. This can then prevent the PGPE from
+ //completing OCC directed IPC operations within the expected
+ //time bounds (< 8ms).
+ //
+ //Mechanism:
+ //1) In FIT: Every FIT tick, we check if DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME)
+ //is pending. If DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME) is seen pending for
+ //more than DB0_FIT_TICK_THRESHOLD/COMM_RECV_FIT_TICK_THRESHOLD FIT ticks,
+ //then we take action in the UIH.
+ //
+ //2) In UIH: We set the priority level to IDX_PRTY_LVL_DB0/IDX_PRTY_LVL_COMM_RECVD, and mask
+ //everything except Priority 0 (xstop, exceptions, etc). This then allows the
+ //pending DB0 to complete.
+ if ((g_db0_pending_fit_tick_count > DB0_FIT_TICK_THRESHOLD) ||
+ (g_comm_recv_pending_fit_tick_count > COMM_RECV_FIT_TICK_THRESHOLD) ||
+ (g_intercme_in0_pending_tick_count > INTERCME_IN0_FIT_TICK_THRESHOLD))
+ {
+ PK_TRACE_INF("UIH: Starvation Detected. Overriding Mask!");
+ out64(CME_LCL_EIMR, (ext_irq_vectors_cme[0][IDX_MASK_VEC] |
+ g_eimr_override));
+ }
+ else
+ {
+ out64(CME_LCL_EIMR, ext_irq_vectors_cme[iPrtyLvl][IDX_MASK_VEC] |
+ g_eimr_override);
+ }
}
else
{
diff --git a/import/chips/p9/procedures/ppe/pk/ppe42/pk_panic_codes.h b/import/chips/p9/procedures/ppe/pk/ppe42/pk_panic_codes.h
index ec78f923..3504405b 100644
--- a/import/chips/p9/procedures/ppe/pk/ppe42/pk_panic_codes.h
+++ b/import/chips/p9/procedures/ppe/pk/ppe42/pk_panic_codes.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2017 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -49,7 +49,7 @@ typedef enum
PK_DEFAULT_SPECIAL_HANDLER = 0x0007,
PPE42_PHANTOM_INTERRUPT = 0x0008,
PPE42_ILLEGAL_INSTRUCTION = 0x0009,
- PK_UNUSED_000a = 0x000a,
+ PPE42_PIB_RESET_NOT_RECOVER = 0x000a,
PK_UNUSED_000d = 0x000d,
PK_UNUSED_001c = 0x001c,
PK_UNUSED_001d = 0x001d,
diff --git a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_exceptions.S b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_exceptions.S
index 2c773397..56cba14e 100644
--- a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_exceptions.S
+++ b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_exceptions.S
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -583,4 +583,49 @@ ctx_pop:
rfi
+__special_machine_check_handler:
+
+ stwu %r1, -PK_CTX_SIZE(%r1)
+ stw %r0, PK_CTX_GPR0(%r1)
+ stvd %d3, PK_CTX_GPR3(%r1)
+ stvd %d5, PK_CTX_GPR5(%r1)
+ stvd %d7, PK_CTX_GPR7(%r1)
+ stvd %d9, PK_CTX_GPR9(%r1)
+ stvd %d28, PK_CTX_GPR28(%r1)
+ stvd %d30, PK_CTX_GPR30(%r1)
+ mflr %r3
+ stw %r3, PK_CTX_LR(%r1)
+ mfcr %r3
+ mfsprg0 %r4
+ stvd %d3, PK_CTX_CR(%r1)
+ mfxer %r3
+ mfctr %r4
+ stvd %d3, PK_CTX_XER(%r1)
+ mfsrr0 %r3
+ mfsrr1 %r4
+ stvd %d3, PK_CTX_SRR0(%r1)
+#if defined(__PPE__)
+ bl __ppe42_pib_reset_handler
+#endif
+ lwz %r0, PK_CTX_GPR0(%r1)
+ lvd %d7, PK_CTX_SRR0(%r1)
+ mtsrr1 %r8
+ mtsrr0 %r7
+ lvd %d5, PK_CTX_XER(%r1)
+ mtctr %r6
+ mtxer %r5
+ lvd %d30, PK_CTX_GPR30(%r1)
+ lvd %d28, PK_CTX_GPR28(%r1)
+ lvd %d9, PK_CTX_GPR9(%r1)
+ lvd %d7, PK_CTX_GPR7(%r1)
+ lvd %d5, PK_CTX_GPR5(%r1)
+ lvd %d3, PK_CTX_CR(%r1) ## CR,SPRG0
+ mtcr0 %r3
+ lwz %r4, PK_CTX_LR(%r1)
+ mtlr %r4
+ lvd %d3, PK_CTX_GPR3(%r1)
+ addi %r1, %r1, PK_CTX_SIZE
+
+ rfi
+
/// \endcond
diff --git a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_irq_core.c b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_irq_core.c
index d472c183..87eafa40 100644
--- a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_irq_core.c
+++ b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_irq_core.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2017 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -34,8 +34,50 @@
#define __PPE42_IRQ_CORE_C__
+
#include "pk.h"
+uint32_t G_pib_reset_flag = 0;
+
+#ifdef __PPE__
+
+void
+__ppe42_pib_reset_handler()
+{
+ //PK_TRACE("Entered 1 ppe42_pib_reset_handler");
+ uint32_t srr1 = mfspr(SPRN_SRR1);
+
+ // assume the PIB is being reset, which causes the timeout error
+ if (((srr1 & MSR_SIBRC) == MSR_SIBRC))
+ {
+ // if we have already waited for the PIB reset and it still fails, panic
+ if (G_pib_reset_flag == 10 )
+ {
+ G_pib_reset_flag = 0;
+ PK_PANIC(PPE42_PIB_RESET_NOT_RECOVER);
+ }
+
+ // note pib reset is being detected
+ // this flag will be cleared by fit timer if pib reset recovers
+ G_pib_reset_flag++;
+
+ // Delay to wait for the PIB reset to complete
+ volatile uint32_t loop;
+
+ for(loop = 0; loop < 6400; loop++);
+
+ PK_TRACE_INF("PIB reset flag value %x", G_pib_reset_flag);
+
+ }
+ else
+ {
+ // panic for all other pib return codes
+ PK_PANIC(PPE42_MACHINE_CHECK_PANIC);
+ }
+}
+#endif
+
+
#ifndef STATIC_IRQ_TABLE
Ppe42IrqHandler __ppe42_irq_handlers[EXTERNAL_IRQS + 1];
#endif
diff --git a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_scom.h b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_scom.h
index 36e5aa1f..df64095b 100755
--- a/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_scom.h
+++ b/import/chips/p9/procedures/ppe/pk/ppe42/ppe42_scom.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2017 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -49,6 +49,7 @@ extern "C" {
#endif
+
/// PPE Load Virtual Double operation
#define PPE_LVD(_m_address, _m_data) \
asm volatile \
@@ -149,6 +150,9 @@ extern inline uint32_t getscom(const uint32_t i_chiplet, const uint32_t i_addres
extern inline void putscom_norc(const uint32_t i_address, uint64_t i_data)
{
PPE_STVD(i_address, i_data);
+#ifdef PK_MACHINE_HANDLER_SUPPPORT
+ asm volatile ("sync");
+#endif
}
#ifdef __cplusplus
diff --git a/import/chips/p9/procedures/ppe_closed/cme/cme_p9a10.mk b/import/chips/p9/procedures/ppe_closed/cme/cme_p9a10.mk
new file mode 100644
index 00000000..021e9017
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/cme/cme_p9a10.mk
@@ -0,0 +1,85 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: import/chips/p9/procedures/ppe_closed/cme/cme_p9a10.mk $
+#
+# OpenPOWER HCODE Project
+#
+# COPYRIGHT 2016,2019
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+CME_TARGET := cme_p9a10
+IMAGE := $(CME_TARGET)
+
+#Select KERNEL
+$(IMAGE)_KERNEL:=__IOTA__
+#$(IMAGE)_KERNEL:=__PK__
+
+# Options for Platforms specific
+$(IMAGE)_COMMONFLAGS = -DNIMBUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DCUMULUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DAXONE_DD_LEVEL=10
+
+$(IMAGE)_COMMONFLAGS+= -DPK_TRACE_LEVEL=1
+
+$(IMAGE)_COMMONFLAGS+= -DLAB_P9_TUNING=0
+$(IMAGE)_COMMONFLAGS+= -DEPM_P9_TUNING=0
+$(IMAGE)_COMMONFLAGS+= -DEPM_BROADSIDE_SCAN0=0
+
+$(IMAGE)_COMMONFLAGS+= -DSIMICS_TUNING=0
+$(IMAGE)_COMMONFLAGS+= -DUSE_SIMICS_IO=0
+
+include $(CME_SRCDIR)/cme_common.mk
+OBJS := $(CME_OBJS)
+
+$(call BUILD_PPEIMAGE)
+
+
+
+## ## Bin header
+ IMAGE=cpmr_header_p9a10
+ IMAGE_EDITOR=cmeImgEdit.exe
+##
+## # Target tool chain
+ $(IMAGE)_TARGET=PPE
+##
+## #linkscript to use
+ $(IMAGE)_LINK_SCRIPT=linkcpmr.cmd
+##
+ OBJS = stop_cme/p9_cme_cpmr.o
+
+$(call ADD_BINHEADER_INCDIR,$(IMAGE),\
+ $(CME_SRCDIR)/stop_cme \
+ $(CME_SRCDIR)/pstate_cme \
+ $(PK_SRCDIR)/kernel \
+ $(PK_SRCDIR)/ppe42 \
+ $(PK_SRCDIR)/trace \
+ $(PK_SRCDIR)/$(_PPE_TYPE) \
+ $(PM_LIBDIR)/include \
+ $(PM_LIBDIR)/include/registers \
+ $(PM_LIBDIR)/common \
+ $(HCODE_LIBDIR) \
+ $(HCODE_COMMON_LIBDIR) \
+ $(ROOTPATH)/chips/p9/procedures/hwp/lib/ \
+ $(ROOTPATH)/chips/p9/utils/imageProcs/ \
+ )
+
+gitsha := $(shell git log -1 --pretty=format:"%h")
+ $(call BUILD_BINHEADER,$(IMAGEPATH)/$(CME_TARGET)/$(CME_TARGET).bin, \
+ $(ROOTPATH)/chips/p9/procedures/utils/stopreg/selfRest.bin \
+ $(gitsha))
diff --git a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_iota_main.c b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_iota_main.c
index f9632f45..1c81eacd 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_iota_main.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_iota_main.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2017,2018 */
+/* COPYRIGHT 2017,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -46,6 +46,8 @@ CmeFitRecord G_cme_fit_record = {0, 0, 0, 0, 0xFFFFFFFF, 0};
#endif
+void p9_cme_pstate_db0_comm_recv_intercme_in0_pending_counter();
+
uint32_t G_CME_LCL_EINR = CME_LCL_EINR;
uint32_t G_CME_LCL_EISR = CME_LCL_EISR;
uint32_t G_CME_LCL_EISR_CLR = CME_LCL_EISR_CLR;
@@ -90,7 +92,7 @@ void fit_handler()
if(BIT32(CPPM_CSAR_FIT_HCODE_ERROR_INJECT) & scom_data.words.upper)
{
- PKTRACE("CME FIT ERROR INJECT TRAP");
+ //PKTRACE("CME FIT ERROR INJECT TRAP");
PK_PANIC(CME_STOP_ENTRY_TRAP_INJECT);
}
@@ -102,6 +104,8 @@ void fit_handler()
p9_cme_core_livelock_buster();
#endif
+ //Handle DB0/Comm_Recv starvation case
+ p9_cme_pstate_db0_comm_recv_intercme_in0_pending_counter();
}
#endif //fit handler
diff --git a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_irq.h b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_irq.h
index c5a29801..170eb88f 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_irq.h
+++ b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_irq.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -48,6 +48,19 @@
#include <stdint.h>
+//CME_TSEL is set to 8, which means the FIT has a period of 1.04ms when the
+//nest frequency is 2000MHz. Ideally, the FIT period should be calculated from
+//the nest frequency, but the nest frequency is NOT plumbed to the CME and we
+//don't need to be highly accurate here.
+//Note: from the PGPE's perspective, the latency of the DB0 operation depends
+//on the amount of time DB0 is pending on the Quad Manager plus the time
+//COMM_RECV is pending on the sibling, because the COMM_RECV interrupt is
+//triggered by the DB0 handler on the Quad Manager. Therefore, we must set
+//COMM_RECV_FIT_TICK_THRESHOLD to be the smaller of the two.
+#define DB0_FIT_TICK_THRESHOLD 1 //Threshold for DB0 pending count (2ms)
+#define COMM_RECV_FIT_TICK_THRESHOLD 1 //Threshold for COMM_RECV pending count (2ms)
+#define INTERCME_IN0_FIT_TICK_THRESHOLD 1 //Threshold for INTERCME_IN0 pending count (2ms)
+
// Priority Levels
#define IDX_PRTY_LVL_HIPRTY 0
#define IDX_PRTY_LVL_DB3 1
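As a rough sanity check on the "(2ms)" figures attached to the thresholds earlier in this header, here is a sketch only, assuming the ~1.04ms FIT period quoted in the comment: an interrupt must be seen pending on more than the threshold number of consecutive FIT ticks before the UIH overrides the mask, so the worst-case detection latency is about (threshold + 1) FIT periods.

#include <stdio.h>

#define FIT_PERIOD_US           1040    // ~1.04ms FIT period at a 2000MHz nest clock
#define DB0_FIT_TICK_THRESHOLD  1       // mirrors the threshold defined above

int main(void)
{
    // Detection requires the pending count to exceed the threshold, i.e. two
    // FIT ticks with the threshold set to 1: roughly 2 * 1.04ms ~= 2.1ms.
    unsigned worst_case_us = (DB0_FIT_TICK_THRESHOLD + 1) * FIT_PERIOD_US;

    printf("worst-case starvation detection latency: ~%u us\n", worst_case_us);
    return 0;
}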
diff --git a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_main.c b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_main.c
index be6e5eda..5f420caf 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/p9_cme_main.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/p9_cme_main.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -170,7 +170,7 @@ main(int argc, char** argv)
#elif (CUMULUS_DD_LEVEL != 0)
#define PVR_CONST (0x42090800 | (((CUMULUS_DD_LEVEL ) / 10) << 8) | (CUMULUS_DD_LEVEL % 10))
#elif (AXONE_DD_LEVEL != 0)
-#define PVR_CONST (0x42091000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
+#define PVR_CONST (0x42090000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
#else
#define PVR_CONST 0
#endif
diff --git a/import/chips/p9/procedures/ppe_closed/cme/pk_app_cfg.h b/import/chips/p9/procedures/ppe_closed/cme/pk_app_cfg.h
index 1124ed8a..5a953734 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/pk_app_cfg.h
+++ b/import/chips/p9/procedures/ppe_closed/cme/pk_app_cfg.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -58,7 +58,7 @@
#define DISABLE_CME_DUAL_CAST 0
// NDD22 Secure Memory Support: RAM URMOR
-#if NIMBUS_DD_LEVEL >= 22 || CUMULUS_DD_LEVEL >= 13
+#if NIMBUS_DD_LEVEL >= 22 || CUMULUS_DD_LEVEL >= 13 || AXONE_DD_LEVEL >= 10
#define SMF_SUPPORT_ENABLE 1
@@ -104,6 +104,8 @@
#error "USE_PPE_IMPRECISE_MODE must be defined in order to enable USE_CME_QUEUED_SCOM or USE_CME_QUEUED_SCAN"
#endif
+#define PK_MACHINE_HANDLER_SUPPORT 0
+
// --------------------
#if NIMBUS_DD_LEVEL == 10
diff --git a/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_intercme.c b/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_intercme.c
index e802448e..9497442c 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_intercme.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_intercme.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -43,17 +43,21 @@
//
extern CmePstateRecord G_cme_pstate_record;
extern CmeRecord G_cme_record;
+extern uint32_t g_comm_recv_pending_fit_tick_count;
+extern uint32_t g_intercme_in0_pending_tick_count;
//
//InterCME_IN0 handler
//
void p9_cme_pstate_intercme_in0_irq_handler(void)
{
+ g_intercme_in0_pending_tick_count = 0;
p9_cme_pstate_process_db0_sibling();
}
void p9_cme_pstate_intercme_msg_handler(void)
{
+ g_comm_recv_pending_fit_tick_count = 0;
p9_cme_pstate_sibling_lock_and_intercme_protocol(INTERCME_MSG_LOCK_WAIT_ON_RECV);
}
@@ -119,6 +123,8 @@ void p9_cme_pstate_process_db0_sibling()
//Unmask EIMR[OCC_HEARTBEAT_LOST/4]
g_eimr_override &= ~BIT64(4);
+ out32(G_CME_LCL_FLAGS_OR, BIT32(CME_FLAGS_DB0_COMM_RECV_STARVATION_CNT_ENABLED));//Set Starvation Count enabled
+
//Clear Core GPMMR RESET_STATE_INDICATOR bit to show pstates have started
CME_PUTSCOM(PPM_GPMMR_CLR, G_cme_record.core_enabled, BIT64(15));
}
diff --git a/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_thread_db.c b/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_thread_db.c
index 72ba30ef..1d40203a 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_thread_db.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/pstate_cme/p9_cme_thread_db.c
@@ -56,6 +56,9 @@ extern CmePstateRecord G_cme_pstate_record;
extern cmeHeader_t* G_cmeHeader;
extern LocalPstateParmBlock* G_lppb;
extern uint8_t G_vdm_threshold_table[];
+extern uint32_t g_db0_pending_fit_tick_count;
+extern uint32_t g_comm_recv_pending_fit_tick_count;
+extern uint32_t g_intercme_in0_pending_tick_count;
cppm_cmedb0_t G_dbData;
@@ -75,6 +78,7 @@ void p9_cme_pstate_db0_start();
void p9_cme_pstate_db0_glb_bcast();
void p9_cme_pstate_db0_clip_bcast();
void p9_cme_pstate_update();
+void p9_cme_pstate_db0_comm_recv_intercme_in0_pending_counter();
//
//Doorbell0 interrupt handler
@@ -116,7 +120,7 @@ p9_cme_pgpe_hb_loss_handler(void* arg, PkIrqId irq)
if(FSafe)
{
- PK_TRACE_INF("Fsafe=0x%x", FSafe);
+ PK_TRACE_DBG("Fsafe=0x%x", FSafe);
G_cme_pstate_record.nextPstate = G_lppb->dpll_pstate0_value - FSafe;
G_cme_pstate_record.globalPstate = G_lppb->dpll_pstate0_value - FSafe;
p9_cme_pstate_update_analog();
@@ -135,7 +139,7 @@ p9_cme_pgpe_hb_loss_handler(void* arg, PkIrqId irq)
intercme_direct(INTERCME_DIRECT_IN2, INTERCME_DIRECT_ACK, 0);
- PK_TRACE_INF("RCVed Notify and ACKed");
+ PK_TRACE_DBG("RCVed Notify and ACKed");
out32(G_CME_LCL_FLAGS_CLR, BIT32(CME_FLAGS_PSTATES_ENABLED));
out32(G_CME_LCL_FLAGS_OR, BIT32(CME_FLAGS_SAFE_MODE) |
@@ -167,6 +171,67 @@ void p9_cme_pstate_db0_handler(void)
}
//
+//Doorbell0/Comm Recv pending counter(called every FIT tick)
+//
+void p9_cme_pstate_db0_comm_recv_intercme_in0_pending_counter()
+{
+ //Note: Special handling of DB0/COMM_RECV to handle the DB0/COMM_RECV
+ //starvation case.
+ //
+ //Reason: DB0 (Quad Manager CME) and COMM_RECV (Sibling CME) are lower priority
+ //than the STOP related interrupts,
+ //and can stay pending for a very long time (~ms scale) on systems with a
+ //high frequency of STOP requests. This can then prevent the PGPE from
+ //completing OCC directed IPC operations within the expected
+ //time bounds (< 8ms).
+ //
+ //Mechanism:
+ //1) In FIT: Every FIT tick, we check if DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME)
+ //is pending. If DB0 (on the Quad Manager)/COMM_RECV (on the Sibling CME) is seen pending for
+ //more than DB0_FIT_TICK_THRESHOLD/COMM_RECV_FIT_TICK_THRESHOLD FIT ticks,
+ //then we take action in the UIH.
+ //
+ //2) In UIH: We set the priority level to IDX_PRTY_LVL_DB0/IDX_PRTY_LVL_COMM_RECVD, and mask
+ //everything except Priority 0 (xstop, exceptions, etc). This then allows the
+ //pending DB0 to complete.
+ uint32_t cme_flags = in32(G_CME_LCL_FLAGS);
+
+ if (cme_flags & BIT32(CME_FLAGS_DB0_COMM_RECV_STARVATION_CNT_ENABLED))
+ {
+ if(G_cme_pstate_record.qmFlag)
+ {
+
+ if (cme_flags & BIT32(CME_FLAGS_CORE0_GOOD))
+ {
+ if (in32_sh(CME_LCL_EISR) & BIT64SH(36))
+ {
+ g_db0_pending_fit_tick_count++;
+ }
+ }
+ else
+ {
+ if (in32_sh(CME_LCL_EISR) & BIT64SH(37))
+ {
+ g_db0_pending_fit_tick_count++;
+ }
+ }
+ }
+ else
+ {
+ if (in32(CME_LCL_EISR) & BIT32(29))
+ {
+ g_comm_recv_pending_fit_tick_count++;
+ }
+
+ if(in32(CME_LCL_EISR) & BIT32(7))
+ {
+ g_intercme_in0_pending_tick_count++;
+ }
+ }
+ }
+}
+
+//
//Doorbell3 interrupt handler
//
//Note: This enabled on both QuadManagerCME and SiblingCME
@@ -211,7 +276,11 @@ void p9_cme_pstate_db3_handler(void)
if (db3.fields.cme_message_numbern == MSGID_DB3_ENTER_SAFE_MODE)
{
- out32(G_CME_LCL_FLAGS_OR, BIT32(CME_FLAGS_SAFE_MODE));
+ if (G_cme_pstate_record.updateAnalogError == 0)
+ {
+ out32(G_CME_LCL_FLAGS_OR, BIT32(CME_FLAGS_SAFE_MODE));
+ }
+
}
G_cme_pstate_record.skipSiblingLock = 0;
@@ -363,7 +432,7 @@ void p9_cme_pstate_db3_handler(void)
intercme_direct(INTERCME_DIRECT_IN2, INTERCME_DIRECT_ACK, 0);
}
- PK_TRACE_INF("PSTATE: DB3 Clip Exit");
+ PK_TRACE_DBG("PSTATE: DB3 Clip Exit");
}
else
@@ -386,10 +455,19 @@ void p9_cme_pstate_init()
uint64_t eimr_clr = 0;
uint64_t eimr_or = 0;
uint32_t resclk_data;
+ uint32_t pstate_offset = 0;
+
+ G_cmeHeader = (cmeHeader_t*)(CME_SRAM_HEADER_ADDR);
+ pstate_offset = G_cmeHeader->g_cme_pstate_region_offset;
- G_cmeHeader = (cmeHeader_t*)(CME_SRAM_HEADER_ADDR);
- G_lppb = (LocalPstateParmBlock*)(G_cmeHeader->g_cme_pstate_region_offset + CME_SRAM_BASE_ADDR);
- PK_TRACE_INF("PSTATE: Hdr=0x%x, LPPB=0x%x, Nominal_Freq_Mhz=%d ", (uint32_t)G_cmeHeader, (uint32_t)G_lppb,
+ if( G_cmeHeader->g_cme_qm_mode_flags & CME_QM_FLAG_PER_QUAD_VDM_ENABLE )
+ {
+ pstate_offset = G_cmeHeader->g_cme_pstate_offset << 5;
+ }
+
+ G_lppb = (LocalPstateParmBlock*)(pstate_offset + CME_SRAM_BASE_ADDR);
+
+ PK_TRACE_DBG("PSTATE: Hdr=0x%x, LPPB=0x%x, Nominal_Freq_Mhz=%d ", (uint32_t)G_cmeHeader, (uint32_t)G_lppb,
G_lppb->operating_points[NOMINAL].frequency_mhz);
// Pre-compute the value to be used as the SPURR reference during CME Boot and
@@ -651,6 +729,9 @@ void p9_cme_pstate_process_db0()
G_cme_pstate_record.updateAnalogError = 0;
uint64_t scom_data;
+ //Clear out db0_pending_tick_count
+ g_db0_pending_fit_tick_count = 0;
+
PK_TRACE_INF("PSTATE: Process DB0 Enter");
//Clear EISR and read DB0 register
@@ -771,7 +852,7 @@ inline void p9_cme_pstate_register()
if(register_enable)
{
- PK_TRACE_INF("PSTATE: DB0 Processing is Enabled");
+ PK_TRACE_DBG("PSTATE: DB0 Processing is Enabled");
//PGPE sends MSGID_DB0_REGISTER_DONE, if Pstates aren't active anymore.
//Otherwise, PGPE sends DB0 in the following order
@@ -822,7 +903,7 @@ inline void p9_cme_pstate_register()
{
if (register_enable)
{
- PK_TRACE_INF("PSTATE: Wait on Pstate Start");
+ PK_TRACE_DBG("PSTATE: Wait on Pstate Start");
//PGPE sends MSGID_DB0_REGISTER_DONE, if Pstates aren't active anymore.
//Otherwise, PGPE sends DB0 in the following order
@@ -847,7 +928,7 @@ inline void p9_cme_pstate_register()
}
}
- PK_TRACE_INF("PSTATE: Sib Register MsgCnt=%d", msgCnt);
+ PK_TRACE_DBG("PSTATE: Sib Register MsgCnt=%d", msgCnt);
}
}
}
@@ -885,6 +966,7 @@ void p9_cme_pstate_db0_start()
ack = MSGID_PCB_TYPE4_ACK_PSTATE_PROTO_ACK;
out32(G_CME_LCL_FLAGS_OR, BIT32(24));//Set Pstates Enabled
+ out32(G_CME_LCL_FLAGS_OR, BIT32(CME_FLAGS_DB0_COMM_RECV_STARVATION_CNT_ENABLED));//Set Starvation Count enabled
//Enable PMCR Interrupts (for good cores) when this task is done
g_eimr_override &= ~(uint64_t)(G_cme_record.core_enabled << SHIFT64(35));
@@ -925,7 +1007,6 @@ void p9_cme_pstate_db0_glb_bcast()
//Send type4(ack doorbell)
send_ack_to_pgpe(ack);
- G_cme_pstate_record.updateAnalogError = 0;
PK_TRACE_INF("PSTATE: DB0 GlbBcast Exit\n");
}
@@ -1026,12 +1107,12 @@ inline void p9_cme_pstate_db0_pmsr_updt()
//Set Core GPMMR RESET_STATE_INDICATOR bit to show pstates have stopped
CME_PUTSCOM(PPM_GPMMR_OR, G_cme_record.core_enabled, BIT64(15));
- PK_TRACE_INF("PSTATE: DB0 Safe Mode Exit");
+ PK_TRACE_INF("PSTATE: DB0 PMSR Updt Exit");
}
void p9_cme_pstate_notify_sib(INTERCME_DIRECT_INTF intf)
{
- PK_TRACE_INF("PSTATE: Notify Enter");
+ PK_TRACE_INF("PSTATE: Notify");
//Notify sibling CME(if any)
if(G_cme_pstate_record.siblingCMEFlag)
@@ -1049,7 +1130,7 @@ inline void p9_cme_pstate_freq_update(uint32_t cme_flags)
else
{
PK_TRACE_INF("PSTATE: Freq Updt Enter");
- PK_TRACE_INF("PSTATE: Dpll0=0x%x", G_lppb->dpll_pstate0_value);
+ PK_TRACE_DBG("PSTATE: Dpll0=0x%x", G_lppb->dpll_pstate0_value);
//Adjust DPLL
qppm_dpll_freq_t dpllFreq;
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_img_edit.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_img_edit.c
index a7e4ad48..ba6336ab 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_img_edit.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_img_edit.c
@@ -26,6 +26,7 @@
#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
+#include <time.h>
#include <stddef.h> /* offsetof */
#include <stdlib.h>
@@ -116,6 +117,18 @@ int main(int narg, char* argv[])
rewind(pSelfRest);
printf(" Self Restore size %s : %d (0x%X)\n", argv[3], selfRestSize, selfRestSize);
+ time_t buildTime = time(NULL);
+ struct tm* headerTime = localtime(&buildTime);
+ uint32_t temp = (((headerTime->tm_year + 1900) << 16) |
+ ((headerTime->tm_mon + 1) << 8) |
+ (headerTime->tm_mday));
+
+ printf(" Build date : %X -> %04d/%02d/%02d (YYYY/MM/DD)\n",
+ temp, headerTime->tm_year + 1900, headerTime->tm_mon + 1, headerTime->tm_mday);
+ fseek ( pImage , CPMR_BUILD_DATE_BYTE, SEEK_SET );
+ temp = htonl( temp );
+ fwrite(&temp, sizeof(uint32_t), 1, pImage );
+
fseek ( pImage , CPMR_SELF_RESTORE_OFFSET_BYTE , SEEK_SET );
temp = htonl( SELF_RESTORE_CPMR_OFFSET );
fwrite(&temp, sizeof(uint32_t), 1, pImage );
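The build-date field written above packs the date as ((year) << 16) | ((month) << 8) | day. A small standalone example (the date used is hypothetical) showing the packing and the matching decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    // Hypothetical build date 2019/03/15 packed with the scheme used above.
    uint32_t packed = (2019u << 16) | (3u << 8) | 15u;

    printf("Build date : %X -> %04u/%02u/%02u (YYYY/MM/DD)\n",
           packed, packed >> 16, (packed >> 8) & 0xFF, packed & 0xFF);
    // prints: Build date : 7E3030F -> 2019/03/15 (YYYY/MM/DD)
    return 0;
}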
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop.h b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop.h
index b7efe2f0..c42ebef5 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop.h
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -235,6 +235,12 @@ enum CME_STOP_SUSPEND_BLOCK
STOP_SUSPEND_SELECT = 0x1 //0bZxx1 for SUSPEND, 0bZxx0 for BLOCK
};
+enum SPR_ACTN
+{
+ SPR_SELF_SAVE = 0x00,
+ SPR_SELF_RESTORE = 0x01,
+};
+
enum CME_SCOM_RESTORE_CONST
{
@@ -320,6 +326,9 @@ void p9_cme_core_livelock_buster();
void p9_cme_stop_entry();
void p9_cme_stop_exit();
+void p9_cme_stop_self_execute(uint32_t, uint32_t);
+void p9_cme_stop_self_cleanup(uint32_t);
+
// CME STOP Interrupt Handlers
void p9_cme_stop_enter_handler(void);
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_entry.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_entry.c
index 15d45bd5..ab2e92d1 100755
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_entry.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_entry.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -50,6 +50,7 @@
extern CmeStopRecord G_cme_stop_record;
extern CmeRecord G_cme_record;
+extern uint64_t G_spattn_mask;
#if NIMBUS_DD_LEVEL != 10
@@ -242,6 +243,10 @@ p9_cme_stop_entry()
uint32_t core_mask = 0;
uint32_t core_raw = 0;
uint32_t core = 0;
+#if SMF_SUPPORT_ENABLE
+ uint32_t core_spattn = 0;
+ uint32_t self_save_core = 0;
+#endif
uint32_t thread = 0;
uint32_t pscrs = 0;
uint32_t no_state_loss = 0;
@@ -288,7 +293,7 @@ p9_cme_stop_entry()
if (!core)
{
// PM_ACTIVE can be phantom, only gives warning
- PK_TRACE_INF("WARNING: Only Phantom PM_ACTIVE to be Ignored. Return");
+ //PK_TRACE_INF("WARNING: Only Phantom PM_ACTIVE to be Ignored. Return");
return;
}
@@ -299,7 +304,7 @@ p9_cme_stop_entry()
if (((core & CME_MASK_C0) && (in32(G_CME_LCL_SISR) & BITS32(13, 2))) ||
((core & CME_MASK_C1) && (in32_sh(CME_LCL_SISR) & BITS64SH(61, 2))))
{
- PK_TRACE_INF("WARNING: Attn/Recov Present, Abort Entry and Return");
+ //PK_TRACE_INF("WARNING: Attn/Recov Present, Abort Entry and Return");
return;
}
@@ -427,7 +432,7 @@ p9_cme_stop_entry()
if (!core)
{
- PK_TRACE_INF("WARNING: STOP1 PM_ACTIVE to be Ignored. Return");
+ //PK_TRACE_INF("WARNING: STOP1 PM_ACTIVE to be Ignored. Return");
return;
}
@@ -612,13 +617,13 @@ p9_cme_stop_entry()
if (core & CME_MASK_C0)
{
CME_GETSCOM(RAS_STATUS, CME_MASK_C0, scom_data.value);
- PKTRACE("CheckA RAS_STATUS_UPPER Core0 %X", scom_data.words.upper);
+ //PKTRACE("CheckA RAS_STATUS_UPPER Core0 %X", scom_data.words.upper);
}
if (core & CME_MASK_C1)
{
CME_GETSCOM(RAS_STATUS, CME_MASK_C1, scom_data.value);
- PKTRACE("CheckA RAS_STATUS_UPPER Core1 %X", scom_data.words.upper);
+ //PKTRACE("CheckA RAS_STATUS_UPPER Core1 %X", scom_data.words.upper);
}
#endif
@@ -635,7 +640,7 @@ p9_cme_stop_entry()
CME_GETSCOM_AND(RAS_STATUS, core, scom_data.value);
#ifdef PLS_DEBUG
- PKTRACE("CheckB RAS_STATUS_AND_UPPER %X", scom_data.words.upper);
+ //PKTRACE("CheckB RAS_STATUS_AND_UPPER %X", scom_data.words.upper);
#endif
}
while((scom_data.words.upper & (BIT32(1) | BIT32(3) | BIT32(9) | BIT32(11) | BIT32(17) | BIT32(19) | BIT32(25) | BIT32(
@@ -649,7 +654,7 @@ p9_cme_stop_entry()
CME_GETSCOM_OR(RAS_STATUS, core, scom_data.value);
#ifdef PLS_DEBUG
- PKTRACE("CheckC RAS_STATUS_OR_LOWER[0] %X", scom_data.words.lower);
+ //PKTRACE("CheckC RAS_STATUS_OR_LOWER[0] %X", scom_data.words.lower);
#endif
}
while(scom_data.words.lower & BIT32(0));
@@ -661,7 +666,7 @@ p9_cme_stop_entry()
CME_GETSCOM_OR(THREAD_INFO, core, scom_data.value);
#ifdef PLS_DEBUG
- PKTRACE("CheckD THREAD_INFO_OR_UPPER[23] %X", scom_data.words.upper);
+ //PKTRACE("CheckD THREAD_INFO_OR_UPPER[23] %X", scom_data.words.upper);
#endif
}
while(scom_data.words.upper & BIT32(23));
@@ -673,13 +678,13 @@ p9_cme_stop_entry()
if (core & CME_MASK_C0)
{
CME_GETSCOM(THREAD_INFO, CME_MASK_C0, scom_data.value);
- PKTRACE("CheckE THREAD_INFO_UPPER[0:3] Core0 %X", scom_data.words.upper);
+ //PKTRACE("CheckE THREAD_INFO_UPPER[0:3] Core0 %X", scom_data.words.upper);
}
if (core & CME_MASK_C1)
{
CME_GETSCOM(THREAD_INFO, CME_MASK_C1, scom_data.value);
- PKTRACE("CheckE THREAD_INFO_UPPER[0:3] Core1 %X", scom_data.words.upper);
+ //PKTRACE("CheckE THREAD_INFO_UPPER[0:3] Core1 %X", scom_data.words.upper);
}
PK_TRACE("RAMMING Read CORE_THREAD_STATE[56:59] to find out which threads are stopped");
@@ -687,13 +692,13 @@ p9_cme_stop_entry()
if (core & CME_MASK_C0)
{
CME_GETSCOM(CORE_THREAD_STATE, CME_MASK_C0, scom_data.value);
- PKTRACE("CheckF CORE_THREAD_STATE[56:59] Core0 %X %X", scom_data.words.upper, scom_data.words.lower);
+ //PKTRACE("CheckF CORE_THREAD_STATE[56:59] Core0 %X %X", scom_data.words.upper, scom_data.words.lower);
}
if (core & CME_MASK_C1)
{
CME_GETSCOM(CORE_THREAD_STATE, CME_MASK_C1, scom_data.value);
- PKTRACE("CheckF CORE_THREAD_STATE[56:59] Core1 %X %X", scom_data.words.upper, scom_data.words.lower);
+ //PKTRACE("CheckF CORE_THREAD_STATE[56:59] Core1 %X %X", scom_data.words.upper, scom_data.words.lower);
}
#endif
@@ -706,7 +711,7 @@ p9_cme_stop_entry()
CME_GETSCOM_AND(THREAD_INFO, core, scom_data.value);
#ifdef PLS_DEBUG
- PKTRACE("CheckG THREAD_INFO_AND_UPPER[0:3] %X", scom_data.words.upper);
+ //PKTRACE("CheckG THREAD_INFO_AND_UPPER[0:3] %X", scom_data.words.upper);
#endif
}
while((scom_data.words.upper & BITS32(0, 4)) != BITS32(0, 4));
@@ -775,8 +780,9 @@ p9_cme_stop_entry()
}
#ifdef PLS_DEBUG
- PKTRACE("cXtX PSSCR %X %X G_pls %x core %d",
- scom_data.words.upper, scom_data.words.lower, G_pls[core_mask & 1][thread], core);
+ /* PKTRACE("cXtX PSSCR %X %X G_pls %x core %d",
+ scom_data.words.upper, scom_data.words.lower, G_pls[core_mask & 1][thread], core);
+ */
#endif
}
@@ -796,7 +802,7 @@ p9_cme_stop_entry()
if (core & CME_MASK_C0)
{
#ifdef PLS_DEBUG
- PKTRACE("SCRATCH1 %x %x", (G_scratch[0] >> 32), (G_scratch[0] & 0xffffffff));
+ //PKTRACE("SCRATCH1 %x %x", (G_scratch[0] >> 32), (G_scratch[0] & 0xffffffff));
#endif
CME_PUTSCOM(SCRATCH1, CME_MASK_C0, G_scratch[0]);
}
@@ -804,7 +810,7 @@ p9_cme_stop_entry()
if (core & CME_MASK_C1)
{
#ifdef PLS_DEBUG
- PKTRACE("SCRATCH1 %x %x", (G_scratch[1] >> 32), (G_scratch[1] & 0xffffffff));
+ //PKTRACE("SCRATCH1 %x %x", (G_scratch[1] >> 32), (G_scratch[1] & 0xffffffff));
#endif
CME_PUTSCOM(SCRATCH1, CME_MASK_C1, G_scratch[1]);
}
@@ -816,6 +822,67 @@ p9_cme_stop_entry()
#endif
+#if SMF_SUPPORT_ENABLE
+
+ if (G_cme_stop_record.req_level[0] >= STOP_LEVEL_4)
+ {
+ self_save_core |= CME_MASK_C0;
+ }
+
+ if (G_cme_stop_record.req_level[1] >= STOP_LEVEL_4)
+ {
+ self_save_core |= CME_MASK_C1;
+ }
+
+ self_save_core = self_save_core & core;
+
+ if ( self_save_core )
+ {
+
+ p9_cme_stop_self_execute(self_save_core, SPR_SELF_SAVE);
+
+ PK_TRACE("Poll for core stop again(pm_active=1)");
+
+ while((~(in32(G_CME_LCL_EINR))) & (self_save_core << SHIFT32(21)))
+ {
+ core_spattn = (in32_sh(CME_LCL_SISR) >> SHIFT64SH(33)) & self_save_core;
+
+ if (core_spattn)
+ {
+ PK_TRACE_ERR("ERROR: Core[%d] Special Attention Detected. Gard Core!", core_spattn);
+ CME_STOP_CORE_ERROR_HANDLER(self_save_core, core_spattn, CME_STOP_EXIT_SELF_RES_SPATTN);
+
+ PK_TRACE("Release PCB Mux back on Core via SICR[10/11]");
+ out32(G_CME_LCL_SICR_CLR, core_spattn << SHIFT32(11));
+
+ while((core_spattn & ~(in32(G_CME_LCL_SISR) >> SHIFT32(11))) != core_spattn);
+
+ PK_TRACE("PCB Mux Released on Core[%d]", core_spattn);
+ }
+
+ if (!self_save_core)
+ {
+
+#if NIMBUS_DD_LEVEL == 20 || DISABLE_CME_DUAL_CAST == 1
+
+ continue;
+
+#else
+
+ return;
+
+#endif
+
+ }
+ }
+
+ PK_TRACE("SF.RS: Self Save Completed, Core Stopped Again(pm_exit=0/pm_active=1)");
+
+ p9_cme_stop_self_cleanup(self_save_core);
+
+ }// if self_save_core
+
+#endif
// ====================================
@@ -1259,18 +1326,20 @@ p9_cme_stop_entry()
MARK_TRAP(SE_IS0_BEGIN)
//===========================
-#if !SKIP_ABORT
+ /*** DISABLE STOP4 to STOP2 abort because UV mode is incorrectly reached in that case ***
+ #if !SKIP_ABORT
- core_wakeup = core & (~G_cme_stop_record.core_blockwu);
- out32(G_CME_LCL_EIMR_CLR, (core_wakeup << SHIFT32(13)) |
- (core_wakeup << SHIFT32(15)) |
- (core_wakeup << SHIFT32(17)));
- sync();
- wrteei(0);
- out32(G_CME_LCL_EIMR_OR, BITS32(10, 12));
- wrteei(1);
+ core_wakeup = core & (~G_cme_stop_record.core_blockwu);
+ out32(G_CME_LCL_EIMR_CLR, (core_wakeup << SHIFT32(13)) |
+ (core_wakeup << SHIFT32(15)) |
+ (core_wakeup << SHIFT32(17)));
+ sync();
+ wrteei(0);
+ out32(G_CME_LCL_EIMR_OR, BITS32(10, 12));
+ wrteei(1);
-#endif
+ #endif
+ */
//===================
MARK_TRAP(SE_IS0_END)
@@ -1315,13 +1384,13 @@ p9_cme_stop_entry()
if ((core & CME_MASK_C0) && (in32(G_CME_LCL_SISR) & BITS32(12, 4)))
{
- PK_TRACE_INF("WARNING: Core0 Xstop/Attn/Recov Present, Abort Entry");
+ //PK_TRACE_INF("WARNING: Core0 Xstop/Attn/Recov Present, Abort Entry");
core -= CME_MASK_C0;
}
if ((core & CME_MASK_C1) && (in32_sh(CME_LCL_SISR) & BITS64SH(60, 4)))
{
- PK_TRACE_INF("WARNING: Core1 Xstop/Attn/Recov Present, Abort Entry");
+ //PK_TRACE_INF("WARNING: Core1 Xstop/Attn/Recov Present, Abort Entry");
core -= CME_MASK_C1;
}
@@ -1531,7 +1600,7 @@ p9_cme_stop_entry()
MARK_TAG(SE_PURGE_L2_ABORT, core_aborted)
//=======================================
- PK_TRACE_INF("Abort: L2+NCU purge aborted by core[%d]", core_aborted);
+ //PK_TRACE_INF("Abort: L2+NCU purge aborted by core[%d]", core_aborted);
out32(G_CME_LCL_SICR_OR, BIT32(19) | BIT32(23));
}
}
@@ -1577,7 +1646,7 @@ p9_cme_stop_entry()
if (G_cme_record.disableSGPEHandoff)
{
- PK_TRACE_INF("SE.4+: Disable SGPE Handoff due to SGPE Halt");
+ //PK_TRACE_INF("SE.4+: Disable SGPE Handoff due to SGPE Halt");
break;
}
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_exit.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_exit.c
index 9fbac710..ab305ff4 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_exit.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_exit.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2020 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -46,6 +46,7 @@
extern CmeStopRecord G_cme_stop_record;
extern CmeRecord G_cme_record;
+uint64_t G_spattn_mask = 0;
#if HW386841_NDD1_DSL_STOP1_FIX
@@ -511,7 +512,6 @@ p9_cme_stop_exit()
uint32_t core = 0;
uint32_t core_mask = 0;
uint32_t core_spattn = 0;
- uint64_t spattn_mask = 0;
data64_t scom_data = {0};
#if !SPWU_AUTO
uint32_t spwu_stop = 0;
@@ -661,7 +661,7 @@ p9_cme_stop_exit()
if (core_stop1)
{
- PK_TRACE_INF("SX.1A: Core[%d] Requested Stop1 Exit", core_stop1);
+ //PK_TRACE_INF("SX.1A: Core[%d] Requested Stop1 Exit", core_stop1);
p9_cme_stop_exit_end(core_stop1, spwu_stop);
core = core - core_stop1;
@@ -1052,271 +1052,308 @@ p9_cme_stop_exit()
#if !SKIP_SELF_RESTORE
- PK_TRACE("Assert block interrupt to PC via SICR[2/3]");
- out32(G_CME_LCL_SICR_OR, core << SHIFT32(3));
+ p9_cme_stop_self_execute(core, SPR_SELF_RESTORE);
- PK_TRACE_PERF("SF.RS: Self Restore Prepare, Core Waking up(pm_exit=1) via SICR[4/5]");
- out32(G_CME_LCL_SICR_OR, core << SHIFT32(5));
-
- CME_PM_EXIT_DELAY
-
- PK_TRACE("Polling for core wakeup(pm_active=0) via EINR[20/21]");
+ //==========================
+ MARK_TRAP(SX_SRESET_THREADS)
+ //==========================
- while((in32(G_CME_LCL_EINR)) & (core << SHIFT32(21)));
+ PK_TRACE("Poll for core stop again(pm_active=1)");
- scom_data.value = pCmeImgHdr->g_cme_cpmr_PhyAddr & BITS64(13, 30); //HRMOR[13:42]
+ while((~(in32(G_CME_LCL_EINR))) & (core << SHIFT32(21)))
+ {
+ core_spattn = (in32_sh(CME_LCL_SISR) >> SHIFT64SH(33)) & core;
-#if NIMBUS_DD_LEVEL == 10
-#if !SKIP_RAM_HRMOR
+ if (core_spattn)
+ {
+ PK_TRACE_ERR("ERROR: Core[%d] Special Attention Detected. Gard Core!", core_spattn);
+ CME_STOP_CORE_ERROR_HANDLER(core, core_spattn, CME_STOP_EXIT_SELF_RES_SPATTN);
- PK_TRACE("Activate thread0 for RAM via THREAD_INFO[18]");
- CME_PUTSCOM(THREAD_INFO, core, BIT64(18));
+ PK_TRACE("Release PCB Mux back on Core via SICR[10/11]");
+ out32(G_CME_LCL_SICR_CLR, core_spattn << SHIFT32(11));
- PK_TRACE("Enable RAM mode via RAM_MODEREG[0]");
- CME_PUTSCOM(RAM_MODEREG, core, BIT64(0));
+ while((core_spattn & ~(in32(G_CME_LCL_SISR) >> SHIFT32(11))) != core_spattn);
- PK_TRACE("Set SPR mode to LT0-7 via SPR_MODE[20-27]");
- CME_PUTSCOM(SPR_MODE, core, BITS64(20, 8));
+ PK_TRACE("PCB Mux Released on Core[%d]", core_spattn);
+ }
- for (core_mask = 2; core_mask; core_mask--)
- {
- if (core & core_mask)
+ if (!core)
{
- PK_TRACE_DBG("Set SPRC to scratch for core[%d] via SCOM_SPRC", core_mask);
- CME_PUTSCOM(SCOM_SPRC, core_mask, ((core_mask & 1) ? BIT64(60) : 0));
- PK_TRACE_DBG("Load SCRATCH0 with HOMER+2MB address %x", scom_data.value);
-
-#if EPM_P9_TUNING
+#if NIMBUS_DD_LEVEL == 20 || DISABLE_CME_DUAL_CAST == 1
- CME_PUTSCOM((SCRATCH0 + (core_mask & 1)), core_mask, 0xA200000);
+ continue;
#else
- CME_PUTSCOM((SCRATCH0 + (core_mask & 1)), core_mask, scom_data.value);
+ if (d2u4_flag)
+ {
+ p9_cme_stop_exit_end((CME_MASK_BC - deeper_core), spwu_stop);
+ }
+
+ return;
#endif
+
}
}
- PK_TRACE("RAM: mfspr sprd , gpr0 via RAM_CTRL");
- CME_PUTSCOM(RAM_CTRL, core, RAM_MFSPR_SPRD_GPR0);
-
- PK_TRACE("RAM: mtspr hrmor, gpr0 via RAM_CTRL");
- CME_PUTSCOM(RAM_CTRL, core, RAM_MTSPR_HRMOR_GPR0);
-
- PK_TRACE("Disable thread0 for RAM via THREAD_INFO");
- CME_PUTSCOM(THREAD_INFO, core, 0);
+ PK_TRACE_PERF("SF.RS: Self Restore Completed, Core Stopped Again(pm_exit=0/pm_active=1)");
- PK_TRACE("Disable RAM mode via RAM_MODEREG");
- CME_PUTSCOM(RAM_MODEREG, core, 0);
+ p9_cme_stop_self_cleanup(core);
- PK_TRACE("Clear scratch/spr used in RAM");
- CME_PUTSCOM(SPR_MODE, core, 0);
- CME_PUTSCOM(SCOM_SPRC, core, 0);
+#endif
- if (core & CME_MASK_C0)
+ if (d2u4_flag)
{
- CME_PUTSCOM(SCRATCH0, CME_MASK_C0, 0);
+ core = CME_MASK_BC;
}
+ }
- if (core & CME_MASK_C1)
- {
- CME_PUTSCOM(SCRATCH1, CME_MASK_C1, 0);
- }
+ p9_cme_stop_exit_end(core, spwu_stop);
+
+#if NIMBUS_DD_LEVEL == 20 || DISABLE_CME_DUAL_CAST == 1
+
+ // NDD2: dual cast workaround loop end
+ }
#endif
-// Nimbus DD2+
-#else
-#if SMF_SUPPORT_ENABLE
+ //===========================
+ MARK_TRAP(ENDSCOPE_STOP_EXIT)
+ //===========================
-#if EPM_P9_TUNING
+ return;
+}
- CME_PUTSCOM(URMOR, core, 0xA200000);
+void
+p9_cme_stop_self_cleanup(uint32_t core)
+{
+ data64_t scom_data;
-#else
+ //Cleaning up thread scratch register after self restore.
+ if( CME_MASK_C0 & core )
+ {
+ CME_PUTSCOM(SCRATCH0, CME_MASK_C0, 0);
+ }
- PK_TRACE_INF("Core Wakes Up, Write URMOR with HOMER address");
- CME_PUTSCOM(URMOR, core, scom_data.value);
+ if( CME_MASK_C1 & core )
+ {
+ CME_PUTSCOM(SCRATCH1, CME_MASK_C1, 0);
+ }
-#endif
+ PK_TRACE("Restore SPATTN after self-restore");
+ CME_PUTSCOM(SPATTN_MASK, core, G_spattn_mask);
-#endif
+ PK_TRACE("Always Unfreeze IMA (by clearing bit 34) in case the CHTM is enabled to sample it");
+ CME_GETSCOM(IMA_EVENT_MASK, core, scom_data.value);
+ CME_PUTSCOM(IMA_EVENT_MASK, core, scom_data.value & ~BIT64(34));
-#if EPM_P9_TUNING
+ PK_TRACE("Drop block interrupt to PC via SICR[2/3]");
+ out32(G_CME_LCL_SICR_CLR, core << SHIFT32(3));
- CME_PUTSCOM(HRMOR, core, 0xA200000);
+ PK_TRACE("Clear pm_active status via EISR[20/21]");
+ out32(G_CME_LCL_EISR_CLR, core << SHIFT32(21));
-#else
+}
- PK_TRACE_PERF("Core Wakes Up, Write HRMOR with HOMER address");
- // Must not set bit 15 in HRMOR
- scom_data.words.upper = scom_data.words.upper & ~BIT32(15);
- CME_PUTSCOM(HRMOR, core, scom_data.value);
+void
+p9_cme_stop_self_execute(uint32_t core, uint32_t i_saveRestore )
+{
+ uint32_t core_mask;
+ data64_t scom_data;
+ cmeHeader_t* pCmeImgHdr = (cmeHeader_t*)(CME_SRAM_HEADER_ADDR);
+ scom_data.value = pCmeImgHdr->g_cme_cpmr_PhyAddr & BITS64(13, 30); //HRMOR[13:42]
-#endif
+ PK_TRACE("Assert block interrupt to PC via SICR[2/3]");
-#endif
+ out32(G_CME_LCL_SICR_OR, core << SHIFT32(3));
- PK_TRACE("Save off and mask SPATTN before self-restore");
- CME_GETSCOM(SPATTN_MASK, core, spattn_mask);
- CME_PUTSCOM(SPATTN_MASK, core, BITS64(0, 64));
+ PK_TRACE_PERF("SF.RS: Self Restore Prepare, Core Waking up(pm_exit=1) via SICR[4/5]");
+ out32(G_CME_LCL_SICR_OR, core << SHIFT32(5));
-#if !DISABLE_CORE_XSTOP_INJECTION
+ CME_PM_EXIT_DELAY
- PK_TRACE("Read WKUP_ERR_INJECT_MODE via CPMMR[8]");
+ PK_TRACE("Polling for core wakeup(pm_active=0) via EINR[20/21]");
- for (core_mask = 2; core_mask; core_mask--)
- {
- if (core & core_mask)
- {
- CME_GETSCOM(CPPM_CPMMR, core_mask, scom_data.value);
+ while((in32(G_CME_LCL_EINR)) & (core << SHIFT32(21)));
- if (scom_data.words.upper & BIT32(8))
- {
- PK_TRACE_INF("WARNING: Injecting a core[%d] xstop via C_LFIR[11]", core);
- CME_PUTSCOM(C_LFIR_OR, core_mask, BIT64(11));
- }
- }
- }
+#if NIMBUS_DD_LEVEL == 10
+#if !SKIP_RAM_HRMOR
-#endif
+ PK_TRACE("Activate thread0 for RAM via THREAD_INFO[18]");
+ CME_PUTSCOM(THREAD_INFO, core, BIT64(18));
- for (core_mask = 2; core_mask; core_mask--)
- {
- if (core & core_mask)
- {
- CME_GETSCOM(CPPM_CPMMR, core_mask, scom_data.value);
+ PK_TRACE("Enable RAM mode via RAM_MODEREG[0]");
+ CME_PUTSCOM(RAM_MODEREG, core, BIT64(0));
- if (scom_data.words.upper & BIT32(3))
- {
- scom_data.value = BIT64(59);
- }
- else
- {
- scom_data.value = 0;
- }
+ PK_TRACE("Set SPR mode to LT0-7 via SPR_MODE[20-27]");
+ CME_PUTSCOM(SPR_MODE, core, BITS64(20, 8));
- //Writing thread scratch register to
- // 1. Init Runtime wakeup mode for core.
- // 2. Signal Self Save Restore code for restore operation.
- CME_PUTSCOM(SCRATCH0, core_mask, scom_data.value);
- CME_PUTSCOM(SCRATCH1, core_mask, scom_data.value);
- CME_PUTSCOM(SCRATCH2, core_mask, scom_data.value);
- CME_PUTSCOM(SCRATCH3, core_mask, scom_data.value);
- }
- }
+ for (core_mask = 2; core_mask; core_mask--)
+ {
+ if (core & core_mask)
+ {
+ PK_TRACE_DBG("Set SPRC to scratch for core[%d] via SCOM_SPRC", core_mask);
+ CME_PUTSCOM(SCOM_SPRC, core_mask, ((core_mask & 1) ? BIT64(60) : 0));
- PK_TRACE_PERF("SF.RS: Self Restore Kickoff, S-Reset All Core Threads");
+ PK_TRACE_DBG("Load SCRATCH0 with HOMER+2MB address %x", scom_data.value);
- // Disable interrupts around the sreset to polling check to not miss the self-restore
- wrteei(0);
- CME_PUTSCOM(DIRECT_CONTROLS, core,
- BIT64(4) | BIT64(12) | BIT64(20) | BIT64(28));
- sync();
+#if EPM_P9_TUNING
- PK_TRACE("Poll for instruction running before drop pm_exit");
+ CME_PUTSCOM((SCRATCH0 + (core_mask & 1)), core_mask, 0xA200000);
- while((~(in32_sh(CME_LCL_SISR))) & (core << SHIFT64SH(47)));
+#else
+ CME_PUTSCOM((SCRATCH0 + (core_mask & 1)), core_mask, scom_data.value);
- wrteei(1);
+#endif
- //==========================
- MARK_TRAP(SX_SRESET_THREADS)
- //==========================
+ }
+ }
- PK_TRACE("Allow threads to run(pm_exit=0)");
- out32(G_CME_LCL_SICR_CLR, core << SHIFT32(5));
+ PK_TRACE("RAM: mfspr sprd , gpr0 via RAM_CTRL");
+ CME_PUTSCOM(RAM_CTRL, core, RAM_MFSPR_SPRD_GPR0);
- PK_TRACE("Poll for core stop again(pm_active=1)");
+ PK_TRACE("RAM: mtspr hrmor, gpr0 via RAM_CTRL");
+ CME_PUTSCOM(RAM_CTRL, core, RAM_MTSPR_HRMOR_GPR0);
- while((~(in32(G_CME_LCL_EINR))) & (core << SHIFT32(21)))
- {
- core_spattn = (in32_sh(CME_LCL_SISR) >> SHIFT64SH(33)) & core;
+ PK_TRACE("Disable thread0 for RAM via THREAD_INFO");
+ CME_PUTSCOM(THREAD_INFO, core, 0);
- if (core_spattn)
- {
- PK_TRACE_ERR("ERROR: Core[%d] Special Attention Detected. Gard Core!", core_spattn);
- CME_STOP_CORE_ERROR_HANDLER(core, core_spattn, CME_STOP_EXIT_SELF_RES_SPATTN);
+ PK_TRACE("Disable RAM mode via RAM_MODEREG");
+ CME_PUTSCOM(RAM_MODEREG, core, 0);
- PK_TRACE("Release PCB Mux back on Core via SICR[10/11]");
- out32(G_CME_LCL_SICR_CLR, core_spattn << SHIFT32(11));
+ PK_TRACE("Clear scratch/spr used in RAM");
+ CME_PUTSCOM(SPR_MODE, core, 0);
+ CME_PUTSCOM(SCOM_SPRC, core, 0);
- while((core_spattn & ~(in32(G_CME_LCL_SISR) >> SHIFT32(11))) != core_spattn);
+ if (core & CME_MASK_C0)
+ {
+ CME_PUTSCOM(SCRATCH0, CME_MASK_C0, 0);
+ }
- PK_TRACE("PCB Mux Released on Core[%d]", core_spattn);
- }
+ if (core & CME_MASK_C1)
+ {
+ CME_PUTSCOM(SCRATCH1, CME_MASK_C1, 0);
+ }
- if (!core)
- {
+#endif
+// Nimbus DD2+
+#else
-#if NIMBUS_DD_LEVEL == 20 || DISABLE_CME_DUAL_CAST == 1
+#if SMF_SUPPORT_ENABLE
- continue;
+#if EPM_P9_TUNING
+ CME_PUTSCOM(URMOR, core, 0xA200000);
+ CME_PUTSCOM(HRMOR, core, 0xA200000);
#else
- if (d2u4_flag)
- {
- p9_cme_stop_exit_end((CME_MASK_BC - deeper_core), spwu_stop);
- }
+ CME_PUTSCOM(URMOR, core, scom_data.value);
+ PK_TRACE_INF("SMF core wakes up, write URMOR with HOMER address" );
+ scom_data.words.upper = scom_data.words.upper & ~BIT32(15);
- return;
+ if( SPR_SELF_SAVE == i_saveRestore )
+ {
+ scom_data.value = pCmeImgHdr->g_cme_unsec_cpmr_PhyAddr & BITS64(13, 30); //Unsecure HOMER
+ PKTRACE("SMF core self save, write un-secure HOMER address");
+ }
-#endif
+ CME_PUTSCOM(HRMOR, core, scom_data.value);
- }
- }
+#endif //EPM_P9_TUNING
- for (core_mask = 2; core_mask; core_mask--)
- {
- if (core & core_mask)
- {
- //Cleaning up thread scratch register after self restore.
- CME_PUTSCOM(SCRATCH0, core_mask, 0);
- CME_PUTSCOM(SCRATCH1, core_mask, 0);
- CME_PUTSCOM(SCRATCH2, core_mask, 0);
- CME_PUTSCOM(SCRATCH3, core_mask, 0);
- }
- }
+#else //SMF Not supported
- PK_TRACE_PERF("SF.RS: Self Restore Completed, Core Stopped Again(pm_exit=0/pm_active=1)");
+#if EPM_P9_TUNING
- PK_TRACE("Restore SPATTN after self-restore");
- CME_PUTSCOM(SPATTN_MASK, core, spattn_mask);
+ CME_PUTSCOM(HRMOR, core, 0xA200000);
+#else
- PK_TRACE("Always Unfreeze IMA (by clearing bit 34) in case the CHTM is enabled to sample it");
- CME_GETSCOM(IMA_EVENT_MASK, core, scom_data.value);
- CME_PUTSCOM(IMA_EVENT_MASK, core, scom_data.value & ~BIT64(34));
+ PK_TRACE_INF("Non SMF core wakes up, write HRMOR with HOMER address");
+ scom_data.words.upper = scom_data.words.upper & ~BIT32(15);
+ CME_PUTSCOM(HRMOR, core, scom_data.value);
- PK_TRACE("Drop block interrupt to PC via SICR[2/3]");
- out32(G_CME_LCL_SICR_CLR, core << SHIFT32(3));
+#endif //EPM_P9_TUNING
- PK_TRACE("Clear pm_active status via EISR[20/21]");
- out32(G_CME_LCL_EISR_CLR, core << SHIFT32(21));
+#endif //SMF_SUPPORT_ENABLE
-#endif
+#endif //Nimbus DD2+
- if (d2u4_flag)
+ PK_TRACE("Save off and mask SPATTN before self-restore");
+ CME_GETSCOM(SPATTN_MASK, core, G_spattn_mask);
+ CME_PUTSCOM(SPATTN_MASK, core, BITS64(0, 64));
+
+#if !DISABLE_CORE_XSTOP_INJECTION
+
+ PK_TRACE("Read WKUP_ERR_INJECT_MODE via CPMMR[8]");
+
+ for (core_mask = 2; core_mask; core_mask--)
+ {
+ if (core & core_mask)
+ {
+ CME_GETSCOM(CPPM_CPMMR, core_mask, scom_data.value);
+
+ if (scom_data.words.upper & BIT32(8))
{
- core = CME_MASK_BC;
+ //PK_TRACE_INF("WARNING: Injecting a core[%d] xstop via C_LFIR[11]", core);
+ CME_PUTSCOM(C_LFIR_OR, core_mask, BIT64(11));
}
- }
- p9_cme_stop_exit_end(core, spwu_stop);
+ if( SPR_SELF_SAVE == i_saveRestore )
+ {
+ //Writing thread scratch register to
+ //Signal Self Save Restore code for save operation.
+ scom_data.words.upper = 0;
+ scom_data.words.lower = 1;
+ }
+ else
+ {
+ //Writing thread scratch register to
+ // 1. Init Runtime wakeup mode for core.
+ // 2. Signal Self Save Restore code for restore operation.
-#if NIMBUS_DD_LEVEL == 20 || DISABLE_CME_DUAL_CAST == 1
+ if (scom_data.words.upper & BIT32(3))
+ {
+ scom_data.value = BIT64(59);
+ }
+ else
+ {
+ scom_data.value = 0;
+ }
+ }
- // NDD2: dual cast workaround loop end
+ if( CME_MASK_C0 & core_mask )
+ {
+ CME_PUTSCOM(SCRATCH0, CME_MASK_C0, scom_data.value);
+ }
+
+ if( CME_MASK_C1 & core_mask )
+ {
+ CME_PUTSCOM(SCRATCH1, CME_MASK_C1, scom_data.value);
+ }
+ }
}
#endif
- //===========================
- MARK_TRAP(ENDSCOPE_STOP_EXIT)
- //===========================
+ PK_TRACE_PERF("SF.RS: Self Restore Kickoff, S-Reset All Core Threads");
+
+ // Disable interrupts around the sreset to polling check to not miss the self-restore
+ wrteei(0);
+
+ CME_PUTSCOM(DIRECT_CONTROLS, core,
+ BIT64(4) | BIT64(12) | BIT64(20) | BIT64(28));
+ sync();
+
+ //**DISABLE CHECK FOR instruction running to avoid race condition**
+ //PK_TRACE_INF("Poll for instruction running before drop pm_exit");
+ //while((~(in32_sh(CME_LCL_SISR))) & (core << SHIFT64SH(47)));
+
+ wrteei(1);
+
+ PK_TRACE_INF("Allow threads to run(pm_exit=0)");
+ out32(G_CME_LCL_SICR_CLR, core << SHIFT32(5));
- return;
}
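
A minimal sketch, not part of the patch, of the thread scratch-register selection performed by the new p9_cme_stop_self_execute() above; the SPR_SELF_SAVE/SPR_SELF_RESTORE values and the BIT32/BIT64 macros are stand-ins assumed for illustration (the real definitions live in headers not shown in this diff).

#include <stdint.h>

// Assumed stand-ins for macros/constants defined elsewhere in the hcode tree
#define BIT32(n)          (0x80000000u >> (n))
#define BIT64(n)          (0x8000000000000000ull >> (n))
#define SPR_SELF_SAVE     1u   // assumed value, illustration only
#define SPR_SELF_RESTORE  0u   // assumed value, illustration only

// Mirrors the scratch value chosen in p9_cme_stop_self_execute(): a self-save
// is signalled with lower word = 1, while a self-restore conveys the runtime
// wakeup mode read from CPMMR[3] via bit 59 (or 0 otherwise).
static uint64_t scratch_value(uint32_t i_saveRestore, uint32_t cpmmr_upper)
{
    if (i_saveRestore == SPR_SELF_SAVE)
    {
        return 1ull;
    }

    return (cpmmr_upper & BIT32(3)) ? BIT64(59) : 0ull;
}
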
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_init.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_init.c
index ff2be8c0..5c7d8663 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_init.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_init.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -42,6 +42,7 @@ p9_cme_stop_init()
uint32_t exit_first = 0;
uint32_t cme_flags = 0;
uint32_t core_mask = 0;
+ uint32_t bce_cpy_len = 0;
//--------------------------------------------------------------------------
// Parse CME Flags and Initialize Core States
@@ -104,11 +105,20 @@ p9_cme_stop_init()
cmeHeader_t* pCmeImgHdr = (cmeHeader_t*)(CME_SRAM_HEADER_ADDR);
+ if( pCmeImgHdr->g_cme_qm_mode_flags & CME_QM_FLAG_PER_QUAD_VDM_ENABLE )
+ {
+ bce_cpy_len = pCmeImgHdr->g_cme_custom_length;
+ }
+ else
+ {
+ bce_cpy_len = pCmeImgHdr->g_cme_max_spec_ring_length;
+ }
+
//right now a blocking call. Need to confirm this.
- start_cme_block_copy(CME_BCEBAR_1,
- (CME_IMAGE_CPMR_OFFSET + (pCmeImgHdr->g_cme_core_spec_ring_offset << 5)),
- pCmeImgHdr->g_cme_core_spec_ring_offset,
- pCmeImgHdr->g_cme_max_spec_ring_length);
+ start_cme_block_copy( CME_BCEBAR_1,
+ (CME_IMAGE_CPMR_OFFSET + (pCmeImgHdr->g_cme_core_spec_ring_offset << 5)),
+ pCmeImgHdr->g_cme_core_spec_ring_offset,
+ bce_cpy_len );
PK_TRACE_DBG("Setup: BCE Check for Copy Completed");
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_irq_handlers.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_irq_handlers.c
index 12f353b3..7f82bb8d 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_irq_handlers.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_cme_stop_irq_handlers.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -83,7 +83,7 @@ p9_cme_stop_pcwu_handler(void)
// if still wakeup for core with notify_select == cme, go exit
if (core)
{
- PK_TRACE_INF("PCWU Launching exit thread");
+ //PK_TRACE_INF("PCWU Launching exit thread");
out32(G_CME_LCL_EIMR_OR, BITS32(12, 10));
g_eimr_override |= BITS64(12, 10);
@@ -148,7 +148,7 @@ p9_cme_stop_spwu_handler(void)
}
}
- PK_TRACE_INF("Falling edge of SPWU, now clear spwu_done, eisr and flip eipr");
+ //PK_TRACE_INF("Falling edge of SPWU, now clear spwu_done, eisr and flip eipr");
out32(G_CME_LCL_SICR_CLR, BIT32((16 + core_index)));
out32(G_CME_LCL_EISR_CLR, BIT32((14 + core_index)));
out32(G_CME_LCL_EIPR_OR, BIT32((14 + core_index)));
@@ -159,7 +159,7 @@ p9_cme_stop_spwu_handler(void)
out32(G_CME_LCL_EISR_CLR, BIT32((14 + core_index)));
out32(G_CME_LCL_EIPR_CLR, BIT32((14 + core_index)));
out32(G_CME_LCL_SICR_OR, BIT32((16 + core_index)));
- PK_TRACE_INF("SPWU asserts again, clear eisr, flip eipr, re-assert spwu_done");
+ //PK_TRACE_INF("SPWU asserts again, clear eisr, flip eipr, re-assert spwu_done");
}
// if spwu truly dropped:
else
@@ -192,7 +192,7 @@ p9_cme_stop_spwu_handler(void)
if (spwu_rise)
{
- PK_TRACE_INF("SPWU Launching exit thread");
+ //PK_TRACE_INF("SPWU Launching exit thread");
out32(G_CME_LCL_EIMR_OR, BITS32(12, 10));
g_eimr_override |= BITS64(12, 10);
@@ -218,7 +218,7 @@ void
p9_cme_stop_rgwu_handler(void)
{
MARK_TRAP(STOP_RGWU_HANDLER)
- PK_TRACE_INF("RGWU Handler Trigger");
+ //PK_TRACE_INF("RGWU Handler Trigger");
out32(G_CME_LCL_EIMR_OR, BITS32(12, 10));
g_eimr_override |= BITS64(12, 10);
@@ -244,7 +244,7 @@ void
p9_cme_stop_enter_handler(void)
{
MARK_TRAP(STOP_ENTER_HANDLER)
- PK_TRACE_INF("PM_ACTIVE Handler Trigger");
+ //PK_TRACE_INF("PM_ACTIVE Handler Trigger");
// Abort Protection
out32(G_CME_LCL_EIMR_OR, BITS32(12, 10));
@@ -373,7 +373,7 @@ p9_cme_stop_db1_handler(void)
uint32_t suspend_ack = 0;
MARK_TRAP(STOP_DB1_HANDLER)
- PK_TRACE_INF("DB1 Handler Trigger");
+ //PK_TRACE_INF("DB1 Handler Trigger");
// Suspend DB should only come from the first good core
core = G_cme_pstate_record.firstGoodCoreMask;
diff --git a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_hcd_core_scominit.c b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_hcd_core_scominit.c
index 25121736..a9ef8e43 100644
--- a/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_hcd_core_scominit.c
+++ b/import/chips/p9/procedures/ppe_closed/cme/stop_cme/p9_hcd_core_scominit.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -78,8 +78,8 @@ p9_hcd_core_scominit(uint32_t core)
CME_PUTSCOM(CORE_ACTION1, core, 0xA854009775100000);
CME_PUTSCOM(CORE_FIRMASK, core, 0x0301D70000AB76FE);
#else
- CME_PUTSCOM(CORE_ACTION0, core, 0x14A800408A000040);
- CME_PUTSCOM(CORE_ACTION1, core, 0xBCFC00D7FF100040);
+ CME_PUTSCOM(CORE_ACTION0, core, 0x14A800408A000041);
+ CME_PUTSCOM(CORE_ACTION1, core, 0xBCFC00D7FF100041);
CME_PUTSCOM(CORE_FIRMASK, core, 0x0301D70000AB76BE);
// set mask for core_cs_recovery_handshake
diff --git a/import/chips/p9/procedures/ppe_closed/ippe/ioa/p9_abus_main.c b/import/chips/p9/procedures/ppe_closed/ippe/ioa/p9_abus_main.c
index fe4b2fd7..ae404153 100644
--- a/import/chips/p9/procedures/ppe_closed/ippe/ioa/p9_abus_main.c
+++ b/import/chips/p9/procedures/ppe_closed/ippe/ioa/p9_abus_main.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -70,6 +70,7 @@ void wait_ns(uint32_t i_ns);
void rxEnableAmpdac(t_gcr_addr* gcr_addr, const bool iEnable);
void powerDownBank(t_gcr_addr* gcr_addr, uint32_t iBankPowerDown);
void disableBank(t_gcr_addr* gcr_addr, uint32_t iBankPowerDown);
+void powerUpAll(t_gcr_addr* gcr_addr);
// The main function is called by the boot code
int main(int argc, char** argv)
@@ -96,6 +97,8 @@ int main(int argc, char** argv)
uint32_t currentRecalAbort = 0x0;
uint32_t count = 0x0;
uint32_t state = 0x0;
+ uint32_t resetCount = 0x0;
+ uint32_t waitCount = 0x0;
uint64_t work1Data = 0x0;
uint64_t work2Data = 0x0;
uint32_t workDoneCnt = 0x0;
@@ -148,10 +151,10 @@ int main(int argc, char** argv)
do
{
powerDownBank(&gcr_addr, bankPowerDown);
- //wait_ns(20000); // Wait for 20us
+ //wait_ns(20000); // Wait for 20us
// Write(530.67ns), RMW(1101ns), Read(1132ns)
- for (count = 0; ((count < 40000) && (recalAbortCnt[bankPowerDown] < 2)); ++count)
+ for (count = 0; ((count < 15000) && (recalAbortCnt[bankPowerDown] < 2)); ++count)
{
// Read rx_recal_abort_active (Returns a 0 or 1)
// - The DL sources this signal
@@ -161,12 +164,25 @@ int main(int argc, char** argv)
++loopCount;
bankPowerDown = (bankPowerDown == BANK_EVEN) ? BANK_ODD : BANK_EVEN;
+
+ // At the end of the second loop, power all lanes back up and wait until a recal abort is seen so traffic can continue
+ if ((bankPowerDown == BANK_EVEN) && (count == 15000))
+ {
+ powerUpAll(&gcr_addr); //powerup all the lanes back
+ wait_ns(1000); // wait 1us before we start to look for recal abort
+
+ do
+ {
+ currentRecalAbort = get_ptr_field(&gcr_addr, rx_recal_abort_active); // DL
+ ++waitCount;
+ }
+ while (currentRecalAbort == 0 ); //wait until the recal abort is seen to start powering off again
+ }
}
while((recalAbortCnt[BANK_EVEN] < 2) && (recalAbortCnt[BANK_ODD] < 2));
// Power down the bank with the bad lane/lanes
powerDownBank(&gcr_addr, bankPowerDown);
-
// Disable the ampdac for all lanes
rxEnableAmpdac(&gcr_addr, false);
@@ -182,9 +198,12 @@ int main(int argc, char** argv)
}
work1Data = (work1Data & 0x0FFFFFFF00000000) | (((uint64_t)workDoneCnt << 60) & 0xF000000000000000);
- work1Data = (work1Data & 0xFFFF000000000000) | (((uint64_t)(mfspr(SPRN_DEC)) << 32) & 0x0000FFFF00000000);
- work2Data = (((uint64_t)loopCount << 32) & 0xFFFFFFFF00000000);
- work2Data |= (bankPowerDown == BANK_EVEN) ? 0xAAAA000000000000 : 0x5555000000000000;
+ work1Data = (work1Data & 0xF000FFFF00000000) | (((uint64_t)resetCount << 48) & 0x0FFF000000000000);
+ work1Data = (work1Data & 0xFFFF000000000000) | (((uint64_t)loopCount << 32) & 0x0000FFFF00000000);
+ // work1Data = (work1Data & 0xFFFF000000000000) | (((uint64_t)(mfspr(SPRN_DEC)) << 32) & 0x0000FFFF00000000);
+ work2Data = (((uint64_t)waitCount << 32) & 0xFFFFFFFF00000000);
+ work2Data = (work2Data & 0x0000FFFF00000000) | (((uint64_t)count << 48) & 0xFFFF000000000000);
+ work2Data |= (bankPowerDown == BANK_EVEN) ? 0xE000000000000000 : 0x0000000000000000;
localPut(WORK1_REG, work1Data);
localPut(WORK2_REG, work2Data);
@@ -192,13 +211,15 @@ int main(int argc, char** argv)
count = 0x0;
state = 0x0;
}
- else if((count > 26500) && (state == 0x01) ) // 30ms
+ else if((count > 6000) && (state == 0x01)) // 3ms
{
currentRecalAbort = 0x0;
state = 0x0;
count = 0x0;
+ ++resetCount;
work1Data = (work1Data & 0xFFF0FFFF00000000) | (0x0001000000000000);
+ work1Data = (work1Data & 0xF000FFFF00000000) | (((uint64_t)resetCount << 48) & 0x0FFF000000000000);
localPut(WORK1_REG, work1Data);
}
else if(state == 0x01)
@@ -208,10 +229,10 @@ int main(int argc, char** argv)
// After we complete the workaround once, we will break out of the
// while loop and halt the ppe execution.
- if (workDoneCnt > 0x0)
- {
- break;
- }
+ //if (workDoneCnt > 0x0)
+ // {
+ // break;
+ // }
}
return 0;
@@ -278,6 +299,19 @@ void rxEnableAmpdac(t_gcr_addr* gcr_addr, const bool iEnable)
set_gcr_addr_lane(gcr_addr, 0);
return;
}
+// Power up all the banks
+void powerUpAll(t_gcr_addr* gcr_addr)
+{
+ set_gcr_addr_lane(gcr_addr, 0x1F);
+ put_ptr_field(gcr_addr, rx_lane_ana_pdwn, 0x00, fast_write);
+ wait_ns(20000);
+ put_ptr_field(gcr_addr, rx_amp_val, 0x0, fast_write);
+ set_gcr_addr_lane(gcr_addr, 0x00);
+
+ set_gcr_addr_lane(gcr_addr, 0);
+ return;
+}
+
// The bank that is passed in, is the bank that we want to power down
void powerDownBank(t_gcr_addr* gcr_addr, uint32_t iTargetBank)
@@ -288,7 +322,7 @@ void powerDownBank(t_gcr_addr* gcr_addr, uint32_t iTargetBank)
// Bias and Power Down all the Lanes
set_gcr_addr_lane(gcr_addr, 0x1F);
put_ptr_field(gcr_addr, rx_amp_val, 0x7F, fast_write);
- wait_ns(20000); // Wait for 20us to allow values to propogate
+ wait_ns(40000); // Wait for 40us to allow values to propagate
put_ptr_field(gcr_addr, rx_lane_ana_pdwn, 0x01, fast_write);
set_gcr_addr_lane(gcr_addr, 0x00);
diff --git a/import/chips/p9/procedures/ppe_closed/lib/hcodelibfiles.mk b/import/chips/p9/procedures/ppe_closed/lib/hcodelibfiles.mk
index 55cd38dc..0f0eb5f9 100644
--- a/import/chips/p9/procedures/ppe_closed/lib/hcodelibfiles.mk
+++ b/import/chips/p9/procedures/ppe_closed/lib/hcodelibfiles.mk
@@ -5,7 +5,7 @@
#
# OpenPOWER HCODE Project
#
-# COPYRIGHT 2015,2018
+# COPYRIGHT 2015,2019
# [+] International Business Machines Corp.
#
#
@@ -43,6 +43,7 @@
HCODE_C_SOURCES = \
p9_stop_recovery_trigger.c \
+ p9_hcd_errl.c \
p9_hcd_block_copy.c \
p9_dd1_doorbell_wr.c
diff --git a/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.c b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.c
new file mode 100755
index 00000000..d779c11c
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.c
@@ -0,0 +1,605 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.c $ */
+/* */
+/* OpenPOWER HCODE Project */
+/* */
+/* COPYRIGHT 2016,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __PPE_CME
+
+#include <stdint.h>
+
+#include "pk.h"
+#include "ppe42_string.h"
+#include "pk_trace.h"
+
+#include "p9_hcd_memmap_occ_sram.H"
+
+#include "pstate_pgpe_occ_api.h"
+#include "p9_hcd_errldefs.h"
+#include "p9_hcd_errl.h"
+
+uint8_t G_errSlotUnrec[ERRL_MAX_ENTRY_SZ] __attribute__ ((aligned (8))) = {0};
+
+// As this is common code across GPEs, the number of error logs supported per
+// severity (and hence total error logs supported) per GPE has to be the same.
+// Order of error logs in this table should match relative order per GPE from
+// elog_entry_index
+errlHndl_t G_gpeErrSlots[ERRL_MAX_SLOTS_PER_GPE] =
+{
+ (errlHndl_t)& G_errSlotUnrec
+};
+
+hcode_elog_entry_t* G_occElogTable = NULL; // Ptr to OCC shared data err idx tbl
+hcodeErrlConfigData_t G_errlConfigData = {0};
+hcodeErrlMetadata_t G_errlMetaData = {0};
+
+void initErrLogging (const uint8_t i_errlSource)
+{
+ HcodeOCCSharedData_t* l_occSharedData = (HcodeOCCSharedData_t*)
+ OCC_SRAM_SHARED_DATA_BASE_ADDR;
+
+ G_occElogTable = l_occSharedData->errlog_idx.elog;
+ G_errlConfigData.source = i_errlSource;
+
+ switch (i_errlSource)
+ {
+ case ERRL_SOURCE_PGPE:
+ G_errlConfigData.traceSz = ERRL_TRACE_DATA_SZ_PGPE;
+ break;
+
+ case ERRL_SOURCE_XGPE:
+ G_errlConfigData.traceSz = ERRL_TRACE_DATA_SZ_XGPE;
+ break;
+
+ default:
+ G_errlConfigData.source = ERRL_SOURCE_INVALID;
+ PK_TRACE_ERR ("initErrLogging: Bad Source %d", i_errlSource);
+ break;
+ }
+
+ // Record the Processor Version of the PPE from which errors will be logged
+ // e.g. 0x42090203==PPE42 P9 DD2.3
+ G_errlConfigData.procVersion = mfspr(SPRN_PVR);
+ // Record PPE Instance from which errors will be logged
+ G_errlConfigData.ppeId = (uint16_t)(mfspr(SPRN_PIR)&PIR_PPE_INSTANCE_MASK);
+
+ G_errlMetaData.errId = 0;
+ G_errlMetaData.slotBits = 0;
+ G_errlMetaData.slotMask = ERRL_SLOT_MASK_DEFAULT;
+ G_errlMetaData.errSlot = ERRL_SLOT_INVALID;
+}
+
+// Function Specification
+//
+// Name: getErrSlotNumAndErrId
+//
+// Description: Get Error Slot Number and Error Id
+//
+// End Function Specification
+uint8_t getErrSlotNumAndErrId (
+ ERRL_SEVERITY i_severity,
+ uint8_t* o_errlId,
+ uint64_t* o_timeStamp )
+{
+ uint8_t l_slot = ERRL_SLOT_INVALID;
+ uint8_t l_localSlot = ERRL_SLOT_INVALID;
+ uint8_t l_baseSlot = 0;
+ uint32_t l_slotmask = ERRL_SLOT_MASK_DEFAULT;
+
+ // this logic will evolve once we support other severities
+ // or we could have a map table
+ if (ERRL_SEV_UNRECOVERABLE == i_severity)
+ {
+ switch (G_errlConfigData.source)
+ {
+ case ERRL_SOURCE_PGPE:
+ l_baseSlot = ERRL_SLOT_PGPE_BASE;
+ l_slotmask = ERRL_SLOT_MASK_PGPE_UNREC;
+ break;
+
+ case ERRL_SOURCE_XGPE:
+ l_baseSlot = ERRL_SLOT_XGPE_BASE;
+ l_slotmask = ERRL_SLOT_MASK_XGPE_UNREC;
+ break;
+ }
+ }
+
+ if (ERRL_SLOT_MASK_DEFAULT != l_slotmask)
+ {
+ // 0. Enter Critical Section to be thread-safe
+ PkMachineContext ctx;
+ pk_critical_section_enter (&ctx);
+
+ // 1. Check if a slot is free in the local GPE maintained slotBits
+ uint32_t l_slotBitWord = ~(G_errlMetaData.slotBits | l_slotmask);
+
+ // Count leading 0 bits in l_slotBitWord to get available slot based on
+ // l_slotmask. This logic is extensible to allow for a variable
+ // number of log slots per source & severity based on proper definitions
+ // in ERRL_SLOT_MASK
+ __asm__ __volatile__ ( "cntlzw %0, %1;" : "=r" (l_slot) :
+ "r" (l_slotBitWord));
+
+ if (MAX_HCODE_ELOG_ENTRIES > l_slot)
+ {
+ // 2. Slot matching source + sev is available in local GPE slotBits
+ // Check that slot is free in the global OCC monitored Error Table
+ if (0 == G_occElogTable[l_slot].dw0.value)
+ {
+ // Matching slot is available in global OCC watched error table
+ // Get the local GPE Table slot
+ l_localSlot = l_slot - l_baseSlot;
+
+ // 3. Check that it does not exceed the local GPE error table
+ if (ERRL_MAX_SLOTS_PER_GPE > l_localSlot)
+ {
+ // 4. Get time stamp & save off timestamp
+ *o_timeStamp = pk_timebase_get();
+
+ // 5. Save global slot details in GPE. We cannot write to
+ // global OCC elog table until error is ready to commit
+ G_errlMetaData.slotBits |= (ERRL_SLOT_SHIFT >> l_slot);
+ G_errlMetaData.slotMask = l_slotmask;
+ G_errlMetaData.errSlot = l_slot;
+
+ // 6. Save off incremented counter which forms error log id
+ // Provide next ErrorId; ErrorId should never be 0.
+ *o_errlId = ((++G_errlMetaData.errId) == 0) ?
+ ++G_errlMetaData.errId :
+ G_errlMetaData.errId;
+ }
+ else
+ {
+ // localSlot cannot exceed local GPE Error Table Size
+ PK_TRACE_ERR ("Slot exceeds max! slot: %d localSlot: %d",
+ l_slot, l_localSlot);
+ l_localSlot = ERRL_SLOT_INVALID;
+ }
+ }
+ else
+ {
+ // Prev error not yet cleared by OCC; GPE is creating errors faster
+ // than OCC is consuming them (globally)
+ PK_TRACE_ERR ("Slot %d not free in global OCC table!", l_slot);
+ }
+ }
+ else
+ {
+ // Slot was available in the OCC Error Table but is already
+ // taken in the GPE, for an error that should be committed
+ // soon. Multi-threaded GPE producing errors faster than
+ // they are being committed (locally)
+ PK_TRACE_ERR ("Slot %d not free in local GPE!", l_slot);
+ }
+
+ // 7. Exit Critical Section to be thread-safe
+ pk_critical_section_exit (&ctx);
+ }
+ else
+ {
+ PK_TRACE_ERR ("Can't get free slot! Bad Source %d OR Sev %d!",
+ G_errlConfigData.source, i_severity);
+ }
+
+ PK_TRACE_INF ("Sev %d Slot G %d L %d EID 0x%08X",
+ i_severity, l_slot, l_localSlot, *o_errlId);
+
+ return l_localSlot;
+}
+
+// @note i_size is a multiple of 8 bytes
+// Trace Buff Header is a multiple of 8 bytes (56 B currently)
+// Each chunk being copied is a multiple of 8 bytes
+// The Trace Buffer User Data Section Payload start address is 8 B aligned
+uint32_t copyTraceBufferPartial ( void* i_pDst,
+ uint16_t i_size )
+{
+ PK_TRACE_INF (">> copyTraceBufferPartial: size %d bytes", i_size);
+ uint16_t l_bytesCopied = 0;
+ const uint32_t l_trHdrSz = sizeof(PkTraceBuffer) - PK_TRACE_SZ;
+ const uint32_t l_trStateOffset = g_pk_trace_buf.state.offset &
+ PK_TRACE_CB_MASK;
+ uint32_t l_szBytes = l_trHdrSz; // first copy trace header
+ bool l_buffWrapped = false;
+ uint32_t l_offset = l_trHdrSz;
+
+ const uint16_t pk_tr_size = g_pk_trace_buf.size;
+ const uint16_t pk_tr_sz_max = PK_TRACE_SZ;
+ const uint32_t pk_tr_state_offset = g_pk_trace_buf.state.offset;
+
+ if (NULL != i_pDst)
+ {
+ // copy the trace buffer header
+ //PK_TRACE_INF ("Copying Tr Buff Hdr %d bytes", l_szBytes);
+ memcpy ( i_pDst,
+ (void*) &g_pk_trace_buf,
+ l_szBytes );
+ l_bytesCopied = l_szBytes;
+
+ // If size being copied is less than what was in the header, adjust
+ // the necessary fields to suit that partial buffer in new header
+ // Can't do this in the PPE due to alignment restrictions; compensate
+ // in the parser instead
+#if 0
+
+ if (l_pPkTraceBuf->size > i_size)
+ {
+ l_pPkTraceBuf->size = i_size;
+ l_pPkTraceBuf->state.offset = i_size;
+ }
+
+#endif
+
+ l_szBytes = i_size - l_szBytes; // account for copied trace header bytes
+
+ if (l_trStateOffset >= l_szBytes)
+ {
+ // TEs in requested size fit in the un-wrapped part of the buffer
+ l_offset += l_trStateOffset - l_szBytes;
+ }
+ else
+ {
+ // requested size has some TEs in the wrapped part of the buffer
+ // copy the wrapped chunk of TEs first, then copy the un-wrapped TEs
+ // so that we have TEs in rev-chrono after both copies
+ l_buffWrapped = true;
+ l_szBytes -= l_trStateOffset;
+ l_offset += PK_TRACE_SZ - l_szBytes;
+ }
+
+ // copy (append to header) the first chunk of TEs
+ //PK_TRACE_INF ("Copying 1st chunk of TEs @ %d %d bytes",
+ // l_offset, l_szBytes);
+ memcpy ( i_pDst + l_bytesCopied,
+ (void*)(&g_pk_trace_buf) + l_offset,
+ l_szBytes );
+ l_bytesCopied += l_szBytes;
+
+ if (l_buffWrapped == true)
+ {
+ // Now copy the un-wrapped chunk of TEs
+ l_szBytes = l_trStateOffset;
+ l_offset = l_trHdrSz;
+
+ // copy (append to 1st chunk) the 2nd chunk of wrapped trace entries
+ //PK_TRACE_INF ("Copying 2nd chunk of wrapped TEs @%d %d bytes",
+ // l_offset, l_szBytes);
+ memcpy ( i_pDst + l_bytesCopied,
+ (void*)(&g_pk_trace_buf) + l_offset,
+ l_szBytes );
+ l_bytesCopied += l_szBytes;
+ }
+ }
+
+ PK_TRACE_INF ("buf.state.offset %d offset.wrapped %d buf.sz %d buf.max %d",
+ pk_tr_state_offset, l_trStateOffset, pk_tr_size, pk_tr_sz_max);
+
+ PK_TRACE_INF ( "<< copyTraceBufferPartial: size %d copied %d",
+ i_size, l_bytesCopied );
+ return l_bytesCopied;
+}
+
+void reportErrorLog (errlHndl_t i_err)
+{
+ if (NULL != i_err)
+ {
+ if (G_errlMetaData.errId == i_err->iv_entryId)
+ {
+ PK_TRACE_INF ("reportErrorLog: EID 0x%08X", i_err->iv_entryId);
+ hcode_elog_entry_t l_errlEntry;
+
+ l_errlEntry.dw0.fields.errlog_id = i_err->iv_entryId;
+ l_errlEntry.dw0.fields.errlog_len = i_err->iv_userDetails.iv_entrySize;
+ l_errlEntry.dw0.fields.errlog_addr = (uint32_t)i_err;
+ l_errlEntry.dw0.fields.errlog_src = G_errlConfigData.source;
+
+ // Enter Critical Section to be thread-safe
+ PkMachineContext ctx;
+ pk_critical_section_enter (&ctx);
+
+ // OCC Error Table should get updated last as OCC polls on it
+ G_occElogTable[G_errlMetaData.errSlot].dw0.value =
+ l_errlEntry.dw0.value;
+
+ // Free up this slot as available on this GPE's records.
+ // OCC will free up corresponding slot in Shared SRAM space once
+ // the error log is processed
+ G_errlMetaData.slotBits &= G_errlMetaData.slotMask;
+ G_errlMetaData.slotMask = ERRL_SLOT_MASK_DEFAULT;
+ G_errlMetaData.errSlot = ERRL_SLOT_INVALID;
+
+ pk_critical_section_exit (&ctx);
+ }
+ }
+
+ PK_TRACE_INF ("<< reportErrorLog");
+}
+
+// Function Specification
+//
+// Name: createErrl
+//
+// Description: Create an Error Log
+//
+// End Function Specification
+errlHndl_t createErrl(
+ const uint16_t i_modId,
+ const uint8_t i_reasonCode,
+ const uint16_t i_extReasonCode,
+ const ERRL_SEVERITY i_sev,
+ const uint32_t i_userData1,
+ const uint32_t i_userData2,
+ const uint32_t i_userData3 )
+{
+ PK_TRACE_INF ("createErrl: modid %d rc %d sev %d",
+ i_modId, i_reasonCode, i_sev);
+
+ errlHndl_t l_rc = NULL;
+ uint64_t l_time = 0;
+ uint8_t l_id = 0;
+ uint8_t l_errSlot = getErrSlotNumAndErrId( i_sev, &l_id, &l_time);
+
+ if (ERRL_SLOT_INVALID != l_errSlot)
+ {
+ PK_TRACE_INF ("createErrl: EID [%d] Slot [%d]", l_id, l_errSlot);
+
+ // get slot pointer
+ l_rc = G_gpeErrSlots[l_errSlot];
+ // save off entry Id
+ l_rc->iv_entryId = l_id;
+ //Save off version info
+ l_rc->iv_version = ERRL_STRUCT_VERSION_1;
+ l_rc->iv_reasonCode = i_reasonCode;
+ l_rc->iv_extendedRC = i_extReasonCode;
+ l_rc->iv_severity = i_sev;
+ l_rc->iv_numCallouts = 0;
+ l_rc->iv_maxSize = ERRL_MAX_ENTRY_SZ;
+
+ // reset the committed flag indicating reusing slot for new error
+ l_rc->iv_userDetails.iv_committed = 0;
+ // save off default sizes of error log and user data sections
+ l_rc->iv_userDetails.iv_entrySize = sizeof( ErrlEntry_t );
+ l_rc->iv_userDetails.iv_userDetailEntrySize = 0;
+ // save off time
+ l_rc->iv_userDetails.iv_timeStamp = l_time;
+ // save off rest of input parameters
+ l_rc->iv_userDetails.iv_modId = i_modId;
+ l_rc->iv_userDetails.iv_userData1 = i_userData1;
+ l_rc->iv_userDetails.iv_userData2 = i_userData2;
+ l_rc->iv_userDetails.iv_userData3 = i_userData3;
+ l_rc->iv_userDetails.iv_version = ERRL_USR_DTL_STRUCT_VERSION_1;
+
+ // Save other invariants
+ l_rc->iv_userDetails.iv_procVersion = G_errlConfigData.procVersion;
+ l_rc->iv_userDetails.iv_ppeId = G_errlConfigData.ppeId;
+
+ // Default other unused fields
+ l_rc->iv_reserved1 = 0;
+ l_rc->iv_userDetails.iv_reserved4 = 0; // reserved by def
+ l_rc->iv_userDetails.iv_reserved5 = 0; // reuse OCC State
+ l_rc->iv_userDetails.iv_reserved7 = 0; // Alignment
+ }
+
+ PK_TRACE_INF ("<< createErrl EID: 0x%08X",
+ (l_rc != NULL) ? (l_rc->iv_entryId) : 0ull);
+
+ return l_rc;
+}
+
+
+// Function Specification
+//
+// Name: addCalloutToErrl
+//
+// Description: Add a callout to an Error Log
+//
+// End Function Specification
+void addCalloutToErrl(
+ errlHndl_t io_err,
+ const ERRL_CALLOUT_TYPE i_type,
+ const uint64_t i_calloutValue,
+ const ERRL_CALLOUT_PRIORITY i_priority)
+{
+ // 1. check if handle is valid (not null or invalid)
+ // 2. not committed
+ // 3. severity is not informational (unless mfg action flag is set)
+ // 4. callouts still not full
+ if ( (io_err != NULL ) &&
+ (io_err->iv_userDetails.iv_committed == 0) &&
+ (io_err->iv_severity != ERRL_SEV_INFORMATIONAL) &&
+ (io_err->iv_numCallouts < ERRL_MAX_CALLOUTS) )
+ {
+ //set callout type
+ io_err->iv_callouts[ io_err->iv_numCallouts ].iv_type = (uint8_t)i_type;
+
+ //set callout value
+ io_err->iv_callouts[ io_err->iv_numCallouts ].iv_calloutValue = i_calloutValue;
+
+ //set priority
+ io_err->iv_callouts[ io_err->iv_numCallouts].iv_priority = (uint8_t)i_priority;
+
+ //increment actual number of callout
+ io_err->iv_numCallouts++;
+ }
+ else
+ {
+ PK_TRACE_INF ("Callout type 0x%02X was NOT added to elog", i_type);
+ }
+}
+
+
+// Function Specification
+//
+// Name: addUsrDtlsToErrl
+//
+// Description: Add User Details to an Error Log
+// @note i_size should be a multiple of 8 bytes for alignment
+// End Function Specification
+void addUsrDtlsToErrl(
+ errlHndl_t io_err,
+ uint8_t* i_dataPtr,
+ const uint16_t i_size,
+ const uint8_t i_version,
+ const ERRL_USR_DETAIL_TYPE i_type)
+{
+ // 1. check if handle is valid
+ // 2. NOT empty
+ // 3. not committed
+ // 4. size being passed in is valid
+ // 5. data pointer is valid
+ // 6. and we have enough size
+ if ((io_err != NULL ) &&
+ (io_err->iv_userDetails.iv_committed == 0) &&
+ (i_size != 0) &&
+ (i_dataPtr != NULL) &&
+ ((io_err->iv_userDetails.iv_entrySize) < ERRL_MAX_ENTRY_SZ))
+ {
+ //adjust user details entry payload size to available size
+ uint16_t l_availableSize = ERRL_MAX_ENTRY_SZ -
+ (io_err->iv_userDetails.iv_entrySize +
+ sizeof (ErrlUserDetailsEntry_t));
+
+ // Add user details section only if at least ERRL_USR_DATA_SZ_MIN bytes fit
+ if (l_availableSize >= ERRL_USR_DATA_SZ_MIN)
+ {
+ //local copy of the usr details entry
+ ErrlUserDetailsEntry_t l_usrDtlsEntry;
+
+ l_usrDtlsEntry.iv_type = (uint8_t)i_type;
+ l_usrDtlsEntry.iv_version = i_version;
+ l_usrDtlsEntry.iv_size = (i_size < l_availableSize) ? i_size :
+ l_availableSize;
+
+ void* l_p = io_err;
+
+ // add user detail entry to end of the current error log
+ // copy header of the user detail entry
+ l_p = memcpy (l_p + (io_err->iv_userDetails.iv_entrySize),
+ &l_usrDtlsEntry,
+ sizeof (ErrlUserDetailsEntry_t));
+
+ // If we have more cases of user detail section payloads needing
+ // additional logic to copy the payload, the below if-else could
+ // be moved into a new function
+
+ // copy payload of the user detail entry
+ l_p += sizeof (ErrlUserDetailsEntry_t);
+
+ if (ERRL_USR_DTL_TRACE_DATA == l_usrDtlsEntry.iv_type)
+ {
+ // copy the trace buffer (source data ptr is global)
+ copyTraceBufferPartial (l_p, l_usrDtlsEntry.iv_size);
+ }
+ else
+ {
+ memcpy (l_p, i_dataPtr, l_usrDtlsEntry.iv_size);
+ }
+
+ uint16_t l_totalSizeOfUsrDtls = sizeof (ErrlUserDetailsEntry_t) +
+ l_usrDtlsEntry.iv_size;
+ //update usr data entry size
+ io_err->iv_userDetails.iv_userDetailEntrySize +=
+ l_totalSizeOfUsrDtls;
+ //update error log size
+ io_err->iv_userDetails.iv_entrySize += l_totalSizeOfUsrDtls;
+ }
+ }
+}
+
+
+// Function Specification
+//
+// Name: addTraceToErrl
+//
+// Description: Add trace to an error log
+//
+// End Function Specification
+void addTraceToErrl (errlHndl_t io_err)
+{
+ PkMachineContext ctx;
+
+ pk_critical_section_enter (&ctx);
+
+ addUsrDtlsToErrl (
+ io_err,
+ (uint8_t*) &g_pk_trace_buf,
+ G_errlConfigData.traceSz,
+ ERRL_TRACE_VERSION_1,
+ ERRL_USR_DTL_TRACE_DATA );
+
+ pk_critical_section_exit (&ctx);
+}
+
+// Function Specification
+//
+// Name: commitErrl
+//
+// Description: Commit an Error Log
+//
+// End Function Specification
+void commitErrl (errlHndl_t* io_err)
+{
+ if (NULL != *io_err)
+ {
+ // this is the last common placeholder to change or override the error
+ // log fields like actions, severity, callouts, etc. based on generic
+ // handling of cases like xstop, etc., before the error is 'committed'
+ // for OCC to notice and trigger (H)TMGT
+
+ // mark the last callout by zeroing out the next one
+ uint8_t l_lastCallout = (*io_err)->iv_numCallouts;
+
+ if (l_lastCallout < ERRL_MAX_CALLOUTS)
+ {
+ PK_TRACE_INF ("Zeroing last+1 callout %u", l_lastCallout);
+
+ (*io_err)->iv_callouts[l_lastCallout].iv_type = 0;
+ (*io_err)->iv_callouts[l_lastCallout].iv_calloutValue = 0;
+ (*io_err)->iv_callouts[l_lastCallout].iv_priority = 0;
+ }
+
+ // numCallouts must be the max value as defined by the TMGT-OCC spec.
+ (*io_err)->iv_numCallouts = ERRL_MAX_CALLOUTS;
+
+ // calculate checksum & save it off
+ uint32_t l_cnt = 2; // starting point is after checksum field
+ uint32_t l_sum = 0;
+ uint32_t l_size = (*io_err)->iv_userDetails.iv_entrySize;
+ uint8_t* l_p = (uint8_t*)*io_err;
+
+ for( ; l_cnt < l_size ; l_cnt++ )
+ {
+ l_sum += *(l_p + l_cnt);
+ }
+
+ (*io_err)->iv_checkSum = l_sum;
+
+ // save off committed
+ (*io_err)->iv_userDetails.iv_committed = 1;
+
+ // report error to OCC
+ reportErrorLog(*io_err);
+
+ *io_err = (errlHndl_t) NULL;
+ }
+}
+
+#endif // __PPE_CME
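
For readers following the wrap handling in copyTraceBufferPartial() above, a simplified, self-contained illustration of the same two-chunk copy, assuming the masked trace state offset is the next-write position within the circular region; the names here are generic and not part of the patch.

#include <stdint.h>
#include <string.h>

// Copy the most recent 'want' bytes of a circular buffer of 'ring_sz' bytes
// whose next-write position is 'w', laying them out contiguously in 'dst'.
static void copy_recent(uint8_t* dst, const uint8_t* ring,
                        uint32_t ring_sz, uint32_t w, uint32_t want)
{
    if (w >= want)
    {
        // Everything requested sits un-wrapped, ending at the write position
        memcpy(dst, ring + (w - want), want);
    }
    else
    {
        // The request spans the wrap point: copy the chunk at the end of the
        // ring first, then the chunk at the start, as the PPE code does
        uint32_t tail = want - w;
        memcpy(dst, ring + (ring_sz - tail), tail);
        memcpy(dst + tail, ring, w);
    }
}
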
diff --git a/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.h b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.h
new file mode 100755
index 00000000..d4357066
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.h
@@ -0,0 +1,142 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errl.h $ */
+/* */
+/* OpenPOWER HCODE Project */
+/* */
+/* COPYRIGHT 2016,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+#ifndef _P9_HCD_ERRL_H
+#define _P9_HCD_ERRL_H
+
+#include <stdbool.h>
+#include "p9_hcd_occ_errldefs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Creates an Error Log in the PPE's local SRAM
+///
+/// @param [in] i_modId Module/function ID where the error log is being created
+/// @param [in] i_reasonCode A unique code identifying the reason of this error
+/// @param [in] i_extReasonCode An extended Reason Code for this error
+/// @param [in] i_sev Severity this Error Log should be created with
+/// @param [in] i_userData1-3 User data to add to the Error Log as FFDC
+///
+/// @return On Success: A non-NULL handle to the Error Log created
+/// On Failure: NULL
+///
+/// @note: (COMP_ID | i_reasonCode) become bits 16-31 of SRC
+/// @note: (i_modId<<16 | i_extReasonCode) get to user data4 & parsers
+/// @note: Until pending Error Logs are processed and room is created for a new
+/// HCode Error Log in SRAM by OCC/(H)TMGT, attempts to create a new Error
+/// Log via createErrl will fail and HCode error logs will be dropped
+
+errlHndl_t createErrl (
+ const uint16_t i_modId,
+ const uint8_t i_reasonCode,
+ const uint16_t i_extReasonCode,
+ const ERRL_SEVERITY i_sev,
+ const uint32_t i_userData1,
+ const uint32_t i_userData2,
+ const uint32_t i_userData3 );
+
+
+/// @brief Adds a callout to the Error Log
+///
+/// @param [inout] io_err A valid error log handle returned by createErrl
+/// @param [in] i_type Type of the callout (hardware FRU, code, etc.)
+/// @param [in] i_calloutValue Specific instance of the type being called out
+/// @param [in] i_priority Priority of this callout for service action
+///
+/// @return void
+///
+/// @note: Callouts help a service engineer isolate the failing part/subsystem
+/// @note: Customer visible errors (Pred/Unrec) need at least 1 callout.
+/// @note: If there is an error adding callout to the Error Log, the callout
+/// will be dropped from the Error Log
+
+/// @TODO via RTC 211557: Support adding callouts to Hcode Error Logs
+/// TMGT adds a Processor callout by default, until this is supported
+
+void addCalloutToErrl (
+ errlHndl_t io_err,
+ const ERRL_CALLOUT_TYPE i_type,
+ const uint64_t i_calloutValue,
+ const ERRL_CALLOUT_PRIORITY i_priority);
+
+
+/// @brief Adds User Details Section to the Error log
+///
+/// @param [inout] io_err A valid error log handle returned by createErrl
+/// @param [in] i_dataPtr Pointer to the data being added
+/// @param [in] i_size Size of the data being added in bytes
+/// @param [in] i_version Version of the User Details Section Header
+/// @param [in] i_type Type of the user details section being added
+///
+/// @return void
+///
+/// @note: Generic method to add user specific data like traces, dashboard, etc.
+/// @note: i_size must be a multiple of 8 & data must be 8 byte aligned
+/// @note: If there is an error adding user details section to the Error Log,
+/// the user details section will be dropped from the Error Log
+
+void addUsrDtlsToErrl (
+ errlHndl_t io_err,
+ uint8_t* i_dataPtr,
+ const uint16_t i_size,
+ const uint8_t i_version,
+ const ERRL_USR_DETAIL_TYPE i_type);
+
+
+/// @brief Add Trace Data to the Error log
+///
+/// @param [inout] io_err A valid error log handle returned by createErrl
+///
+/// @return void
+///
+/// @note: Common method to add Hcode traces from PK trace buffer to Error Log
+/// @note: If there is an error adding traces to the Error Log, the trace data
+/// will be dropped from the Error Log
+
+void addTraceToErrl (errlHndl_t io_errl);
+
+
+/// @brief Commit the Error Log to the Error Log Table in OCC Shared SRAM
+///
+/// @param [inout] io_err Input: Pointer to a valid error log handle
+/// Output: NULL
+///
+/// @return void
+///
+/// @note: No further changes can be made to an error log once it is committed
+/// @note: It can take time for OCC & (H)TMGT to consume an error log committed
+/// to the OCC SRAM and convert it to a PEL/SEL
+/// @note: OCC or (H)TMGT being busy or not functional for other reasons can
+/// cause HCode committed errors not to be converted to PELs/SELs
+
+void commitErrl (errlHndl_t* io_err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_P9_HCD_ERRL_H
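
A minimal usage sketch of the API documented above, not part of the patch; the module, reason and extended reason codes are placeholder values for illustration, and initErrLogging() from p9_hcd_errldefs.h is assumed to have been called once at boot with the appropriate ERRL_SOURCE.

#include "p9_hcd_errl.h"

// Placeholder identifiers, for illustration only
#define EXAMPLE_MOD_ID       0x1234
#define EXAMPLE_REASON_CODE  0x56
#define EXAMPLE_EXT_RC       0x789A

void example_log_unrecoverable(uint32_t i_ffdc1, uint32_t i_ffdc2)
{
    // Only one unrecoverable slot exists per GPE (ERRL_MAX_SLOTS_PER_GPE),
    // so createErrl() returns NULL if the previous log is still pending
    errlHndl_t l_errl = createErrl(EXAMPLE_MOD_ID,
                                   EXAMPLE_REASON_CODE,
                                   EXAMPLE_EXT_RC,
                                   ERRL_SEV_UNRECOVERABLE,
                                   i_ffdc1,
                                   i_ffdc2,
                                   0);

    if (l_errl != NULL)
    {
        // Firmware component callout (richer callouts are a @TODO above)
        addCalloutToErrl(l_errl,
                         ERRL_CALLOUT_TYPE_COMPONENT_ID,
                         ERRL_COMPONENT_ID_FIRMWARE,
                         ERRL_CALLOUT_PRIORITY_HIGH);

        // Append the PK trace buffer as a user-details section
        addTraceToErrl(l_errl);

        // Publish to the OCC shared-SRAM table; the handle is NULLed on return
        commitErrl(&l_errl);
    }
}
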
diff --git a/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errldefs.h b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errldefs.h
new file mode 100644
index 00000000..edb6a6df
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errldefs.h
@@ -0,0 +1,106 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: import/chips/p9/procedures/ppe_closed/lib/p9_hcd_errldefs.h $ */
+/* */
+/* OpenPOWER HCODE Project */
+/* */
+/* COPYRIGHT 2016,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+#ifndef _P9_HCD_ERRLDEFS_H
+#define _P9_HCD_ERRLDEFS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Size of traces to add to ERRL_USR_DTL_TRACE_DATA
+#define ERRL_TRACE_DATA_SZ_PGPE 0x200
+#define ERRL_TRACE_DATA_SZ_XGPE 0x100
+
+// Max number of errorlog slots per GPE
+// Supporting only 1 (unrecoverable) error per GPE due to memory restrictions
+#define ERRL_MAX_SLOTS_PER_GPE 1
+
+// Used for shifting slot bits
+static const uint32_t ERRL_SLOT_SHIFT = 0x80000000;
+
+// These bits are used to acquire a slot number. When used with the global
+// slot bit mask, we are able to get 1 slot for unrecoverable errors,
+// 1 slot for informational logs and so on. This can be trivially extended to
+// multiple slots of the same type of error as well.
+// @note The algorithm to get an error slot assumes that all errors per GPE
+// are ordered sequentially without mixing with errors from other GPEs
+// @note Per current requirement & memory constraints there will only be 1
+// unrecoverable error log per GPE. There could be unused bits until
+// we use informational error logs, etc.
+/* Slot Masks */
+typedef enum
+{
+ ERRL_SLOT_MASK_DEFAULT = 0xFFFFFFFF,
+ ERRL_SLOT_MASK_PGPE_UNREC = 0x7FFFFFFF,
+ ERRL_SLOT_MASK_XGPE_UNREC = 0xBFFFFFFF,
+} ERRL_SLOT_MASK;
+
+// Index into array of error log entries in hcode_error_table_t
+// @note This enum should be in sync with the masks defined by ERRL_SLOT_MASK
+// @note OCC processes entries in hcode_error_table_t from 0 to
+// MAX_HCODE_ELOG_ENTRIES & is agnostic of how hcode orders them
+enum elog_entry_index
+{
+ ERRL_SLOT_PGPE_BASE = 0x00,
+ ERRL_SLOT_PGPE_UNREC = 0x00,
+ ERRL_SLOT_XGPE_BASE = 0x01,
+ ERRL_SLOT_XGPE_UNREC = 0x01,
+ ERRL_SLOT_INVALID = 0xFF, // default invalid
+};
+
+// Structure to house-keep specific error log related metadata until it is
+// committed out to the OCC Shared Data Error Index Table
+typedef struct
+{
+ uint32_t slotBits; // Bits 0:1 flags for slots taken by errors
+ uint32_t slotMask; // Slot mask of the current error being processed
+ uint8_t errId; // Error log id of this error, rolling counter
+ uint8_t errSlot; // Slot number of this error in OCC Shared SRAM
+} hcodeErrlMetadata_t;
+
+// Structure to configure GPE specific metadata for error logging that is
+// common for all errors logged from that GPE
+typedef struct
+{
+ uint32_t procVersion;// PPE Processor Version, from PVR
+ uint16_t ppeId; // PPE Instance Id, from PIR
+ uint16_t traceSz; // Size of ERRL_USR_DTL_TRACE_DATA to add
+ uint8_t source; // Engine creating logs. See ERRL_SOURCE
+} hcodeErrlConfigData_t;
+
+// Initializes attributes of the common error logging framework based on the GPE
+// instance trying to use it.
+// @note APIs in p9_hcd_errl.h should not be used before initializing the
+// error logging framework once
+void initErrLogging (const uint8_t i_errlSource);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _P9_HCD_ERRLDEFS_H
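
A worked sketch, not part of the patch, of how the slot masks above combine with the count-leading-zeros step in getErrSlotNumAndErrId(); __builtin_clz stands in for the PPE cntlzw instruction and 0xFF mirrors ERRL_SLOT_INVALID.

#include <stdint.h>

// Free slots for a given source/severity are the zero bits of
// (slotBits | slotMask); the left-most free slot wins.
static uint8_t pick_global_slot(uint32_t slotBits, uint32_t slotMask)
{
    uint32_t candidates = ~(slotBits | slotMask);

    if (candidates == 0)
    {
        return 0xFF;   // no free slot (ERRL_SLOT_INVALID)
    }

    // Example: PGPE unrecoverable with no slot taken yet:
    //   slotBits = 0, slotMask = ERRL_SLOT_MASK_PGPE_UNREC (0x7FFFFFFF)
    //   candidates = 0x80000000 -> clz = 0 -> global slot 0 = ERRL_SLOT_PGPE_UNREC
    return (uint8_t)__builtin_clz(candidates);
}
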
diff --git a/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_occ_errldefs.h b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_occ_errldefs.h
new file mode 100644
index 00000000..33bae5a8
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/lib/p9_hcd_occ_errldefs.h
@@ -0,0 +1,232 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: import/chips/p9/procedures/ppe_closed/lib/p9_hcd_occ_errldefs.h $ */
+/* */
+/* OpenPOWER HCODE Project */
+/* */
+/* COPYRIGHT 2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef _P9_HCD_OCC_ERRLDEFS_H
+#define _P9_HCD_OCC_ERRLDEFS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+#ifdef __PPE__
+extern "C" {
+#else
+namespace hcode
+{
+#endif
+#endif
+
+// See TMGT_OCC_INTERFACE_v1_x_y
+//
+// @note
+// - HCode error logs will be read and processed as a contiguous blob of data
+// by Service Processor or Host Firmware components
+// - (H)TMGT as well as parsers will depend on the order as well as size of
+// members in the Error Log Entry for processing and parsing the error log
+// correctly
+// - As a part of the framework, the Error Log Structure is reused and adapted
+// from the TMGT-OCC-Interface specification, all reserved fields should be
+// left unused (zeroed) in the Error Log Entries, to avoid mis-interpretation
+// and unintended actions in firmware (e.g. Resets, Safe Mode)
+
+// Max size of error log (1024 bytes)
+#define ERRL_MAX_ENTRY_SZ 0x400
+
+// Max number of callouts
+#define ERRL_MAX_CALLOUTS 6 // @TODO improve hard restrictions
+
+// Min size (bytes) of user data that can be added, excluding the header
+#define ERRL_USR_DATA_SZ_MIN 128
+
+// These are the possible sources that an error log can be coming from
+typedef enum
+{
+ ERRL_SOURCE_405 = 0x00,
+ ERRL_SOURCE_PGPE = 0x10,
+ ERRL_SOURCE_XGPE = 0x20, // Used by SGPE in P9
+ ERRL_SOURCE_INVALID = 0xFF,
+} ERRL_SOURCE;
+
+// These are the possible severities that an error log can have.
+// Users must ONLY use these enum values for severity.
+// Predictive & Unrecoverable severities are customer visible & will
+// solicit appropriate callouts and documentation
+/* Error Severity */
+typedef enum
+{
+ ERRL_SEV_INFORMATIONAL = 0x00, // unused
+ ERRL_SEV_PREDICTIVE = 0x01, // unused
+ ERRL_SEV_UNRECOVERABLE = 0x02,
+ ERRL_SEV_CALLHOME_DATA = 0x03, // unused
+} ERRL_SEVERITY;
+
+// These are the possible callout priorities that a callout can have.
+// Users must ONLY use these enum values for callout priority
+/* Callout Priority */
+typedef enum
+{
+ ERRL_CALLOUT_PRIORITY_INVALID = 0x00,
+ ERRL_CALLOUT_PRIORITY_LOW = 0x01,
+ ERRL_CALLOUT_PRIORITY_MED = 0x02,
+ ERRL_CALLOUT_PRIORITY_HIGH = 0x03,
+} ERRL_CALLOUT_PRIORITY;
+
+// These are the user detail types that a user details can have.
+// Users must ONLY use these enum values for user detail type.
+// User Detail is expected to be pass-through to the parser.
+/* User Detail Type */
+typedef enum
+{
+ ERRL_USR_DTL_TRACE_DATA = 0x01,
+ ERRL_USR_DTL_DASH_PGPE = 0x02, // @TODO via RTC: 211559
+ ERRL_USR_DTL_DASH_XGPE = 0x03, // @TODO via RTC: 211560
+ ERRL_USR_DTL_BINARY_DATA = 0x04,
+} ERRL_USR_DETAIL_TYPE;
+
+/* Errl Structure Version */
+typedef enum
+{
+ ERRL_STRUCT_VERSION_1 = 0x01,
+} ERRL_STRUCT_VERSION;
+
+/* Errl User Details Version */
+typedef enum
+{
+ ERRL_USR_DTL_STRUCT_VERSION_1 = 0x01,
+} ERRL_USR_DTL_STRUCT_VERSION;
+
+/* Errl Trace Version */
+typedef enum
+{
+ ERRL_TRACE_VERSION_1 = 0x01,
+} ERRL_TRACE_VERSION;
+
+// @TODO via RTC 211557 >> start
+// Hcode related callouts will need some firmware post-processing
+// Callout types will be adapted/extended for Hcode after consulting with FW
+// @note As a placeholder TMGT adds a processor callout if
+// sev!=info && source!=405 && numCallouts==0
+/* Type of Callout */
+typedef enum
+{
+ ERRL_CALLOUT_TYPE_HUID = 0x01, // unused
+ ERRL_CALLOUT_TYPE_COMPONENT_ID = 0x02,
+ ERRL_CALLOUT_TYPE_GPU_ID = 0x03, // unused
+} ERRL_CALLOUT_TYPE;
+
+/* TMGT-OCC Component Ids */
+typedef enum
+{
+ ERRL_COMPONENT_ID_FIRMWARE = 0x01,
+ ERRL_COMPONENT_ID_OVER_TEMPERATURE = 0x04, // unused
+ ERRL_COMPONENT_ID_OVERSUBSCRIPTION = 0x05, // unused
+ ERRL_COMPONENT_ID_NONE = 0xFF,
+} ERRL_COMPONENT_ID;
+// @TODO via RTC 211557 << end
+
+/* Callout Structure */
+struct ErrlCallout
+{
+ uint64_t iv_calloutValue; // Callout Value
+ uint8_t iv_type; // Type of callout (See ERRL_CALLOUT_TYPE)
+ uint8_t iv_priority; // Callout Priority (See ERRL_CALLOUT_PRIORITY)
+ uint8_t iv_reserved3[6]; // PPE alignment restriction
+} __attribute__ ((__packed__));
+
+typedef struct ErrlCallout ErrlCallout_t;
+
+// @note The User Detail Structure consists of the fields below followed
+// by each individual User Details Entry structure & data
+// A data pointer field is NOT defined but rather inferred here. In the
+// error log contents, the user will see all the subsequent fields
+// followed by each User Details Entry structure and its data
+/* User Detail Structure */
+struct ErrlUserDetails
+{
+ uint8_t iv_version; // User Details Version
+ uint8_t iv_reserved4; // Reserved, per definition
+ uint16_t iv_modId; // Module Id
+ uint32_t iv_procVersion; // PPE Processor Version Register (PVR)
+ uint64_t iv_timeStamp; // Time Stamp
+ uint16_t iv_ppeId; // PPE Instance in Chip
+ uint8_t iv_reserved5; // @reuse - OCC State
+ uint8_t iv_committed: 1; // Log Committed?
+ uint8_t iv_reserved6: 7;
+ uint32_t iv_userData1; // User Data Word 1
+ uint32_t iv_userData2; // User Data Word 2
+ uint32_t iv_userData3; // User Data Word 3
+ uint16_t iv_entrySize; // Log Size
+ uint16_t iv_userDetailEntrySize; // User Details Size
+ uint32_t iv_reserved7; // PPE alignment restriction
+} __attribute__ ((__packed__));
+
+typedef struct ErrlUserDetails ErrlUserDetails_t;
+
+// @note The User Detail Entry Structure consists of the fields below followed
+// by the actual data the user is trying to collect.
+// A data pointer field is NOT defined but rather inferred here. In the
+// error log contents, the user will see all the subsequent fields
+// followed by the actual data. For performance as well as alignment
+// requirements in the PPE, all actual data must be 8 byte aligned.
+/* User Detail Entry Structure */
+struct ErrlUserDetailsEntry
+{
+ uint8_t iv_version; // User Details Entry Version
+ uint8_t iv_type; // User Details Entry Type (ERRL_USR_DETAIL_TYPE)
+ uint16_t iv_size; // User Details Entry Size
+ uint32_t iv_reserved10; // PPE alignment restriction
+} __attribute__ ((__packed__));
+
+typedef struct ErrlUserDetailsEntry ErrlUserDetailsEntry_t;
+
+/* Error Log Structure */
+struct ErrlEntry
+{
+ uint16_t iv_checkSum; // Log CheckSum
+ uint8_t iv_version; // Log Version
+ uint8_t iv_entryId; // Log Entry ID
+ uint8_t iv_reasonCode; // Log Reason Code
+ uint8_t iv_severity; // Log Severity (See ERRL_SEVERITY)
+ uint8_t iv_reserved1; // Must be 0, until actions are defined
+ uint8_t iv_numCallouts; // Number of callouts in the log
+ uint16_t iv_extendedRC; // Log Extended Reason Code
+ uint16_t iv_maxSize; // Max possible size of Error Log
+ uint16_t iv_reserved2[2]; // @alignment
+ ErrlCallout_t iv_callouts[ERRL_MAX_CALLOUTS];// Callouts
+ ErrlUserDetails_t iv_userDetails; // User Details section for Log
+} __attribute__ ((__packed__));
+
+typedef struct ErrlEntry ErrlEntry_t;
+
+/* Error Log Handle */
+typedef ErrlEntry_t* errlHndl_t;
+
+#ifdef __cplusplus
+#ifdef __PPE__
+}
+#else
+}
+#endif
+#endif
+
+#endif // _P9_HCD_OCC_ERRLDEFS_H
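
For orientation, a minimal sketch (not part of the patch) of filling one callout slot with the packed types above. It assumes this header is included as-is; the ERRL_CALLOUT_PRIORITY enumerators are defined elsewhere in the header, so a raw placeholder is used for the priority field, and the helper name is illustrative only.

    #include <string.h>   // memset

    static void fill_fw_callout(ErrlCallout_t* c)
    {
        c->iv_calloutValue = ERRL_COMPONENT_ID_FIRMWARE;      // interpreted according to iv_type
        c->iv_type         = ERRL_CALLOUT_TYPE_COMPONENT_ID;  // TMGT-OCC component id callout
        c->iv_priority     = 0x01;                            // placeholder; real codes live in ERRL_CALLOUT_PRIORITY
        memset(c->iv_reserved3, 0, sizeof(c->iv_reserved3));  // zero the 6 pad bytes that keep the packed struct 8-byte aligned
    }
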
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.c
index d7beb770..ce1838c6 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -81,16 +81,17 @@ uint32_t CRC_calc(uint32_t data)
//#################################################################################################
// Function polls OCB status register O2SST for o2s_ongoing=0
//#################################################################################################
-uint8_t pollVoltageTransDone(void)
+uint8_t pollVoltageTransDone(uint32_t BusNum)
{
uint8_t rc = 0;
uint32_t ocbRegReadData = 0;
uint8_t ongoingFlag = 1;
uint8_t count = 0;
- uint32_t BusMask = (in32(G_OCB_OCCS2) & AVS_BUS_NUM_MASK) << 4;
+ uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;
// The point of MAX_POLL_COUNT_AVS is to verify that ongoingFlag turns to
// zero very fast. Otherwise, something wrong with this i/f and error out.
+ //PK_TRACE_INF("PV:OCB_O2SST0A =0x%x",OCB_O2SST0A | BusMask);
while (ongoingFlag || (count <= MAX_POLL_COUNT_AVS))
{
ocbRegReadData = in32(OCB_O2SST0A | BusMask);
@@ -120,11 +121,11 @@ uint8_t pollVoltageTransDone(void)
//#################################################################################################
// Function which writes to OCB registers to initialize the AVS Slave with an idle frame
//#################################################################################################
-uint8_t driveIdleFrame(void)
+uint8_t driveIdleFrame(uint32_t BusNum)
{
uint8_t rc = 0;
uint32_t idleframe = 0xFFFFFFFF;
- uint32_t BusMask = (in32(G_OCB_OCCS2) & AVS_BUS_NUM_MASK) << 4;
+ uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;
// Clear sticky bits in o2s_status_reg
out32(OCB_O2SCMD0A | BusMask , 0x40000000);
@@ -133,7 +134,7 @@ uint8_t driveIdleFrame(void)
out32(OCB_O2SWD0A | BusMask , idleframe);
// Wait on o2s_ongoing = 0
- rc = pollVoltageTransDone();
+ rc = pollVoltageTransDone(BusNum);
return rc;
}
@@ -142,18 +143,18 @@ uint8_t driveIdleFrame(void)
//#################################################################################################
// Function which writes to OCB registers to initiate a AVS write transaction
//#################################################################################################
-uint8_t driveWrite(uint32_t CmdDataType, uint32_t CmdData)
+uint8_t driveWrite(uint32_t CmdDataType, uint32_t CmdData, uint32_t BusNum, uint32_t RailNum)
{
uint8_t rc = 0, retryCnt = 0, done = 0;
uint32_t ocbRegWriteData = 0;
uint32_t ocbRegReadData = 0;
- uint32_t RailSelect = in32(G_OCB_OCCS2) & AVS_RAIL_NUM_MASK;
+ uint32_t RailSelect = RailNum;
uint32_t StartCode = 1;
uint32_t CmdType = 0; // 0:write+commit, 1:write+hold, 2: d/c, 3:read
uint32_t CmdGroup = 0;
uint32_t CRC = 0;
- uint32_t BusMask = (in32(G_OCB_OCCS2) & AVS_BUS_NUM_MASK) << 4;
+ uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;
// Clear sticky bits in o2s_status_reg
out32(OCB_O2SCMD0A | BusMask, 0x40000000);
@@ -174,7 +175,7 @@ uint8_t driveWrite(uint32_t CmdDataType, uint32_t CmdData)
out32(OCB_O2SWD0A | BusMask, ocbRegWriteData);
// Wait on o2s_ongoing = 0
- rc = pollVoltageTransDone();
+ rc = pollVoltageTransDone(BusNum);
if (rc)
{
@@ -199,7 +200,7 @@ uint8_t driveWrite(uint32_t CmdDataType, uint32_t CmdData)
else
{
retryCnt++;
- rc = driveIdleFrame();
+ rc = driveIdleFrame(BusNum);
if (rc)
{
@@ -222,20 +223,20 @@ uint8_t driveWrite(uint32_t CmdDataType, uint32_t CmdData)
//#################################################################################################
// Function which writes to OCB registers to initiate a AVS read transaction
//#################################################################################################
-uint8_t driveRead(uint32_t CmdDataType, uint32_t* CmdData)
+uint8_t driveRead(uint32_t CmdDataType, uint32_t* CmdData, uint32_t BusNum, uint32_t RailNum)
{
uint8_t rc = 0, retryCnt = 0, done = 0;
uint32_t ocbRegReadData = 0;
uint32_t ocbRegWriteData = 0;
- uint32_t RailSelect = in32(G_OCB_OCCS2) & AVS_RAIL_NUM_MASK;
+ uint32_t RailSelect = RailNum;
uint32_t StartCode = 1;
uint32_t CmdType = 3; // 0:write+commit, 1:write+hold, 2: d/c, 3:read
uint32_t CmdGroup = 0;
uint32_t Reserved = 0xFFFF;
uint32_t CRC = 0;
- uint32_t BusMask = (in32(G_OCB_OCCS2) & AVS_BUS_NUM_MASK) << 4;
+ uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;
// Clear sticky bits in o2s_status_reg
out32(OCB_O2SCMD0A | BusMask, 0x40000000);
@@ -256,7 +257,7 @@ uint8_t driveRead(uint32_t CmdDataType, uint32_t* CmdData)
out32(OCB_O2SWD0A | BusMask, ocbRegWriteData);
// Wait on o2s_ongoing = 0
- rc = pollVoltageTransDone();
+ rc = pollVoltageTransDone(BusNum);
if (rc)
{
@@ -281,7 +282,7 @@ uint8_t driveRead(uint32_t CmdDataType, uint32_t* CmdData)
else
{
retryCnt++;
- rc = driveIdleFrame();
+ rc = driveIdleFrame(BusNum);
if (rc)
{
@@ -301,19 +302,20 @@ uint8_t driveRead(uint32_t CmdDataType, uint32_t* CmdData)
return rc;
}
+void avs_driver_init()
+{
+ //Initialize VDD
+ avs_driver_bus_init(G_gppb->avs_bus_topology.vdd_avsbus_num);
-//#################################################################################################
-// Function which initializes the OCB O2S registers
-//#################################################################################################
-void external_voltage_control_init(uint32_t* vext_read_mv)
+ //Initialize VDN
+ avs_driver_bus_init(G_gppb->avs_bus_topology.vdn_avsbus_num);
+}
+
+
+void avs_driver_bus_init(uint32_t BusNum)
{
uint8_t rc = 0;
- uint32_t CmdDataRead = 0;
-//#if EPM_P9_TUNING
-// We do not need to initialize O2S and AVS slave in product
-// because this is done in istep 6. But for EPM, we need to do it.
-//\todo Read from Parameter Block. These are attributes
#define CLOCK_SPIVID_MHZ 10
PK_TRACE_DBG("NestFreq=0x%x", G_gppb->nest_frequency_mhz);
uint32_t ocbRegReadData = 0;
@@ -324,11 +326,8 @@ void external_voltage_control_init(uint32_t* vext_read_mv)
uint32_t O2SCTRL1_value = 0b10010000000000000100000000000000 |
(G_gppb->nest_frequency_mhz / (8 * CLOCK_SPIVID_MHZ) - 1) << 18;
- //
// OCI to SPIPMBus (O2S) bridge initialization
- //
-
- uint32_t BusMask = (in32(G_OCB_OCCS2) & AVS_BUS_NUM_MASK) << 4;
+ uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;
// O2SCTRLF
ocbRegReadData = in32(OCB_O2SCTRLF0A | BusMask);
@@ -358,8 +357,7 @@ void external_voltage_control_init(uint32_t* vext_read_mv)
// In principle this only has to be done once. Though Doug Lane
// says that due to noise on the chip this init should be done
// periodically.
-
- rc = driveIdleFrame();
+ rc = driveIdleFrame(BusNum);
if (rc)
{
@@ -367,36 +365,25 @@ void external_voltage_control_init(uint32_t* vext_read_mv)
PGPE_TRACE_AND_PANIC(PGPE_AVS_INIT_DRIVE_IDLE_FRAME);
}
- // Drive read transaction to return initial setting of rail voltage and wait on o2s_ongoing=0
- rc = driveRead(0, &CmdDataRead);
-
- if (rc)
- {
- PK_TRACE_ERR("AVS_INIT: DriveRead FAIL");
- PGPE_TRACE_AND_PANIC(PGPE_AVS_INIT_DRIVE_READ);
- }
-
- *vext_read_mv = CmdDataRead;
}
-
//#################################################################################################
// Main function to initiate an eVRM voltage change. There is a write followed by a
// read, and then a voltage value compare check.
//#################################################################################################
-void external_voltage_control_write(uint32_t vext_write_mv)
+void avs_driver_voltage_write(uint32_t BusNum, uint32_t RailNum, uint32_t VoltMV)
{
uint8_t rc = 0;
uint32_t CmdDataType = 0; // 0b0000=Target rail voltage
- if (vext_write_mv > AVS_DRIVER_MAX_EXTERNAL_VOLTAGE ||
- vext_write_mv < AVS_DRIVER_MIN_EXTERNAL_VOLTAGE)
+ if (VoltMV > AVS_DRIVER_MAX_EXTERNAL_VOLTAGE ||
+ VoltMV < AVS_DRIVER_MIN_EXTERNAL_VOLTAGE)
{
PGPE_TRACE_AND_PANIC(PGPE_VOLTAGE_OUT_OF_BOUNDS);
}
// Drive write transaction with a target voltage on a particular rail and wait on o2s_ongoing=0
- rc = driveWrite(CmdDataType, vext_write_mv);
+ rc = driveWrite(CmdDataType, VoltMV, BusNum, RailNum);
switch (rc)
{
@@ -419,3 +406,29 @@ void external_voltage_control_write(uint32_t vext_write_mv)
break;
}
}
+
+void avs_driver_voltage_read(uint32_t BusNum, uint32_t RailNum, uint32_t* RetVolt)
+{
+ uint32_t rc = 0;
+
+ rc = driveRead(0x0, RetVolt, BusNum, RailNum);
+
+ if (rc)
+ {
+ PK_TRACE_ERR("AVS_READ_VOLT: DriveRead FAILED. BusNum=0x%x,RailNum=0x%x", BusNum, RailNum);
+ PGPE_TRACE_AND_PANIC(PGPE_AVS_DRIVE_READ);
+ }
+}
+
+void avs_driver_current_read(uint32_t BusNum, uint32_t RailNum, uint32_t* RetCurrent)
+{
+ uint32_t rc = 0;
+
+ rc = driveRead(0x2, RetCurrent, BusNum, RailNum);
+
+ if (rc)
+ {
+ PK_TRACE_ERR("AVS_READ_CURRENT: DriveRead FAILED rc=0x%x. BusNum=0x%x, RailNum=0x%x", rc, BusNum, RailNum);
+ PGPE_TRACE_AND_PANIC(PGPE_AVS_DRIVE_READ);
+ }
+}
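
A minimal sketch (not part of the patch) of the new addressing convention: callers now pass the AVS bus number explicitly instead of re-reading OCCS2, and each bus selects its copy of the O2S registers by OR-ing BusNum << O2S_BUSNUM_OFFSET_SHIFT (8, per the avs_driver.h hunk below) into the OCB address. The helper name is illustrative and assumes the same includes as avs_driver.c.

    static uint32_t o2s_status_addr(uint32_t BusNum)
    {
        uint32_t BusMask = BusNum << O2S_BUSNUM_OFFSET_SHIFT;  // bus 0 -> 0x000, bus 1 -> 0x100
        return OCB_O2SST0A | BusMask;                          // per-bus copy of the O2S status register
    }
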
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.h
index 574c62c3..06fa5624 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/avs_driver.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2017 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -26,16 +26,20 @@
// Setup and enable the O2S bridge on the AVS bus.
//
+
#ifndef _AVS_DRIVER_H_
#define _AVS_DRIVER_H_
+#include "pstate_pgpe_occ_api.h"
+
#define MAX_POLL_COUNT_AVS 10
#define AVS_RAIL_NUM_MASK 0xF
#define AVS_BUS_NUM_MASK 0x10
enum AVS_DRIVER
{
AVS_DRIVER_MAX_EXTERNAL_VOLTAGE = 1500,
- AVS_DRIVER_MIN_EXTERNAL_VOLTAGE = 500
+ AVS_DRIVER_MIN_EXTERNAL_VOLTAGE = 500,
+ O2S_BUSNUM_OFFSET_SHIFT = 8
};
enum AVS_DRIVER_RETURN_CODES
@@ -45,11 +49,17 @@ enum AVS_DRIVER_RETURN_CODES
AVS_RC_RESYNC_ERROR = 2
};
+void avs_driver_init();
+
+void avs_driver_bus_init(uint32_t BusNum);
+
void
-external_voltage_control_init(uint32_t* vext_read_mv);
+avs_driver_voltage_write(uint32_t BusNum, uint32_t RailNum, uint32_t VoltMV);
+void
+avs_driver_voltage_read(uint32_t BusNum, uint32_t RailNum, uint32_t* RetVolt);
void
-external_voltage_control_write(uint32_t vext_write_mv);
+avs_driver_current_read(uint32_t BusNum, uint32_t RailNum, uint32_t* RetCurrent);
#endif //_AVS_DRIVER_H_
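
A hedged usage sketch of the reworked interface, mirroring how p9_pgpe_pstate.c calls it later in this patch; the bus and rail numbers come from the Global Pstate Parameter Block topology fields, and the local variable is illustrative only.

    // One-time setup of both the VDD and VDN buses (done from p9_pgpe_pstate_init() in this patch):
    avs_driver_init();

    // Read the present VDD rail voltage in mV:
    uint32_t vdd_mv = 0;
    avs_driver_voltage_read(G_gppb->avs_bus_topology.vdd_avsbus_num,
                            G_gppb->avs_bus_topology.vdd_avsbus_rail,
                            &vdd_mv);
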
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_fit.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_fit.c
index f8834fcc..181666bf 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_fit.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_fit.c
@@ -30,6 +30,7 @@
#include <p9_hcd_memmap_occ_sram.H>
#include "p9_pgpe_optrace.h"
#include "occhw_shared_data.h"
+#include "avs_driver.h"
#define AUX_TASK 14
#define GPE2TSEL 0xC0020000
@@ -382,20 +383,50 @@ __attribute__((always_inline)) inline void handle_fit_timebase_sync()
G_tb_sync_count++;
}
}
+extern uint32_t G_pib_reset_flag;
//
// handle_undervolt
//
__attribute__((always_inline)) inline void handle_wov()
{
- if (G_pgpe_pstate_record.wov.status & WOV_UNDERVOLT_ENABLED)
+
+ G_wov_count++;
+
+ if ((G_gppb->wov_sample_125us >> 1) == G_wov_count)
{
- G_wov_count++;
+ G_wov_count = 0;
+
+ if (G_pgpe_pstate_record.produceWOFValues)
+ {
+ p9_pgpe_pstate_update_wof_produced_values();
+ }
- if ((G_gppb->wov_sample_125us / 2) == G_wov_count)
+ //If enabled, run undervolting algorithm
+ if (G_pgpe_pstate_record.wov.status & WOV_UNDERVOLT_ENABLED)
{
p9_pgpe_pstate_adjust_wov();
- G_wov_count = 0;
+ }
+
+ //If enabled, run overvolting algorithm
+ if (G_pgpe_pstate_record.wov.status & WOV_OVERVOLT_ENABLED)
+ {
+ if (G_pgpe_pstate_record.excessiveDroop == 1)
+ {
+ out32(G_OCB_OCCFLG_OR, BIT32(PGPE_OCS_DIRTY));
+ }
+ else
+ {
+ if (G_pgpe_pstate_record.pWofValues->dw2.fields.vdd_avg_mv >=
+ G_pgpe_pstate_record.vddCurrentThresh) //(default is #V turbo current)
+ {
+ out32(G_OCB_OCCFLG_OR, BIT32(PGPE_OCS_DIRTY));
+ }
+ else
+ {
+ out32(G_OCB_OCCFLG_CLR, BIT32(PGPE_OCS_DIRTY));
+ }
+ }
}
}
}
@@ -409,10 +440,13 @@ void p9_pgpe_fit_handler(void* arg, PkIrqId irq)
{
mtmsr(PPE42_MSR_INITIAL);
+ PK_TRACE_DBG("IPB reset flag value %x", G_pib_reset_flag);
+ G_pib_reset_flag = 0;
handle_occ_beacon();
handle_core_throttle();
handle_occflg_requests();
handle_aux_task();
handle_fit_timebase_sync();
handle_wov();
+
}
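
A short arithmetic note on the new cadence, inferred from the counter logic above (the FIT period itself is not stated in this patch): handle_wov() now counts its own invocations and performs its work every wov_sample_125us >> 1 calls; the sample value below is hypothetical.

    uint32_t wov_sample_125us = 16;                     // example only; the real value comes from G_gppb
    uint32_t run_every_n_fits = wov_sample_125us >> 1;  // == 8: WOF-values production and the WOV
                                                        // algorithms run on every 8th handle_wov() call
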
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_gppb.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_gppb.c
index ee54b194..594c3550 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_gppb.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_gppb.c
@@ -72,8 +72,8 @@ const uint8_t G_vdm_threshold_table[13] =
//Globals and externs
GlobalPstateParmBlock* G_gppb;//Global pointer to GlobalPstateParmBlock
-uint32_t G_ext_vrm_inc_rate_mult_usperus;
-uint32_t G_ext_vrm_dec_rate_mult_usperus;
+uint32_t G_ext_vrm_inc_rate_mult_usperv;
+uint32_t G_ext_vrm_dec_rate_mult_usperv;
extern PgpeHeader_t* G_pgpe_header_data;
//
@@ -94,11 +94,11 @@ void p9_pgpe_gppb_init()
G_gppb = (GlobalPstateParmBlock*)gppb_sram_offset;
//PK_TRACE_INF("INIT: DPLL0Value=0x%x", G_gppb->dpll_pstate0_value);
- //External VRM increasing rate in us/uv
- G_ext_vrm_inc_rate_mult_usperus = 1 / G_gppb->ext_vrm_transition_rate_inc_uv_per_us;
+ //External VRM increasing rate in us/v
+ G_ext_vrm_inc_rate_mult_usperv = (1000 * 1000) / G_gppb->ext_vrm_transition_rate_inc_uv_per_us;
- //External VRM decreasing rate in us/uv
- G_ext_vrm_dec_rate_mult_usperus = 1 / G_gppb->ext_vrm_transition_rate_dec_uv_per_us;
+ //External VRM decreasing rate in us/v
+ G_ext_vrm_dec_rate_mult_usperv = (1000 * 1000) / G_gppb->ext_vrm_transition_rate_dec_uv_per_us;
}
//
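
Worked arithmetic for the rescaled constant (the slew rate value is hypothetical; only the formula comes from the code above). Expressing the rate in microseconds per volt avoids the integer-division truncation of the old microseconds-per-microvolt form:

    // ext_vrm_transition_rate_inc_uv_per_us = 10000, i.e. 10 mV/us (example value only)
    // old: 1 / 10000              -> 0 us per uV (integer division truncates to zero)
    // new: (1000 * 1000) / 10000  -> 100 us per V
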
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.c
index 30f1f9cc..f3ba83f6 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.c
@@ -27,9 +27,6 @@
#include "p9_hcode_image_defines.H"
#include "p9_hcd_memmap_base.H"
-//OCC Shared SRAM starts at bottom 2K of PGPE OCC SRAM space
-#define OCC_SHARED_SRAM_ADDR_START \
- (OCC_SRAM_PGPE_BASE_ADDR + OCC_SRAM_PGPE_REGION_SIZE - PGPE_OCC_SHARED_SRAM_SIZE)
PgpeHeader_t* G_pgpe_header_data;
extern PgpeHeader_t* _PGPE_IMG_HEADER __attribute__ ((section (".pgpe_image_header")));
@@ -78,7 +75,11 @@ void p9_pgpe_header_init()
G_pgpe_header_data->g_pgpe_wof_state_address = (uint32_t)&occ_shared_data->pgpe_wof_state;//Wof State
G_pgpe_header_data->g_pgpe_req_active_quad_address = (uint32_t)
&occ_shared_data->req_active_quads;//Requested Active Quads
+ G_pgpe_header_data->g_pgpe_wof_values_address = (uint32_t)&occ_shared_data->pgpe_wof_values;//Wof Values
- //Write the magic number in the HcodeOCCSharedData struct
- occ_shared_data->magic = HCODE_OCC_SHARED_MAGIC_NUMBER;
+ // Write magic number & total error log slots supported in the shared data
+ // hcode error log table, for OCC to start acting on PGPE and SGPE errors.
+ // Init all error slots to available.
+ occ_shared_data->errlog_idx.dw0.fields.total_log_slots = MAX_HCODE_ELOG_ENTRIES;
+ occ_shared_data->errlog_idx.dw0.fields.magic_word = HCODE_ELOG_TABLE_MAGIC_WORD;
}
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.h
index f441479d..bc3504b1 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_header.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -26,6 +26,12 @@
#define _P9_PGPE_HEADER_H_
#include "pk.h"
+#include "p9_hcode_image_defines.H"
+#include "p9_hcd_memmap_base.H"
+
+//OCC Shared SRAM starts at bottom 2K of PGPE OCC SRAM space
+#define OCC_SHARED_SRAM_ADDR_START \
+ (OCC_SRAM_PGPE_BASE_ADDR + OCC_SRAM_PGPE_REGION_SIZE - PGPE_OCC_SHARED_SRAM_SIZE)
//
// p9_pgpe_header_init
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_irq_handlers.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_irq_handlers.c
index d1cc75e7..32272795 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_irq_handlers.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_irq_handlers.c
@@ -160,7 +160,7 @@ void p9_pgpe_irq_handler_ocb_err()
{
G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) |
- (G_pgpe_pstate_record.activeCores << 8);
+ (G_pgpe_pstate_record.activeCores >> 8);
G_pgpe_optrace_data.word[1] = (G_pgpe_pstate_record.psCurr.fields.glb << 24) |
(G_pgpe_pstate_record.extVrmCurr << 8) |
PGPE_OP_TRACE_OCC_HB_FAULT;
@@ -200,13 +200,14 @@ void p9_pgpe_irq_handler_sgpe_err()
out32(G_OCB_OISR0_CLR, BIT32(8));
//Optrace
- G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) |
- (G_pgpe_pstate_record.activeCores << 8);
- G_pgpe_optrace_data.word[1] = (G_pgpe_pstate_record.psCurr.fields.glb << 24) |
- (G_pgpe_pstate_record.extVrmCurr << 8) |
- PGPE_OP_TRACE_SGPE_FAULT;
+ G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) |
+ (G_pgpe_pstate_record.activeCores >> 8);
+ G_pgpe_optrace_data.word[1] = (G_pgpe_pstate_record.psCurr.fields.glb << 24) |
+ (G_pgpe_pstate_record.extVrmCurr << 8) |
+ PGPE_OP_TRACE_SGPE_FAULT;
p9_pgpe_optrace(SEVERE_FAULT_DETECTED);
+
//HALT if DEBUG_HALT is set
PGPE_OPTIONAL_TRACE_AND_PANIC(PGPE_GPE3_ERROR);
@@ -241,7 +242,7 @@ void p9_pgpe_irq_handler_pvref_err()
//Optrace
G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) |
- (G_pgpe_pstate_record.activeCores << 8);
+ (G_pgpe_pstate_record.activeCores >> 8);
G_pgpe_optrace_data.word[1] = (G_pgpe_pstate_record.psCurr.fields.glb << 24) |
(G_pgpe_pstate_record.extVrmCurr << 8) |
PGPE_OP_TRACE_PVREF_FAULT;
@@ -286,7 +287,7 @@ void p9_pgpe_irq_handler_system_xstop(void* arg, PkIrqId irq)
//Optrace
G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) |
- (G_pgpe_pstate_record.activeCores << 8);
+ (G_pgpe_pstate_record.activeCores >> 8);
G_pgpe_optrace_data.word[1] = (G_pgpe_pstate_record.psCurr.fields.glb << 24) |
(G_pgpe_pstate_record.extVrmCurr << 8) |
PGPE_OP_TRACE_SYS_XSTOP;
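
A minimal sketch of the trace word packing after this fix; the bit positions are inferred from the shift amounts and are not spelled out in the patch itself.

    uint32_t word0 = (activeQuads << 24)   // quad mask into the top byte (bits 31:24)
                   | (activeCores >> 8);   // left-justified core mask shifted down into bits 23:0,
                                           // where the old << 8 would have collided with the quad bits
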
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_main.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_main.c
index 8488b442..14947414 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_main.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_main.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -32,6 +32,8 @@
#include "occhw_shared_data.h"
#include "p9_hcd_memmap_occ_sram.H"
#include "p9_hcd_memmap_base.H"
+#include "p9_hcd_occ_errldefs.h"
+#include "p9_hcd_errldefs.h"
extern TraceData_t G_pgpe_optrace_data;
@@ -214,7 +216,7 @@ main(int argc, char** argv)
#elif (CUMULUS_DD_LEVEL != 0)
#define PVR_CONST (0x42090800 | (((CUMULUS_DD_LEVEL ) / 10) << 8) | (CUMULUS_DD_LEVEL % 10))
#elif (AXONE_DD_LEVEL != 0)
-#define PVR_CONST (0x42091000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
+#define PVR_CONST (0x42090000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
#else
#define PVR_CONST 0
#endif
@@ -279,6 +281,9 @@ main(int argc, char** argv)
pk_thread_resume(&G_p9_pgpe_thread_process_requests);
pk_thread_resume(&G_p9_pgpe_thread_actuate_pstates);
+ // Initialize the PGPE Error Logging
+ initErrLogging ((uint8_t) ERRL_SOURCE_PGPE);
+
//PGPE Header Init
p9_pgpe_header_init();
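
Worked example of the corrected constant, using AXONE_DD_LEVEL=10 as defined by the new pstate_gpe_p9a10.mk later in this patch:

    // PVR_CONST = 0x42090000 | ((10 / 10) << 8) | (10 % 10)
    //           = 0x42090000 | 0x100 | 0x0
    //           = 0x42090100   (the old 0x42091000 base would have yielded 0x42091100)
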
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.c
index ad6f0521..db437ff3 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -74,7 +74,7 @@ void p9_pgpe_optrace(uint32_t mark)
if(G_lastDisable) //Place start trace mark at when first enabled
{
G_lastDisable = 0;
- p9_pgpe_optrace(ACK_START_TRACE);
+ p9_pgpe_optrace(ACK_START_TRACE); //Note, recursive function call
}
uint32_t word_count = ((mark >> 4) & 0x3) + 1;
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.h
index dea34083..2bebeeac 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_optrace.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2016,2018 */
+/* COPYRIGHT 2016,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -28,12 +28,12 @@
enum PGPE_OP_TRACE_ENCODES
{
- START_STOP_IPC = 0,
- START_STOP_FLAG = 1,
+ PSTATE_STOP = 0,
+ PSTATE_START = 1,
//Without Timestamps
- WOF_CALC_DONE = 0x11,
AUCTION_DONE = 0x10,
+ WOF_CALC_DONE = 0x11,
FIT_TB_RESYNC = 0x02,
//ACKs
@@ -68,14 +68,18 @@ enum PGPE_OP_TRACE_ENCODES
PRC_CORES_ACTV = 0x5A,
PRC_QUAD_ACTV = 0x5B,
FIT_TB_SYNC = 0x5C,
- SEVERE_FAULT_DETECTED = 0x5D,
- SYSTEM_XSTOP = 0x4D,
+ SEVERE_FAULT_DETECTED = 0x6D,
PRC_PM_SUSP = 0x4E,
- PRC_SAFE_MODE = 0x4F,
+ PRC_SAFE_MODE = 0x5F,
//Debug Markers
PROLONGED_DROOP_EVENT = 0x9E,
- PROLONGED_DROOP_RESOLVED = 0xAF
+ PROLONGED_DROOP_RESOLVED = 0xAF,
+
+ //PMCR OWNER
+ OP_PMCR_HOST = 0x0,
+ OP_PMCR_OCC = 0x1,
+ OP_PMCR_CHAR = 0x2,
};
enum PGPE_OP_TRACE_SEVERE_FAULTS
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.c
index 0beff6d9..e8c5c0d4 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.c
@@ -49,8 +49,8 @@
extern TraceData_t G_pgpe_optrace_data;
extern PgpeHeader_t* G_pgpe_header_data;
extern GlobalPstateParmBlock* G_gppb;
-extern uint32_t G_ext_vrm_inc_rate_mult_usperus;
-extern uint32_t G_ext_vrm_dec_rate_mult_usperus;
+extern uint32_t G_ext_vrm_inc_rate_mult_usperv;
+extern uint32_t G_ext_vrm_dec_rate_mult_usperv;
extern PgpePstateRecord G_pgpe_pstate_record;
extern void p9_pgpe_ipc_ack_sgpe_ctrl_stop_updt(ipc_msg_t* msg, void* arg);
extern void p9_pgpe_ipc_ack_sgpe_suspend_stop(ipc_msg_t* msg, void* arg);
@@ -63,7 +63,7 @@ GPE_BUFFER(ipcmsg_p2s_ctrl_stop_updates_t G_sgpe_control_updt);
GPE_BUFFER(ipcmsg_p2s_suspend_stop_t G_sgpe_suspend_stop);
//Local Functions
-void p9_pgpe_handle_nacks(uint32_t origCoreVector, uint32_t origAckVector, uint32_t expectedAcks);
+void p9_pgpe_handle_nacks(uint32_t origCoreVector, uint32_t origAckVector, uint32_t expectedAcks, uint64_t db3val);
void p9_pgpe_pstate_freq_updt();
void p9_pgpe_pstate_wov_init();
inline void p9_pgpe_droop_throttle() __attribute__((always_inline));
@@ -124,6 +124,7 @@ void p9_pgpe_pstate_init()
G_pgpe_pstate_record.pQuadState0 = (quad_state0_t*)G_pgpe_header_data->g_quad_status_addr;
G_pgpe_pstate_record.pQuadState1 = (quad_state1_t*)(G_pgpe_header_data->g_quad_status_addr + 8);
G_pgpe_pstate_record.pReqActQuads = (requested_active_quads_t*)(G_pgpe_header_data->g_pgpe_req_active_quad_address);
+ G_pgpe_pstate_record.pWofValues = (pgpe_wof_values_t*)(G_pgpe_header_data->g_pgpe_wof_values_address);
G_pgpe_pstate_record.pQuadState0->fields.quad0_pstate = 0xff;
G_pgpe_pstate_record.pQuadState0->fields.quad1_pstate = 0xff;
G_pgpe_pstate_record.pQuadState0->fields.quad2_pstate = 0xff;
@@ -134,6 +135,15 @@ void p9_pgpe_pstate_init()
G_pgpe_pstate_record.pQuadState1->fields.active_cores = 0x0;
G_pgpe_pstate_record.pReqActQuads->fields.requested_active_quads = 0x0;
G_pgpe_pstate_record.activeCoreUpdtAction = ACTIVE_CORE_UPDATE_ACTION_ERROR;
+ G_pgpe_pstate_record.pWofValues->dw0.value = 0;
+ G_pgpe_pstate_record.pWofValues->dw1.value = 0;
+ G_pgpe_pstate_record.pWofValues->dw2.value = 0;
+ G_pgpe_pstate_record.pWofValues->dw3.value = 0;
+ G_pgpe_pstate_record.prevIdd = 0;
+ G_pgpe_pstate_record.prevIdn = 0;
+ G_pgpe_pstate_record.prevVdd = 0;
+ G_pgpe_pstate_record.vddCurrentThresh = G_gppb->operating_points_set[VPD_PT_SET_BIASED_SYSP][TURBO].idd_100ma;
+ G_pgpe_pstate_record.excessiveDroop = 0;
//Create Semaphores
pk_semaphore_create(&(G_pgpe_pstate_record.sem_actuate), 0, 1);
@@ -142,6 +152,30 @@ void p9_pgpe_pstate_init()
//WOV init
p9_pgpe_pstate_wov_init();
+
+ //Initialize avs_driver
+ avs_driver_init();
+
+ HcodeOCCSharedData_t* occ_shared_data = (HcodeOCCSharedData_t*)
+ OCC_SHARED_SRAM_ADDR_START; //Bottom 2K of PGPE OCC Sram Space
+
+ if (in32(OCB_OCCFLG2) & BIT32(OCCFLG2_ENABLE_PRODUCE_WOF_VALUES))
+ {
+ //Write the magic number in the HcodeOCCSharedData struct
+ occ_shared_data->magic = HCODE_OCC_SHARED_MAGIC_NUMBER_OPS1;
+ G_pgpe_pstate_record.produceWOFValues = 1;
+
+ //Read VDN Voltage. On P9, VDN is NOT updated by PGPE, so we read it
+ //once during init and then don't read it again
+ uint32_t vdn = 0;
+ avs_driver_voltage_read(G_gppb->avs_bus_topology.vdn_avsbus_num, G_gppb->avs_bus_topology.vdn_avsbus_rail, &vdn);
+ G_pgpe_pstate_record.pWofValues->dw2.fields.vdn_avg_mv = vdn;
+ }
+ else
+ {
+ occ_shared_data->magic = HCODE_OCC_SHARED_MAGIC_NUMBER_OPS0;
+ G_pgpe_pstate_record.produceWOFValues = 0;
+ }
}
//
@@ -419,6 +453,64 @@ void p9_pgpe_pstate_update_wof_state()
wof_state->fields.vratio = G_pgpe_pstate_record.vratio;
PK_TRACE_INF("WFU: FClip_PS=0x%x, vindex=0x%x, vratio=0x%x", G_pgpe_pstate_record.wofClip, G_pgpe_pstate_record.vindex,
G_pgpe_pstate_record.vratio);
+
+}
+
+//
+// p9_pgpe_pstate_update_wof_produced_values
+//
+// This function updates the wof produced values in the OCC Shared SRAM area
+//
+void p9_pgpe_pstate_update_wof_produced_values()
+{
+ uint32_t current;
+
+ avs_driver_current_read(G_gppb->avs_bus_topology.vdd_avsbus_num, G_gppb->avs_bus_topology.vdd_avsbus_rail, &current);
+ PK_TRACE_DBG("VDD Current=0x%x, BusNum=0x%x, RailNum=0x%x", current, G_gppb->avs_bus_topology.vdd_avsbus_num,
+ G_gppb->avs_bus_topology.vdd_avsbus_rail);
+
+ G_pgpe_pstate_record.pWofValues->dw1.fields.idd_avg_ma = (G_pgpe_pstate_record.prevIdd + current) >> 1;
+ G_pgpe_pstate_record.prevIdd = current;
+
+ avs_driver_current_read(G_gppb->avs_bus_topology.vdn_avsbus_num, G_gppb->avs_bus_topology.vdn_avsbus_rail, &current);
+ PK_TRACE_DBG("VDN Current=0x%x, BusNum=0x%x, RailNum=0x%x", current, G_gppb->avs_bus_topology.vdn_avsbus_num,
+ G_gppb->avs_bus_topology.vdn_avsbus_rail);
+
+ G_pgpe_pstate_record.pWofValues->dw1.fields.idn_avg_ma = (G_pgpe_pstate_record.prevIdn + current) >> 1;
+ G_pgpe_pstate_record.prevIdn = current;
+
+ G_pgpe_pstate_record.pWofValues->dw2.fields.vdd_avg_mv = (G_pgpe_pstate_record.prevVdd +
+ G_pgpe_pstate_record.extVrmCurr) >> 1;
+ G_pgpe_pstate_record.prevVdd = G_pgpe_pstate_record.extVrmCurr;
+
+
+ uint32_t avg_pstate = 0;
+ uint32_t q, num = 0;
+
+ for (q = 0; q < MAX_QUADS; q++)
+ {
+ if (G_pgpe_pstate_record.activeQuads & QUAD_MASK(q))
+ {
+ avg_pstate += G_pgpe_pstate_record.psComputed.fields.quads[q];
+ num = num + 1;
+ }
+ }
+
+ if (num > 0)
+ {
+ G_pgpe_pstate_record.pWofValues->dw0.fields.average_pstate = ((avg_pstate / num) + G_pgpe_pstate_record.prevAvgPstate)
+ >> 1;
+ G_pgpe_pstate_record.pWofValues->dw0.fields.average_frequency_pstate =
+ G_pgpe_pstate_record.pWofValues->dw0.fields.average_pstate;
+ G_pgpe_pstate_record.prevAvgPstate = G_pgpe_pstate_record.pWofValues->dw0.fields.average_pstate;
+ }
+
+ G_pgpe_pstate_record.pWofValues->dw0.fields.clip_pstate = G_pgpe_pstate_record.wofClip;
+ G_pgpe_pstate_record.pWofValues->dw0.fields.vratio_inst = G_pgpe_pstate_record.vratio;
+ G_pgpe_pstate_record.pWofValues->dw0.fields.vratio_avg = (G_pgpe_pstate_record.vratio +
+ G_pgpe_pstate_record.prevVratio) >> 1;
+ G_pgpe_pstate_record.prevVratio = G_pgpe_pstate_record.vratio;
+
}
//
@@ -487,7 +579,8 @@ void p9_pgpe_send_db0(db0_parms_t p)
if(G_pgpe_pstate_record.quadsNACKed)
{
- p9_pgpe_handle_nacks(p.targetCores, p.expectedAckFrom, p.expectedAckValue);
+ uint64_t val = (uint64_t)MSGID_DB3_REPLAY_DB0 << 56;
+ p9_pgpe_handle_nacks(p.targetCores, p.expectedAckFrom, p.expectedAckValue, val);
}
}
}
@@ -527,7 +620,15 @@ void p9_pgpe_send_db3(db3_parms_t p)
if(G_pgpe_pstate_record.quadsNACKed && (p.checkNACKs == PGPE_DB3_CHECK_NACKS))
{
- p9_pgpe_handle_nacks(p.targetCores, p.expectedAckFrom, p.expectedAckValue);
+ if(p.useDB3ValForNacks)
+ {
+ p9_pgpe_handle_nacks(p.targetCores, p.expectedAckFrom, p.expectedAckValue, p.db3val);
+ }
+ else
+ {
+ uint64_t val = (uint64_t)MSGID_DB3_REPLAY_DB0 << 56;
+ p9_pgpe_handle_nacks(p.targetCores, p.expectedAckFrom, p.expectedAckValue, val);
+ }
}
}
@@ -665,24 +766,52 @@ void p9_pgpe_pstate_send_pmsr_updt(uint32_t command, uint32_t targetCoresVector,
// In case a prolonged droop event happens, CME will detect a timeout and send nack.
// This function handles the nacks from CME
//
-void p9_pgpe_handle_nacks(uint32_t origTargetCores, uint32_t origExpectedAckFrom, uint32_t expectedAckVal)
+void p9_pgpe_handle_nacks(uint32_t origTargetCores, uint32_t origExpectedAckFrom, uint32_t expectedAckVal,
+ uint64_t db3val)
{
uint32_t q;
uint32_t expectedAckFrom = origExpectedAckFrom;
uint32_t targetCores = origTargetCores;
db3_parms_t p;
- p.db3val = (uint64_t)MSGID_DB3_REPLAY_DB0 << 56;
+ p.db3val = db3val;
p.db0val = 0;
p.writeDB0 = PGPE_DB3_SKIP_WRITE_DB0;
p.waitForAcks = PGPE_DB_ACK_WAIT_CME;
p.checkNACKs = PGPE_DB3_SKIP_CHECK_NACKS;
- G_pgpe_pstate_record.wov.target_pct = 0;
- p9_pgpe_pstate_updt_ext_volt();
+ //If WOV undervolting is enabled, then remove the WOV bias
+ if (G_pgpe_pstate_record.wov.status & WOV_UNDERVOLT_ENABLED)
+ {
+ G_pgpe_pstate_record.wov.target_pct = 0; //Clear any WOV bias
+ uint32_t tmpBiasSyspExtVrmNext = G_pgpe_pstate_record.biasSyspExtVrmNext; //Save Next Voltage
+
+ //If Current Voltage != Next Voltage, then we are moving to a lower pstate (lower frequency/voltage)
+ //and the voltage hasn't been updated yet. To remove the WOV bias we use the current voltage, because
+ //p9_pgpe_pstate_updt_ext_volt uses the next voltage to write the VRMs
+ if (G_pgpe_pstate_record.biasSyspExtVrmCurr != G_pgpe_pstate_record.biasSyspExtVrmNext)
+ {
+ G_pgpe_pstate_record.biasSyspExtVrmNext = G_pgpe_pstate_record.biasSyspExtVrmCurr;
+ }
+
+ p9_pgpe_pstate_updt_ext_volt(); //Do the voltage update, i.e. remove any WOV bias
+ G_pgpe_pstate_record.biasSyspExtVrmNext = tmpBiasSyspExtVrmNext; //Restore Next voltage
+ }
+
+ //a) If the OCC Scratch2 Core Throttle Continuous Change Enable bit is set, panic
+ if ((in32(G_OCB_OCCS2) & BIT32(CORE_THROTTLE_CONTINUOUS_CHANGE_ENABLE)))
+ {
+ PGPE_TRACE_AND_PANIC(PGPE_DROOP_AND_CORE_THROTTLE_ENABLED);
+ }
+
+ //b) If OCC flag PGPE Prolonged Droop Workaround Active bit is not set,
+ // call droop_throttle()
+ if (!(in32(G_OCB_OCCFLG) & BIT32(PGPE_PROLONGED_DROOP_WORKAROUND_ACTIVE)))
+ {
+ p9_pgpe_droop_throttle();
+ }
//c) Send DB3 (Replay Previous DB0 Operation) to only the CME Quad Managers, and
//their Sibling CME (if present), that responded with a NACK.
-
while(G_pgpe_pstate_record.quadsNACKed)
{
G_pgpe_pstate_record.cntNACKs++;
@@ -704,14 +833,6 @@ void p9_pgpe_handle_nacks(uint32_t origTargetCores, uint32_t origExpectedAckFrom
//If a NACK received was in response to the first retry (i.e. second failed attempt):
if (G_pgpe_pstate_record.cntNACKs == 2)
{
-
- //b) If OCC flag PGPE Prolonged Droop Workaround Active bit is not set,
- // call droop_throttle()
- if (!(in32(G_OCB_OCCFLG) & BIT32(PGPE_PROLONGED_DROOP_WORKAROUND_ACTIVE)))
- {
- p9_pgpe_droop_throttle();
- }
-
// 1 SCOM Write to OCC FIR[prolonged_droop_detected] bit. This FIR bit is set to recoverable so that it will create an informational error log.
GPE_PUTSCOM(OCB_OCCLFIR_OR, BIT64(OCCLFIR_PROLONGED_DROOP_DETECTED));
@@ -719,6 +840,7 @@ void p9_pgpe_handle_nacks(uint32_t origTargetCores, uint32_t origExpectedAckFrom
// will read to tell OCC not to attempt a PM Complex reset on
// PGPE timeouts in the meantime.
out32(G_OCB_OCCFLG_OR, BIT32(PGPE_PM_RESET_SUPPRESS));
+ PK_TRACE_INF("NACK: PM_RESET_SUPPRESS SET");
// 3 Send DB0 PMSR Update with message Set Pstates Suspended only
// to the CME QM (and their Siblings) that provided an ACK
@@ -739,7 +861,10 @@ void p9_pgpe_handle_nacks(uint32_t origTargetCores, uint32_t origExpectedAckFrom
}//End while(quadNACked) loop
//if OCC Flag Register PGPE Prolonged Droop Workaround Active bit is set and all CME QMs respond with ACK
- p9_pgpe_droop_unthrottle();
+ if (in32(G_OCB_OCCFLG) & BIT32(PGPE_PROLONGED_DROOP_WORKAROUND_ACTIVE))
+ {
+ p9_pgpe_droop_unthrottle();
+ }
}
//
@@ -810,7 +935,15 @@ void p9_pgpe_pstate_start(uint32_t pstate_start_origin)
}
//3. Move system to SyncPState
- external_voltage_control_init(&G_pgpe_pstate_record.extVrmCurr);
+ PK_TRACE_INF("VDD_BUS_NUM=0x%x" , G_gppb->avs_bus_topology.vdd_avsbus_num);
+ PK_TRACE_INF("VDD_RAIL_NUM=0x%x", G_gppb->avs_bus_topology.vdd_avsbus_rail);
+ PK_TRACE_INF("VDN_BUS_NUM=0x%x" , G_gppb->avs_bus_topology.vdn_avsbus_num);
+ PK_TRACE_INF("VDN_RAIL_NUM=0x%x", G_gppb->avs_bus_topology.vdn_avsbus_rail);
+
+ //avs_driver_init();
+ avs_driver_voltage_read(G_gppb->avs_bus_topology.vdd_avsbus_num, G_gppb->avs_bus_topology.vdd_avsbus_rail,
+ &G_pgpe_pstate_record.extVrmCurr);
+
G_pgpe_pstate_record.biasSyspExtVrmCurr = G_pgpe_pstate_record.extVrmCurr;
G_pgpe_pstate_record.biasSyspExtVrmNext = p9_pgpe_gppb_intp_vdd_from_ps(syncPstate, VPD_PT_SET_BIASED_SYSP);
PK_TRACE_INF("PST: SyncPstate=0x%x eVid(Boot)=%umV,eVid(SyncPstate)=%umV", syncPstate,
@@ -978,6 +1111,12 @@ void p9_pgpe_pstate_start(uint32_t pstate_start_origin)
PK_TRACE_INF("PST: Undervolting Enabled");
}
+ if (G_pgpe_header_data->g_pgpe_flags & PGPE_FLAG_WOV_OVERVOLT_ENABLE)
+ {
+ G_pgpe_pstate_record.wov.status = WOV_OVERVOLT_ENABLED;
+ PK_TRACE_INF("PST: Overvolting Enabled");
+ }
+
PK_TRACE_DBG("PST: Start Done");
}
@@ -1131,10 +1270,6 @@ void p9_pgpe_pstate_stop()
out32(G_OCB_OCCS2, occScr2);
G_pgpe_pstate_record.pstatesStatus = PSTATE_STOPPED;
- G_pgpe_optrace_data.word[0] = (START_STOP_FLAG << 24) | (G_pgpe_pstate_record.psComputed.fields.glb << 16)
- | (in32(G_OCB_QCSR) >> 16);
- p9_pgpe_optrace(PRC_START_STOP);
-
PK_TRACE_DBG("PSS: Stop Done");
}
@@ -1559,15 +1694,15 @@ void p9_pgpe_pstate_safe_mode()
pgpe_db0_glb_bcast_t db0;
// Generate OPTRACE Process Start
- G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) | (G_pgpe_pstate_record.psComputed.fields.glb <<
+ G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) | (G_pgpe_pstate_record.psCurr.fields.glb <<
16)
| (G_pgpe_pstate_record.safePstate << 8) |
- G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_OCC] ? 0x20 : 0 |
- G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_SGPE] ? 0x10 : 0 |
- G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_CME] ? 0x08 : 0 |
- G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_PVREF] ? 0x04 : 0 |
- safemode ? 0x2 : 0 |
- suspend ? 0x1 : 0;
+ G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_PVREF] ? 0x20 : 0 |
+ G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_CME] ? 0x10 : 0 |
+ G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_SGPE] ? 0x8 : 0 |
+ G_pgpe_pstate_record.severeFault[SAFE_MODE_FAULT_OCC] ? 0x4 : 0 |
+ suspend ? 0x2 : 0 |
+ safemode ? 0x1 : 0;
p9_pgpe_optrace(PRC_SAFE_MODE);
@@ -1585,6 +1720,7 @@ void p9_pgpe_pstate_safe_mode()
p.expectedAckFrom = G_pgpe_pstate_record.activeQuads;
p.expectedAckValue = MSGID_PCB_TYPE4_ACK_PSTATE_PROTO_ACK;
p.checkNACKs = PGPE_DB3_CHECK_NACKS;
+ p.useDB3ValForNacks = 1;
p9_pgpe_send_db3(p);
PK_TRACE_INF("SAF: Safe Mode Actuation Done!");
@@ -1667,6 +1803,7 @@ void p9_pgpe_pstate_sgpe_fault()
p.expectedAckFrom = G_pgpe_pstate_record.activeQuads;
p.expectedAckValue = MSGID_PCB_TYPE4_ACK_PSTATE_PROTO_ACK;
p.checkNACKs = PGPE_DB3_CHECK_NACKS;
+ p.useDB3ValForNacks = 0;
p9_pgpe_send_db3(p);
//3. PGPE performs STOP Recovery Trigger to set a malfunction alert to the
@@ -1764,6 +1901,7 @@ void p9_pgpe_pstate_handle_pending_occ_ack_on_fault()
args_wof_vfrt->msg_cb.rc = PGPE_RC_PM_COMPLEX_SUSPEND_SAFE_MODE;
G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_WOF_VFRT].pending_ack = 0;
ipc_send_rsp(G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_WOF_VFRT].cmd, IPC_RC_SUCCESS);
+ p9_pgpe_optrace(ACK_WOF_VFRT);
}
}
@@ -1789,6 +1927,7 @@ void p9_pgpe_pstate_handle_pending_sgpe_ack_on_fault()
args->fields.return_code = IPC_SGPE_PGPE_RC_SUCCESS;
G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_SGPE_ACTIVE_QUADS_UPDT].pending_ack = 0;
ipc_send_rsp(G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_SGPE_ACTIVE_QUADS_UPDT].cmd, IPC_RC_SUCCESS);
+ p9_pgpe_optrace(ACK_QUAD_ACTV);
}
//ACK back to SGPE with "IPC_SGPE_PGPE_RC_SUCCESS"
@@ -1810,6 +1949,7 @@ void p9_pgpe_pstate_handle_pending_sgpe_ack_on_fault()
G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_SGPE_ACTIVE_CORES_UPDT].pending_ack = 0;
ipc_send_rsp(G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_SGPE_ACTIVE_CORES_UPDT].cmd, IPC_RC_SUCCESS);
args->fields.return_code = IPC_SGPE_PGPE_RC_SUCCESS;
+ p9_pgpe_optrace(ACK_CORES_ACTV);
}
}
@@ -2065,7 +2205,7 @@ void p9_pgpe_pstate_updt_ext_volt()
//to keep the math simple(use shift instead of multiply) we approximate
//1us as (1024/32)=32 OTBR ticks
delay_ticks = ((G_pgpe_pstate_record.extVrmCurr - G_pgpe_pstate_record.extVrmNext) *
- G_ext_vrm_dec_rate_mult_usperus) << 5;
+ G_ext_vrm_dec_rate_mult_usperv) >> 5;
}
//Increasing
else if (G_pgpe_pstate_record.extVrmNext > G_pgpe_pstate_record.extVrmCurr)
@@ -2074,13 +2214,14 @@ void p9_pgpe_pstate_updt_ext_volt()
//to keep the math simple(use shift instead of multiply) we approximate
//1us as (1024/32)=32 OTBR ticks
delay_ticks = ((G_pgpe_pstate_record.extVrmNext - G_pgpe_pstate_record.extVrmCurr) *
- G_ext_vrm_inc_rate_mult_usperus) << 5;
+ G_ext_vrm_inc_rate_mult_usperv) >> 5;
}
#endif
//Update external voltage
- external_voltage_control_write(G_pgpe_pstate_record.extVrmNext);
+ avs_driver_voltage_write(G_gppb->avs_bus_topology.vdd_avsbus_num, G_gppb->avs_bus_topology.vdd_avsbus_rail,
+ G_pgpe_pstate_record.extVrmNext);
#if !EPM_P9_TUNING
@@ -2118,7 +2259,7 @@ void p9_pgpe_pstate_updt_ext_volt()
if(G_pgpe_pstate_record.biasSyspExtVrmNext == p9_pgpe_gppb_intp_vdd_from_ps(G_pgpe_pstate_record.psNext.fields.glb,
VPD_PT_SET_BIASED_SYSP))
{
- delay_ticks = G_gppb->ext_vrm_stabilization_time_us << 5;
+ delay_ticks = G_gppb->ext_vrm_stabilization_time_us >> 5;
//Read TimebaseStart
tbStart = in32(OCB_OTBR);
@@ -2159,6 +2300,7 @@ void p9_pgpe_pstate_updt_ext_volt()
G_pgpe_pstate_record.wov.max_volt = G_pgpe_pstate_record.wov.curr_mv;
}
+
//If VDM is disabled, update VDMCFG register for every quad
if (!(G_pgpe_header_data->g_pgpe_flags & PGPE_FLAG_VDM_ENABLE))
{
@@ -2241,6 +2383,7 @@ void p9_pgpe_pstate_freq_updt(uint32_t freq_change_dir)
PGPE_DB3_CHECK_NACKS
};
p.db3val = (uint64_t)(MSGID_DB3_HIGH_PRIORITY_PSTATE) << 56;
+ p.useDB3ValForNacks = 0;
p9_pgpe_send_db3(p);
}
//Otherwise, send regular DB0
@@ -2319,7 +2462,7 @@ inline void p9_pgpe_droop_throttle()
p.expectedAckFrom = G_pgpe_pstate_record.activeQuads;
p.expectedAckValue = MSGID_PCB_TYPE4_SUSPEND_ENTRY_ACK;
p.checkNACKs = PGPE_DB3_SKIP_CHECK_NACKS;
-
+ p.useDB3ValForNacks = 0;
p9_pgpe_send_db3(p);
//We poll on CME_FLAGS[] here. The CME doesn't send an ACK for SUSPEND_ENTRY DB3.
@@ -2404,7 +2547,7 @@ inline void p9_pgpe_droop_unthrottle()
p.expectedAckFrom = G_pgpe_pstate_record.activeQuads;
p.expectedAckValue = MSGID_PCB_TYPE4_UNSUSPEND_ENTRY_ACK;
p.checkNACKs = PGPE_DB3_SKIP_CHECK_NACKS;
-
+ p.useDB3ValForNacks = 0;
p9_pgpe_send_db3(p);
//3. Send Doorbell0 PMSR Update with message Clear Pstates Suspended to all configured cores in the active Quads.
@@ -2419,8 +2562,8 @@ inline void p9_pgpe_droop_unthrottle()
//5. Write PK Trace and Optrace record that the Prolonged Throttle workaround was removed,
//including the Total Retry Count and the most recent bit vector of Quads that provided the NACK(s) .
G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.quadsNACKed << 24) |
- (G_pgpe_pstate_record.activeCores);
- G_pgpe_optrace_data.word[0] = G_pgpe_pstate_record.cntNACKs;
+ (G_pgpe_pstate_record.activeCores >> 8);
+ G_pgpe_optrace_data.word[1] = G_pgpe_pstate_record.cntNACKs;
p9_pgpe_optrace(PROLONGED_DROOP_RESOLVED);
PK_TRACE_INF("DTH: Droop Unthrottle Done");
@@ -2435,7 +2578,7 @@ void p9_pgpe_pstate_wov_init()
G_pgpe_pstate_record.wov.avg_freq_gt_target_freq = 0;
G_pgpe_pstate_record.wov.freq_loss_tenths_gt_max_droop_tenths = 0;
G_pgpe_pstate_record.wov.status = WOV_DISABLED;
- G_pgpe_pstate_record.wov.info = 0xdeadbeef;
+ G_pgpe_pstate_record.wov.info = 0xdeadde04;
}
//
@@ -2565,6 +2708,8 @@ void p9_pgpe_pstate_adjust_wov()
{
G_pgpe_pstate_record.wov.target_pct = G_gppb->wov_underv_max_pct;
}
+
+ G_pgpe_pstate_record.excessiveDroop = 0;
}
else
{
@@ -2572,6 +2717,8 @@ void p9_pgpe_pstate_adjust_wov()
{
G_pgpe_pstate_record.wov.target_pct -= G_gppb->wov_underv_step_incr_pct;
}
+
+ G_pgpe_pstate_record.excessiveDroop = 1;
}
}// WOV ALGORITHM END
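
Worked example for the reworked delay calculation in p9_pgpe_pstate_updt_ext_volt() (the voltage delta and rate are hypothetical; the unit reasoning follows from extVrm* being tracked in mV and the rate constant now being in us per V). The product comes out in thousandths of a microsecond, and at roughly 32 OTBR ticks per microsecond the >> 5 converts it to ticks:

    // delta = 50 mV, G_ext_vrm_inc_rate_mult_usperv = 100 us/V  (example values)
    // product     = 50 * 100         = 5000   units: mV * us/V = 0.001 us
    // exact ticks = 5000 / 1000 * 32 = 160
    // in the code = 5000 >> 5        = 156    the shift approximates * 32 / 1000
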
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.h
index 78bfe77a..189cf965 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_pstate.h
@@ -227,6 +227,10 @@ typedef struct
uint32_t activeCoreUpdtAction;
uint32_t biasSyspExtVrmCurr, biasSyspExtVrmNext;
wov_t wov;
+ pgpe_wof_values_t* pWofValues;
+ uint32_t produceWOFValues;
+ uint32_t prevIdd, prevIdn, prevVdd, prevAvgPstate, prevVratio;
+ uint32_t excessiveDroop, vddCurrentThresh;
} PgpePstateRecord __attribute__ ((aligned (8)));
@@ -250,6 +254,7 @@ typedef struct db3_parms
uint32_t expectedAckFrom;
uint32_t expectedAckValue;
uint32_t checkNACKs;
+ uint32_t useDB3ValForNacks;
} db3_parms_t;
//
@@ -266,6 +271,7 @@ void p9_pgpe_pstate_apply_clips();
void p9_pgpe_pstate_calc_wof();
void p9_pgpe_pstate_updt_actual_quad();
void p9_pgpe_pstate_update_wof_state();
+void p9_pgpe_pstate_update_wof_produced_values();
//CME Communication
void p9_pgpe_send_db0(db0_parms_t p);
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_actuate_pstates.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_actuate_pstates.c
index d01a7d86..43f77968 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_actuate_pstates.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_actuate_pstates.c
@@ -76,8 +76,10 @@ void p9_pgpe_thread_actuate_pstates(void* arg)
//Mask all external interrupts. Timers are still enabled
pk_irq_sub_critical_enter(&ctx);
p9_pgpe_pstate_start(PSTATE_START_OCC_FLAG);
- G_pgpe_optrace_data.word[0] = (START_STOP_FLAG << 24) | (G_pgpe_pstate_record.psComputed.fields.glb << 16) | (in32(
- G_OCB_QCSR) >> 16);
+ G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.pmcrOwner << 25) |
+ (PSTATE_START << 24) |
+ (G_pgpe_pstate_record.psCurr.fields.glb << 16) |
+ (in32(G_OCB_QCSR) >> 16);
p9_pgpe_optrace(PRC_START_STOP);
pk_irq_sub_critical_exit(&ctx);
}
@@ -126,6 +128,7 @@ void p9_pgpe_thread_actuate_pstates(void* arg)
(G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_WOF_CTRL].pending_processing == 0))
{
out32(G_OCB_OCCFLG_CLR, BIT32(PGPE_PM_RESET_SUPPRESS));
+ PK_TRACE_INF("ACT_TH: PM_RESET_SUPP=0");
}
}
@@ -291,7 +294,7 @@ void p9_pgpe_thread_actuate_pstates(void* arg)
//Check if IPC should be opened again
if (restore_irq == 1)
{
- PK_TRACE_DBG("ACT_TH: IRQ Restore");
+ PK_TRACE_INF("ACT_TH: IRQ Restore");
restore_irq = 0;
pk_irq_vec_restore(&ctx);
}
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_process_requests.c b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_process_requests.c
index c9f8d0f5..5cb60571 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_process_requests.c
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/p9_pgpe_thread_process_requests.c
@@ -483,9 +483,10 @@ inline void p9_pgpe_process_start_stop()
{
PK_TRACE_DBG("START_STOP: Imm");
args->msg_cb.rc = PGPE_RC_SUCCESS;
- G_pgpe_optrace_data.word[0] = (START_STOP_IPC << 24) |
- (G_pgpe_pstate_record.psComputed.fields.glb << 16) |
- (in32(G_OCB_QCSR) >> 16);
+ G_pgpe_optrace_data.word[0] = (args->pmcr_owner << 25 ) |
+ (PSTATE_START << 24) |
+ (G_pgpe_pstate_record.psCurr.fields.glb << 16) |
+ (in32(G_OCB_QCSR) >> 16);
p9_pgpe_optrace(PRC_START_STOP);
}
@@ -545,7 +546,7 @@ inline void p9_pgpe_process_start_stop()
}
G_pgpe_optrace_data.word[0] = (args->pmcr_owner << 25 ) |
- (1 << 24) |
+ (PSTATE_START << 24) |
(G_pgpe_pstate_record.psCurr.fields.glb << 16) |
(in32(G_OCB_QCSR) >> 16);
p9_pgpe_optrace(PRC_START_STOP);
@@ -1034,7 +1035,7 @@ inline void p9_pgpe_process_wof_vfrt()
//
inline void p9_pgpe_process_set_pmcr_req()
{
- PK_TRACE_DBG("PTH: Set PMCR Enter");
+ PK_TRACE_INF("PTH: Set PMCR Enter");
uint32_t q, c, bad_rc = 0;
ipc_async_cmd_t* async_cmd = (ipc_async_cmd_t*)G_pgpe_pstate_record.ipcPendTbl[IPC_PEND_SET_PMCR_REQ].cmd;
@@ -1045,7 +1046,6 @@ inline void p9_pgpe_process_set_pmcr_req()
if(G_pgpe_header_data->g_pgpe_flags & PGPE_FLAG_OCC_IPC_IMMEDIATE_MODE)
{
PK_TRACE_DBG("PTH: Set PMCR Imme");
- p9_pgpe_optrace(PRC_SET_PMCR);
args->msg_cb.rc = PGPE_RC_SUCCESS;
G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.psTarget.fields.quads[0] << 24) |
(G_pgpe_pstate_record.psTarget.fields.quads[1] << 16) |
@@ -1092,7 +1092,7 @@ inline void p9_pgpe_process_set_pmcr_req()
}
else
{
- PK_TRACE_DBG("PTH: Upd coresPSReq");
+ PK_TRACE_INF("PTH: Upd coresPSReq");
for (q = 0; q < MAX_QUADS; q++)
{
@@ -1129,7 +1129,7 @@ inline void p9_pgpe_process_set_pmcr_req()
PGPE_OPTIONAL_TRACE_AND_PANIC(PGPE_OCC_IPC_ACK_BAD_RC);
}
- PK_TRACE_DBG("PTH: Set PMCR Exit");
+ PK_TRACE_INF("PTH: Set PMCR Exit");
}
//
@@ -1185,8 +1185,9 @@ inline void p9_pgpe_process_registration()
PK_TRACE_INF("PTH: Quad %d Registration Processing. qActive=0x%x cActive=0x%x", q, G_pgpe_pstate_record.activeQuads,
G_pgpe_pstate_record.activeDB);
- G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24) | (G_pgpe_pstate_record.psCurr.fields.glb << 16)
- | (in32(G_OCB_QCSR) >> 16);
+ G_pgpe_optrace_data.word[0] = (G_pgpe_pstate_record.activeQuads << 24)
+ | (G_pgpe_pstate_record.psCurr.fields.glb << 16)
+ | (in32(G_OCB_QCSR) >> 16);
p9_pgpe_optrace(PRC_PCB_T4);
}
}
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pgpe_panic_codes.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pgpe_panic_codes.h
index 7703cd35..d7471bac 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pgpe_panic_codes.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pgpe_panic_codes.h
@@ -51,7 +51,7 @@ PGPE_AVS_WRITE_ONGOING_FLAG_TIMEOUT = 0x1c04,
PGPE_AVS_INIT_DRIVE_IDLE_FRAME = 0x1c05,
PGPE_AVS_INIT_DRIVE_READ = 0x1c06,
PGPE_AVS_RESYNC_ERROR = 0x1c07,
-//_UNUSED_1c08 = 0x1c08,
+PGPE_AVS_DRIVE_READ = 0x1c08,
PGPE_CME_FAULT = 0x1c09,
PGPE_PVREF_ERROR = 0x1c0a,
PGPE_OCC_FIR_IRQ = 0x1c0d,
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pk_app_cfg.h b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pk_app_cfg.h
index b5cfb41b..a188452e 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pk_app_cfg.h
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pk_app_cfg.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -107,6 +107,11 @@
/// using the table defined in pk_app_irq_table.c.
#define STATIC_IRQ_TABLE
+#define PK_MACHINE_HANDLER_SUPPORT 1
+
+#define PPE42_MACHINE_CHECK_HANDLER \
+ b __special_machine_check_handler
+
/// Static configuration data for external interrupts:
///
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_common.mk b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_common.mk
index 04c17452..a312c371 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_common.mk
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_common.mk
@@ -90,8 +90,9 @@ PSTATE_COMMONFLAGS+= -DPK_THREAD_SUPPORT=1
PSTATE_COMMONFLAGS+= -DPK_TRACE_SUPPORT=1
PSTATE_COMMONFLAGS+= -DUSE_PK_APP_CFG_H=1
PSTATE_COMMONFLAGS+= -D__PPE_PLAT
+PSTATE_COMMONFLAGS+= -D__PPE__
PSTATE_COMMONFLAGS+= -D__PK__=1
-PSTATE_COMMONFLAGS+= -DPK_TRACE_SZ=2048
+PSTATE_COMMONFLAGS+= -DPK_TRACE_SZ=1024
PSTATE_COMMONFLAGS+= -DPSTATE_GPE
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe.mk b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe.mk
index c516d6bf..40fdd7f5 100644
--- a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe.mk
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe.mk
@@ -94,7 +94,7 @@ $(IMAGE)_COMMONFLAGS+= -DUSE_PK_APP_CFG_H=1
$(IMAGE)_COMMONFLAGS+= -D__PPE_PLAT
$(IMAGE)_COMMONFLAGS+= -D__PK__=1
#$(IMAGE)_COMMONFLAGS+= -fstack-usage
-$(IMAGE)_COMMONFLAGS+= -DPK_TRACE_SZ=2048
+$(IMAGE)_COMMONFLAGS+= -DPK_TRACE_SZ=1024
# add include paths
$(call ADD_PPEIMAGE_INCDIR,$(IMAGE),\
diff --git a/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe_p9a10.mk b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe_p9a10.mk
new file mode 100644
index 00000000..f335989a
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe_p9a10.mk
@@ -0,0 +1,70 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: import/chips/p9/procedures/ppe_closed/pgpe/pstate_gpe/pstate_gpe_p9a10.mk $
+#
+# OpenPOWER HCODE Project
+#
+# COPYRIGHT 2016,2019
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+PSTATE_TARGET:=pstate_gpe_p9a10
+IMAGE:=$(PSTATE_TARGET)
+
+#Note: Flags are resolved later - so local variables can't be
+# used to build them
+$(IMAGE)_COMMONFLAGS+= -DNIMBUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DCUMULUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DAXONE_DD_LEVEL=10
+#$(IMAGE)_COMMONFLAGS+= -fstack-usage
+
+include $(PGPE_SRCDIR)/pstate_gpe/pstate_common.mk
+$(IMAGE)_COMMONFLAGS += $(PSTATE_COMMONFLAGS)
+OBJS := $(PSTATE_OBJS)
+
+$(call BUILD_PPEIMAGE)
+
+# PPMR header edit:
+IMAGE=ppmr_header_p9a10
+IMAGE_EDITOR=pstate_gpeImgEdit.exe
+
+# Target tool chain
+$(IMAGE)_TARGET=PPE
+
+#linkscript to use
+$(IMAGE)_LINK_SCRIPT=linkppmr.cmd
+
+OBJS = p9_pgpe_ppmr.o
+
+$(call ADD_BINHEADER_INCDIR,$(IMAGE),\
+ $(PK_SRCDIR)/kernel \
+ $(PK_SRCDIR)/ppe42 \
+ $(PK_SRCDIR)/trace \
+ $(PK_SRCDIR)/$(_PPE_TYPE) \
+ $(PM_LIBDIR)/include \
+ $(PM_LIBDIR)/include/registers \
+ $(PM_LIBDIR)/common \
+ $(PM_LIBDIR)/occlib \
+ $(HCODE_LIBDIR) \
+ $(HCODE_COMMON_LIBDIR) \
+ $(HCODE_UTILS_INCDIR) \
+ $(ROOTPATH)/chips/p9/procedures/hwp/lib/ \
+ )
+
+gitsha := $(shell git log -1 --pretty=format:"%h")
+$(call BUILD_BINHEADER,$(IMAGEPATH)/$(PSTATE_TARGET)/$(PSTATE_TARGET).bin, \
+ $(gitsha))
diff --git a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_main.C b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_main.C
index dce81d17..d9b6a539 100644
--- a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_main.C
+++ b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_main.C
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -28,7 +28,8 @@
#include "occhw_shared_data.h"
#include "p9_hcd_memmap_occ_sram.H"
#include "p9_hcd_memmap_base.H"
-
+#include "p9_hcd_occ_errldefs.h"
+#include "p9_hcd_errldefs.h"
//We define a global literal for these register addresses
////This way compiler put them in .sdata area, and the address
@@ -184,7 +185,7 @@ main(int argc, char** argv)
#elif (CUMULUS_DD_LEVEL != 0)
#define PVR_CONST (0x42090800 | (((CUMULUS_DD_LEVEL ) / 10) << 8) | (CUMULUS_DD_LEVEL % 10))
#elif (AXONE_DD_LEVEL != 0)
-#define PVR_CONST (0x42091000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
+#define PVR_CONST (0x42090000 | (((AXONE_DD_LEVEL ) / 10) << 8) | (AXONE_DD_LEVEL % 10))
#else
#define PVR_CONST 0
#endif
@@ -222,6 +223,7 @@ main(int argc, char** argv)
PK_PANIC(SGPE_MAIN_FAPI2_INIT_FAILED);
}
+ initErrLogging ((uint8_t) ERRL_SOURCE_XGPE);
p9_sgpe_stop_init();
// Initialize the thread control block for G_p9_sgpe_stop_enter_thread
@@ -258,6 +260,7 @@ main(int argc, char** argv)
OSD_PTR->occ_comp_shr_data.gpe3_data.gpe3_image_header_addr = OCC_SRAM_SGPE_BASE_ADDR + SGPE_HEADER_IMAGE_OFFSET;
OSD_PTR->occ_comp_shr_data.gpe3_data.gpe3_debug_header_addr = OCC_SRAM_SGPE_BASE_ADDR + SGPE_DEBUG_PTRS_OFFSET;
+
// Start running the highest priority thread.
// This function never returns
pk_start_threads();
diff --git a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_stop_irq_handlers.c b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_stop_irq_handlers.c
index 0a691912..70aabe8b 100644
--- a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_stop_irq_handlers.c
+++ b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/p9_sgpe_stop_irq_handlers.c
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -71,6 +71,7 @@ SgpeStopRecord G_sgpe_stop_record __attribute__((section (".dump_ptrs"))) =
{{0, 0, 0}}
};
+extern uint32_t G_pib_reset_flag;
void
@@ -78,6 +79,8 @@ p9_sgpe_fit_handler()
{
PK_TRACE("FIT: Handler Fired");
+ PK_TRACE_DBG("IPB reset flag value %x", G_pib_reset_flag);
+ G_pib_reset_flag = 0;
uint32_t tpending = in32(G_OCB_OPIT0PRA) |
in32(G_OCB_OPIT3PRA) |
in32(G_OCB_OPIT6PRB);
diff --git a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/pk_app_cfg.h b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/pk_app_cfg.h
index d087066b..c4adedec 100644
--- a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/pk_app_cfg.h
+++ b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/pk_app_cfg.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -156,4 +156,9 @@
/// This file provides platform specific panic codes
#define PLATFORM_PANIC_CODES_H "sgpe_panic_codes.h"
+#define PK_MACHINE_HANDLER_SUPPORT 1
+
+#define PPE42_MACHINE_CHECK_HANDLER \
+ b __special_machine_check_handler
+
#endif /*__PK_APP_CFG_H__*/
diff --git a/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/stop_gpe_p9a10.mk b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/stop_gpe_p9a10.mk
new file mode 100644
index 00000000..d3a889bf
--- /dev/null
+++ b/import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/stop_gpe_p9a10.mk
@@ -0,0 +1,96 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: import/chips/p9/procedures/ppe_closed/sgpe/stop_gpe/stop_gpe_p9a10.mk $
+#
+# OpenPOWER HCODE Project
+#
+# COPYRIGHT 2016,2019
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+#
+## IMAGE=qpmr_header
+##
+## # Target tool chain
+## $(IMAGE)_TARGET=PPE
+##
+## #linkscript to use
+## $(IMAGE)_LINK_SCRIPT=linkqpmr.cmd
+##
+## OBJS = p9_sgpe_qpmr.o
+## $(call BUILD_BINHEADER)
+
+STOP_TARGET := stop_gpe_p9a10
+IMAGE := $(STOP_TARGET)
+
+
+# Note: Flags are resolved very late,
+# so local variables can't be used to build them
+
+# Options for platform-specific tuning
+
+$(IMAGE)_COMMONFLAGS+= -DNIMBUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DCUMULUS_DD_LEVEL=0
+$(IMAGE)_COMMONFLAGS+= -DAXONE_DD_LEVEL=10
+
+$(IMAGE)_COMMONFLAGS+= -DLAB_P9_TUNING=0
+
+$(IMAGE)_COMMONFLAGS+= -DEPM_P9_TUNING=0
+$(IMAGE)_COMMONFLAGS+= -DEPM_BROADSIDE_SCAN0=0
+
+$(IMAGE)_COMMONFLAGS+= -DSIMICS_TUNING=0
+$(IMAGE)_COMMONFLAGS+= -DUSE_SIMICS_IO=0
+
+
+include $(SGPE_SRCDIR)/stop_gpe/stop_common.mk
+$(IMAGE)_COMMONFLAGS += $(STOP_COMMONFLAGS)
+OBJS := $(STOP_OBJS)
+
+$(call BUILD_PPEIMAGE)
+
+# QPMR header edit:
+IMAGE=qpmr_header_p9a10
+IMAGE_EDITOR=stop_gpeImgEdit.exe
+
+# Target tool chain
+$(IMAGE)_TARGET=PPE
+
+#linkscript to use
+$(IMAGE)_LINK_SCRIPT=linkqpmr.cmd
+
+OBJS = p9_sgpe_qpmr.o
+
+
+$(call ADD_BINHEADER_INCDIR,$(IMAGE),\
+ $(PK_SRCDIR)/kernel \
+ $(PK_SRCDIR)/ppe42 \
+ $(PK_SRCDIR)/trace \
+ $(PK_SRCDIR)/$(_PPE_TYPE) \
+ $(PM_LIBDIR)/include \
+ $(PM_LIBDIR)/include/registers \
+ $(PM_LIBDIR)/common \
+ $(PM_LIBDIR)/occlib \
+ $(HCODE_LIBDIR) \
+ $(HCODE_COMMON_LIBDIR) \
+ $(HCODE_UTILS_INCDIR) \
+ $(ROOTPATH)/chips/p9/procedures/hwp/lib/ \
+ $(ROOTPATH)/chips/p9/utils/imageProcs/ \
+ )
+gitsha := $(shell git log -1 --pretty=format:"%h")
+$(call BUILD_BINHEADER,$(IMAGEPATH)/$(STOP_TARGET)/$(STOP_TARGET).bin, \
+ $(gitsha))
+
diff --git a/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S b/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S
new file mode 100755
index 00000000..1a74447a
--- /dev/null
+++ b/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S
@@ -0,0 +1,856 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S $
+#
+# OpenPOWER HCODE Project
+#
+# COPYRIGHT 2015,2020
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+
+ .set r0, 0
+ .set r1, 1
+ .set r2, 2
+ .set r3, 3
+ .set r4, 4
+ .set r5, 5
+ .set r6, 6
+ .set r7, 7
+ .set r8, 8
+ .set r9, 9
+ .set r10, 10
+ .set r11, 11
+ .set r12, 12
+ .set r13, 13
+ .set r14, 14
+ .set r15, 15
+ .set r16, 16
+ .set r17, 17
+ .set r18, 18
+ .set r19, 19
+ .set r20, 20
+ .set r21, 21
+ .set r22, 22
+ .set r23, 23
+ .set r24, 24
+ .set r25, 25
+ .set r26, 26
+ .set r27, 27
+ .set r28, 28
+ .set r29, 29
+ .set r30, 30
+ .set r31, 31
+
+ .set f0, 0
+ .set f1, 1
+ .set f2, 2
+ .set f3, 3
+ .set f4, 4
+ .set f5, 5
+ .set f6, 6
+ .set f7, 7
+ .set f8, 8
+ .set f9, 9
+ .set f10, 10
+ .set f11, 11
+ .set f12, 12
+ .set f13, 13
+ .set f14, 14
+ .set f15, 15
+ .set f16, 16
+ .set f17, 17
+ .set f18, 18
+ .set f19, 19
+ .set f20, 20
+ .set f21, 21
+ .set f22, 22
+ .set f23, 23
+ .set f24, 24
+ .set f25, 25
+ .set f26, 26
+ .set f27, 27
+ .set f28, 28
+ .set f29, 29
+ .set f30, 30
+ .set f31, 31
+
+#--------------------------------------------------------------------#
+# SPR Constants #
+#--------------------------------------------------------------------#
+
+ .set XER, 1
+ .set LR, 8
+ .set CTR, 9
+ .set DSISR, 18
+ .set DAR, 19
+ .set DEC, 22
+ .set SDR1, 25
+ .set SRR0, 26
+ .set SRR1, 27
+ .set CFAR, 28
+ .set HFSCR, 190
+ .set TB, 268
+ .set SPRG0, 272
+ .set SPRG1, 273
+ .set SPRG2, 274
+ .set SPRG3, 275
+ .set SPRC, 276
+ .set SPRD, 277
+ .set EAR, 282
+ .set TBL, 284
+ .set TBU, 285
+ .set PVR, 287
+ .set HSPRG0, 304
+ .set HSPRG1, 305
+ .set HDSISR, 306
+ .set HDEC, 310
+ .set HRMOR, 313
+ .set HSRR0, 314
+ .set HSRR1, 315
+ .set HMER, 336
+ .set URMOR, 505 # Ultravisor
+ .set USRR0, 506
+ .set USRR1, 507
+ .set SMFCTRL, 511 # Ultravisor
+ .set HID, 1008
+ .set PIR, 1023
+
+#--------------------SPR definition ends---------------------------------------
+
+#--------------------constants begin ------------------------------------------
+
+ #offset wrt to start of HOMER at which thread launcher code
+ #is located.
+ .set THREAD_LAUNCHER_START_OFFSET, 0x2000
+ .set THREAD_LAUNCHER_SIZE_OFFSET, 1024
+ .set CORE_SPR_OFFSET, 1024
+ .set SPR_RESTORE_OFFSET, THREAD_LAUNCHER_START_OFFSET + THREAD_LAUNCHER_SIZE_OFFSET
+ .set HRMOR_RESTORE_OFFSET, 0x1200
+ .set URMOR_RESTORE_OFFSET, 0x1280
+ .set SKIP_HRMOR_UPDATE_OFFSET, 4
+ .set SKIP_URMOR_UPDATE_OFFSET, 8
+ .set SPR_SAVE_ROUTINE_OFFSET, 0x2300
+
+ .set STOP, 0x4C0002E4
+ .set ATTN, 0x00000200
+ .set urfid, 0x4C000264
+ .set SECURE_MODE_CONST, 0x0040 # bit 41, note: must be shifted left 16 bits
+ .set MACHINE_CHECK_ENABLE_CONST, 0x1000 # bit 51
+ .set ERR_CODE_SMF_E_NOT_SET, 0x0001 # Core is SMF capable but SMF[E] not SET
+ .set ERR_CODE_SMF_BAD_B62_63_CFG, 0x0002 # Core is not configured to exit UV mode
+ .set SMFCTRL_ENABLE_BIT, 0
+ .set MSR_SECURITY_BIT, 41
+ .set SCRATCH_RUNTIME_MODE_BIT, 59
+
+ .set OTHER_THREADS_STOPPED, 0x07
+ .set CORE_THREAD_STATE_REG_ID, 0x01E0
+ .set CONFIG_CORE_SCRATCH_REG0, 0x0000
+ .set CONFIG_CORE_SCRATCH_REG1, 0x0008
+ .set SECURE_THREAD_SPR_REGION_SIZE, 9216 # 9216 Bytes ( Interrupt Region ) + 1024 Bytes ( Thread Launch Size )
+ .set CORE_SELF_RESTORE_OFFSET, 0xC00
+ .set CORE_SELF_SAVE_OFFSET, 0xE00 # 3.5KB
+ .set THREAD_SELF_SAVE_SIZE, 256
+ .set SELF_REST_VER_INFO_OFFSET, 0x1C
+ .set SMF_SIGNATURE_OFFSET, 0x1300
+ .set SMF_SIGNATURE_CONST, 0x5f534d46 # '_SMF'
+ .set HILE_BIT_POS, 4
+ .set LE_BIT_POS, 63
+ .set MF_HRMOR_R1, 0xa64a397c
+ .set CLEAR_MSR_LE, 0xa407b57a
+ .set MT_SRR1, 0xa603bb7e
+ .set ADDI_R1_32, 0x20012138
+ .set MT_SRR0_R1, 0xa6033a7c
+ .set RFID, 0x2400004c
+ .set TRAP_LE, 0x0800e07f
+ .set MFMSR_R21, 0xa600a07e
+
+
+ .set SPR_SAVE_SCRATCH_REG, r0
+ .set SPR_DATA_REG, r1
+ .set PIR_VAL_REG, r2
+ .set CPMR_BASE_REG, r3
+ .set FUSED_STATUS_REG, r4
+ .set CORE_ID_REG, r5
+ .set THREAD_ID_REG, r6
+ .set BASE_ADDR_REG, r7
+ .set TEMP_REG1, r8
+ .set URMOR_RESTORE_REG, r9
+ .set HRMOR_RESTORE_REG, r10
+ .set THREAD_ACTIVE_STATE_REG, r11
+ .set CORE_SCOPE_RESTORE_ADDR_REG, r12
+ .set THREAD_SCOPE_RESTORE_ADDR_REG, r13
+ .set THREAD_SELF_SAVE_BASE_ADDR, r14
+ .set CORE_SELF_SAVE_BASE_ADDR, r15
+ .set SMF_VAL_REG, r16
+ .set TEMP_REG2, r17
+ .set THREAD_SCRATCH_VAL_REG, r18
+ .set RMOR_INIT_REG, r20
+ .set MSR_INIT_REG, r21
+ .set MSR_SECURITY_ENABLE_REG, r23
+ .set TEST_REG, r24
+ .set SELF_REST_ERR_REG, r25
+ .set SELF_REST_VER_REG, r26
+ .set SELF_SAVE_ADDR_REG, r30
+ .set SELF_RESTORE_ADDR_REG, r31
+
+#--------------------------------------------------------------------#
+#
+# Interrupt Vectors
+#
+#-----------------------------------------------------------------------#
+ .set SRESET, 0x0100
+
+#--------------------------------------------------------------------#
+#
+# CR Register Constants
+#
+#--------------------------------------------------------------------#
+
+ .set cr0, 0
+ .set cr1, 1
+ .set cr2, 2
+ .set cr3, 3
+ .set cr4, 4
+ .set cr5, 5
+ .set cr6, 6
+ .set cr7, 7
+ .set lt, 0
+ .set gt, 1
+ .set eq, 2
+ .set so, 3
+
+#--------------------------------------------------------------------#
+.section ".selfRestore" , "ax"
+.global _start
+
+#There is a CPMR header just before the SRESET handler. Below is its layout.
+#------------------------------CPMR Header ------------------------------------
+# Address Offset Contents
+#-----------------------------------------------------------------------------
+# 0x00 ATTN Opcode ATTN Opcode
+#------------------------------------------------------------------------------
+# 0x08 Magic Number
+#------------------------------------------------------------------------------
+# 0x10 Build Date Version
+#-------------------------------------------------------------------------------
+# 0x18 Resvd|Resvd|Resvd|Resvd|Resvd|Resvd|Resvd|Fused Flag
+#-------------------------------------------------------------------------------
+# 0x20 CME Hcode Offset | CME Hcode Length
+#-------------------------------------------------------------------------------
+# 0x28 CME Common Rings | CME Common Rings Section
+# Section Offset | Length
+#-------------------------------------------------------------------------------
+# 0x30 CME Quad Pstate Region | CME Quad Pstate Region
+# Offset Length
+#-------------------------------------------------------------------------------
+# 0x38-0xF8 Reserved( Filled with ATTN instructions )
+#-------------------------------------------------------------------------------#
+_start:
+
+#--------------------------------------------------------------------#
+
+#SRESET handler routine
+#In the wakeup and STOP paths, the CME generates a reset signal for the P9 core, which raises
+#an SRESET interrupt for all threads of the core.
+
+#At the beginning of SRESET, the thread executing this code determines its privilege level.
+#Once the privilege level is known, execution is steered towards the common thread launcher.
+
+#If the thread executing the code is running with Hyp privilege, the thread launcher address is
+#calculated using the contents of HRMOR, whereas if the thread is executing code as Ultravisor,
+#the thread prepares to exit ultravisor mode using the trampoline sequence. The thread launcher
+#address is computed using URMOR. Refer to the table below.
+
+#----------------------------------------------------------------------------
+# Privilege SPR Src SPR Dest
+#----------------------------------------------------------------------------
+# Hyp* HRMOR SRR0
+# MSR SRR1
+#---------------------------------------------------------------------------
+# Ultravisor** URMOR USRR0
+# MSR USRR1
+#---------------------------------------------------------------------------
+#
+# * Copy is initiated by rfid instruction
+# ** Copy is initiated by urfid instruction
+#---------------------------------------------------------------------------
+
+# Assume scan init: MSR[Secure]==1 and MSR[HV]=1 and SMFCTRL[E]==0
+
+.org _start + SRESET
+
+_sreset_hndlr:
+
+b big_endian_start
+
+little_endian_start:
+.long MF_HRMOR_R1
+.long MFMSR_R21
+.long CLEAR_MSR_LE
+.long MT_SRR1
+.long ADDI_R1_32
+.long MT_SRR0_R1
+.long RFID
+
+#Note: below are instructions for swizzled machine code used above for
+#LE core entering STOP
+#mfspr r1, HRMOR
+#mfmsr MSR_INIT_REG
+#clrrdi MSR_INIT_REG, MSR_INIT_REG, 1
+#mtsrr1 MSR_INIT_REG
+#addi r1, r1, 288
+#mtsrr0 r1
+#rfid
+
+
+
+big_endian_start:
+mfspr SPR_DATA_REG, HID
+li TEMP_REG1, 0
+insrdi SPR_DATA_REG, TEMP_REG1, 1, HILE_BIT_POS
+mtspr HID, SPR_DATA_REG # Cleared HILE bit position
+mfmsr MSR_INIT_REG
+ori MSR_INIT_REG, MSR_INIT_REG, MACHINE_CHECK_ENABLE_CONST # Set the ME bit
+extrdi. MSR_SECURITY_ENABLE_REG, MSR_INIT_REG, 1, MSR_SECURITY_BIT # read Secure Bit (S) of MSR
+beq hv_core_init # it is a non-secure mode system
+
+uv_core_check:
+#Check For SMF enable bit
+#SMFCTRL[E]=1?
+
+li TEMP_REG2, ERR_CODE_SMF_E_NOT_SET
+mfspr SMF_VAL_REG, SMFCTRL
+extrdi. TEMP_REG1, SMF_VAL_REG, 1, SMFCTRL_ENABLE_BIT
+beq uv_init_error              # Core initialization is not consistent
+
+li TEMP_REG2, ERR_CODE_SMF_BAD_B62_63_CFG
+extrdi TEMP_REG1, SMF_VAL_REG, 2, 62
+cmpwi TEMP_REG1, 0x02
+beq uv_core_init
+
+uv_init_error:
+
+#Put error code in a specific GPR
+#SPATTN to halt as the inits and the mode are not consistent
+
+mr SELF_REST_ERR_REG, TEMP_REG2
+.long ATTN # Error out and block self restore completion
+
+hv_core_init:
+mfspr RMOR_INIT_REG, HRMOR # Get Stop_HRMOR (placed by CME)
+addi TEMP_REG2, RMOR_INIT_REG, 0x2000 # Thread Launcher offset
+mtsrr0 TEMP_REG2 # Save Thread Launcher address to SRR0
+mtsrr1 MSR_INIT_REG # Save MSR to SRR1
+rfid # Invoke Thread Launcher with ME=1 in HV mode
+
+uv_core_init:
+mfspr RMOR_INIT_REG, URMOR
+addi TEMP_REG1, RMOR_INIT_REG, 0x2000 # Thread Launcher offset
+mtspr USRR0, TEMP_REG1 # Save Thread Launcher address to USRR0
+mtspr USRR1, MSR_INIT_REG # Save MSR to USRR1
+.long urfid # Invoke Thread Launcher with ME=1 in UV mode
+
+#--------------------------------------------------------------------#
+
+#Error handling for other interrupt vectors.
+
+#--------------------------------------------------------------------#
+# Machine Check
+#--------------------------------------------------------------------#
+.org _start + 0x0200
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Data Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0300
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Data Segment
+#--------------------------------------------------------------------#
+.org _start + 0x0380
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Instruction Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0400
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Instruction Segment
+#--------------------------------------------------------------------#
+.org _start + 0x0480
+.long ATTN
+
+#--------------------------------------------------------------------#
+# External
+#--------------------------------------------------------------------#
+.org _start + 0x0500
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Alignment
+#--------------------------------------------------------------------#
+.org _start + 0x0600
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Program
+#--------------------------------------------------------------------#
+.org _start + 0x0700
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Floating Point Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0800
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Decrementer
+#--------------------------------------------------------------------#
+.org _start + 0x0900
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Decrementer
+#--------------------------------------------------------------------#
+.org _start + 0x0980
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Directed Privileged Doorbell
+#--------------------------------------------------------------------#
+.org _start + 0x0A00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0B00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# System Call
+#--------------------------------------------------------------------#
+.org _start + 0x0C00
+b _sreset_hndlr
+
+#--------------------------------------------------------------------#
+# Trace
+#--------------------------------------------------------------------#
+.org _start + 0x0D00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Data Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0E00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Instruction Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0E20
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Emulation Assistance
+#--------------------------------------------------------------------#
+.org _start + 0x0E40
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Maintenance
+#--------------------------------------------------------------------#
+.org _start + 0x0E60
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Directed Ultravisor Doorbell
+#--------------------------------------------------------------------#
+.org _start + 0x0E80
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EA0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EC0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EE0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Performance Monitoring
+#--------------------------------------------------------------------#
+.org _start + 0x0F00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Vector Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F20
+.long ATTN
+
+#--------------------------------------------------------------------#
+# VSX Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F40
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Facility Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F60
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Facility Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F80
+.long ATTN
+
+
+#--------------------------------------------------------------------#
+# Self Restore Completion
+#--------------------------------------------------------------------#
+.org _start + HRMOR_RESTORE_OFFSET
+
+#restore the URMOR/HRMOR to the value needed by the Ultravisor upon wakeup
+#there can be no future I-fetches after this point, so no more than 7
+#instructions after this
+
+mtspr HRMOR, HRMOR_RESTORE_REG
+
+#necessary to invalidate stale translations in the ERATs that were created
+#during the self-restore code execution.
+
+save_restore_done:
+slbia
+.long STOP # Core entering STOP state from HV state
+.long ATTN
+#--------------------------------------------------------------------
+
+.org _start + URMOR_RESTORE_OFFSET
+mtspr HRMOR, HRMOR_RESTORE_REG
+mtspr URMOR, URMOR_RESTORE_REG
+slbia
+.long STOP # Core entering STOP from UV state
+.long ATTN
+
+#---------------------------------------------------------------------
+
+.org _start + SMF_SIGNATURE_OFFSET
+## A signature indicating that the self save-restore image supports SMF.
+.long SMF_SIGNATURE_CONST
+#---------------------------------------------------------------------
+#
+# common code for thread restoration
+#
+#---------------------------------------------------------------------
+
+.org _start + THREAD_LAUNCHER_START_OFFSET
+
+thread_launcher_start:
+mfspr PIR_VAL_REG, PIR #Processor Identification Register
+#Select to read the Thread State Status register in PC using SPRC & SPRD
+li TEMP_REG1, CORE_THREAD_STATE_REG_ID
+mtspr SPRC, TEMP_REG1
+mfspr FUSED_STATUS_REG, SPRD
+extrdi. FUSED_STATUS_REG, FUSED_STATUS_REG, 1, 63
+beq core_is_not_fused
+
+core_is_fused:
+#core is fused. Find physical core number from PIR bits.
+#Bit 60: Fuse Core Select within the Quad
+#Bit 61:62 Thread select within a core chiplet
+#Bit 63 chip select within the fused core
+
+#Multiply the fused core select bit (bit 60) by 2 since there are two core chiplets in
+#a pair forming the fused core. Bit 63 selects even or odd core within the pair.
+#Physical core id = 2 * (bit 60) + (bit 63)
+
+extrdi CORE_ID_REG, PIR_VAL_REG, 1, 60
+sldi CORE_ID_REG, CORE_ID_REG, 1
+extrdi TEMP_REG1, PIR_VAL_REG, 1, 63
+add CORE_ID_REG, CORE_ID_REG, TEMP_REG1
+
+# thread id = 2 * (bit 61 ) + bit 62
+extrdi THREAD_ID_REG, PIR_VAL_REG, 2, 61
+b thread_restore
+
+core_is_not_fused:
+#core is not fused. Find relative id within Quad
+#bit 60:61 core chiplet select within a Quad.
+#bit 62:63 thread select within a core chiplet.
+
+extrdi CORE_ID_REG, PIR_VAL_REG, 2, 60
+extrdi THREAD_ID_REG, PIR_VAL_REG, 2, 62
+
+#********* Determining core id relative to P9 chip by using quad info **************
+# bit 57:59 Quad Select within the P9 Chip
+
+thread_restore:
+extrdi TEMP_REG1, PIR_VAL_REG, 3, 57 # get quad bits
+sldi TEMP_REG1, TEMP_REG1, 2 # quad id * 4 core chiplets per quad
+add CORE_ID_REG, CORE_ID_REG, TEMP_REG1 # P9 core id = 4 * quad id + index within Quad
+
+#***************** find address where restore instructions are present **************
+
+#Found core id and thread id. Calculate the offset associated with the restore area.
+
+#Below is a representation of UV & HV register restore section layout
+#************************************************************ core base address ( b )
+#*********************************End of core interrupt region ********************** b
+# Core 0 Thread 0 Self Restore
+#------------------------------------------------------------------------------------ b + 512B
+# Core 0 Thread 1 Self Restore
+#------------------------------------------------------------------------------------ b + 1024B
+# Core 0 Thread 2 Self Restore
+#------------------------------------------------------------------------------------ b + 1536B
+# Core 0 Thread 3 Self Restore
+#------------------------------------------------------------------------------------ b + 2048B
+# Core 0 Thread 0 Self Save
+#------------------------------------------------------------------------------------ b + 2304B
+# Core 0 Thread 1 Self Save
+#------------------------------------------------------------------------------------ b + 2560B
+# Core 0 Thread 2 Self Save
+#------------------------------------------------------------------------------------ b + 2816B
+# Core 0 Thread 3 Self Save
+#------------------------------------------------------------------------------------ b + 3072B
+# Core 0 Self Restore 256
+#------------------------------------------------------------------------------------ b + 3584B
+# Core 0 Self Save 128
+#------------------------------------------------------------------------------------ b + 4096B
+# Core 1 Thread 0 Self Restore
+#------------------------------------------------------------------------------------ b + 4608B
+# Core 1 Thread 1 Self Restore
+#------------------------------------------------------------------------------------ b + 5120B
+# .
+# .
+# .
+# .
+# .
+# .
+#------------------------------------------------------------------------------------
+
+sldi BASE_ADDR_REG, CORE_ID_REG, 12                     # times 4K = 2^12
+addi BASE_ADDR_REG, BASE_ADDR_REG, SECURE_THREAD_SPR_REGION_SIZE
+add BASE_ADDR_REG, BASE_ADDR_REG, RMOR_INIT_REG # plus CPMR Base
+
+calculate_thread_save_addr: # 256 * thread id
+sldi THREAD_SELF_SAVE_BASE_ADDR, THREAD_ID_REG, 8
+add THREAD_SELF_SAVE_BASE_ADDR, THREAD_SELF_SAVE_BASE_ADDR, BASE_ADDR_REG
+addi THREAD_SELF_SAVE_BASE_ADDR, THREAD_SELF_SAVE_BASE_ADDR, 2048
+
+calculate_core_self_save_addr:
+mr CORE_SELF_SAVE_BASE_ADDR, BASE_ADDR_REG
+addi CORE_SELF_SAVE_BASE_ADDR, CORE_SELF_SAVE_BASE_ADDR, CORE_SELF_SAVE_OFFSET
+
+calculate_self_restore_address:
+mr THREAD_SCOPE_RESTORE_ADDR_REG, BASE_ADDR_REG
+sldi TEMP_REG1, THREAD_ID_REG, 9
+add THREAD_SCOPE_RESTORE_ADDR_REG, THREAD_SCOPE_RESTORE_ADDR_REG, TEMP_REG1
+addi CORE_SCOPE_RESTORE_ADDR_REG, BASE_ADDR_REG, CORE_SELF_RESTORE_OFFSET
+
+##read register scratch0 for even core and scratch1 for odd core
+mr TEMP_REG1, CORE_ID_REG
+andi. TEMP_REG1, TEMP_REG1, 0x01
+cmplwi TEMP_REG1, 0x00
+beq set_scratch_reg0
+
+set_scratch_reg1:
+li TEMP_REG1, CONFIG_CORE_SCRATCH_REG1
+mtspr SPRC, TEMP_REG1
+b read_scratch_reg
+
+set_scratch_reg0:
+li TEMP_REG1, CONFIG_CORE_SCRATCH_REG0
+mtspr SPRC, TEMP_REG1
+
+read_scratch_reg:
+mfspr THREAD_SCRATCH_VAL_REG, SPRD
+li TEMP_REG2, 0x01
+and TEMP_REG1, THREAD_SCRATCH_VAL_REG, TEMP_REG2
+cmpwi TEMP_REG1, 0x00
+bne find_self_save
+
+thread_restore_base:
+li TEMP_REG1, CORE_THREAD_STATE_REG_ID
+mtspr SPRC, TEMP_REG1
+mtlr THREAD_SCOPE_RESTORE_ADDR_REG
+blrl ## branch to thread register restore area
+
+# return here after thread register restoration
+thread_restore_return:
+cmpwi THREAD_ID_REG, 0 # if thread in question is not 0, skip core shared reg restore
+bne restore_done # else wait for other threads to be stopped again
+
+
+wait_until_single_thread:
+mfspr TEMP_REG1, SPRD
+extrdi TEMP_REG1, TEMP_REG1, 4, 56
+cmpwi TEMP_REG1, OTHER_THREADS_STOPPED
+bne wait_until_single_thread # wait until all threads are done restoring
+
+mtlr CORE_SCOPE_RESTORE_ADDR_REG
+blrl # branch to core shared register restore area
+
+
+# return here after shared core register restoration
+restore_done: # now all regs are restored except URMOR & MSR
+
+# Use RFID to restore the requested MSR and, if thread0, to finally restore the URMOR
+# before executing STOP again to indicate completion to CME. Always override the given
+# MSR value to remain in Big Endian and Secure Mode so we can complete the self restore
+# (although override is really only needed for the case of non-secure HV-only Linux
+# systems, since the Hypervisor cannot set the Secure bit and Linux will set LE)
+# Note: SRESET on the eventual wakeup will properly configure the LE and S bits in the MSR
+
+addi TEMP_REG1, 0, -2 # = 0xF...FFFE, create a mask excluding bit 63
+and. MSR_INIT_REG, MSR_INIT_REG, TEMP_REG1 # to clear LE bit
+cmplwi MSR_SECURITY_ENABLE_REG, 0
+beq initiate_hv_compatibility_mode                      # HV compatibility mode, SMFCTRL[E] is 0b0
+
+extrdi. TEMP_REG2, THREAD_SCRATCH_VAL_REG, 1, SCRATCH_RUNTIME_MODE_BIT # Check Run-Time Wakeup Mode
+beq initiate_urmor_restore
+
+initiate_runtime_hv_wakeup:
+
+#------------------------------------------------------------------------------------
+# HW Bug Workaround: With the MSR[S] bit ON, clearing the SMFCTRL[E] bit leads to a
+# Checkstop. Clearing SMFCTRL[E] is a must for exiting as HV. In order to
+# accomplish it, the following workaround has been implemented:
+# (1). Slave threads should not attempt to clear SMFCTRL[E]. Only the master thread
+#      should do that.
+# (2). Before clearing SMFCTRL[E], clear bit 41 in SPRs HSRR1 and SRR1.
+#------------------------------------------------------------------------------------
+li TEMP_REG1, 0
+mfsrr1 TEMP_REG2
+insrdi TEMP_REG2, TEMP_REG1, 1, MSR_SECURITY_BIT
+mtsrr1 TEMP_REG2
+mfspr TEMP_REG2, HSRR1
+insrdi TEMP_REG2, TEMP_REG1, 1, MSR_SECURITY_BIT
+mtspr HSRR1, TEMP_REG2
+mfspr SMF_VAL_REG, SMFCTRL
+insrdi SMF_VAL_REG, TEMP_REG1, 1, SMFCTRL_ENABLE_BIT
+insrdi MSR_INIT_REG, TEMP_REG1, 1, MSR_SECURITY_BIT
+
+addi TEMP_REG1, RMOR_INIT_REG, HRMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_usrrx
+addi TEMP_REG1, TEMP_REG1, SKIP_HRMOR_UPDATE_OFFSET # restore HRMOR only if thread0
+
+update_usrrx:
+mtspr USRR0, TEMP_REG1
+mtspr USRR1, MSR_INIT_REG
+cmpwi THREAD_ID_REG, 0
+bne exit_to_thread_stop
+
+#------------------------------ Trampoline Sequence Start -------------------------------
+
+mtspr SMFCTRL, SMF_VAL_REG #SMFCTRL[E] = 0b0 for HV exit and 0b1 for UV exit
+
+exit_to_thread_stop:
+isync
+.long urfid
+#------------------------------ Trampoline Sequence End ----------------------------------
+.long ATTN
+
+
+initiate_hv_compatibility_mode:
+
+addi TEMP_REG1, RMOR_INIT_REG, HRMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_srrx
+addi TEMP_REG1, TEMP_REG1, SKIP_HRMOR_UPDATE_OFFSET # restore HRMOR only if thread0
+
+update_srrx:
+mtsrr0 TEMP_REG1
+mtsrr1 MSR_INIT_REG
+rfid
+.long ATTN
+
+initiate_urmor_restore:
+addi TEMP_REG1, RMOR_INIT_REG, URMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_uv_exit
+addi TEMP_REG1, TEMP_REG1, SKIP_URMOR_UPDATE_OFFSET # restore URMOR only if thread0
+
+update_uv_exit:
+mtspr USRR0, TEMP_REG1
+mtspr USRR1, MSR_INIT_REG
+.long urfid
+.long ATTN
+
+# THREAD_LAUNCHER_SIZE_OFFSET must be >= (4 * number of instructions between
+# here and thread_launcher_start)
+
+find_self_save:
+addi SELF_RESTORE_ADDR_REG, THREAD_SCOPE_RESTORE_ADDR_REG, 8
+mtlr THREAD_SELF_SAVE_BASE_ADDR
+blrl
+cmpwi THREAD_ID_REG, 0 # if thread in question is 0, also, save core SPRs
+bne save_restore_done # else saving of SPRs is done
+
+save_core_spr:
+# 8B for mflr r30
+# 32B for skipping HRMOR restore entry
+# Self save should start at an offset 8B + 32B = 40B
+addi SELF_RESTORE_ADDR_REG, CORE_SCOPE_RESTORE_ADDR_REG, 40
+mtlr CORE_SELF_SAVE_BASE_ADDR
+blrl
+
+b save_restore_done
+
+#-------------------- Self Save Routine --------------------------------
+
+## This is a common routine which edits the SPR restore entry for an SPR
+## of core or thread scope. It updates the parts of the SPR restore
+## entry that contain the data to which the given SPR needs to be restored.
+
+.org _start + SPR_SAVE_ROUTINE_OFFSET
+
+li SPR_SAVE_SCRATCH_REG, 0x0278
+oris SPR_SAVE_SCRATCH_REG, SPR_SAVE_SCRATCH_REG, 0x7c00
+stw SPR_SAVE_SCRATCH_REG, -4 (SELF_RESTORE_ADDR_REG)
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 0
+oris SPR_SAVE_SCRATCH_REG, SPR_SAVE_SCRATCH_REG, 0x6400
+sthu SPR_SAVE_SCRATCH_REG, 2( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 16
+sthu SPR_SAVE_SCRATCH_REG, 4( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 32
+sthu SPR_SAVE_SCRATCH_REG, 8( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG , 16, 48
+sthu SPR_SAVE_SCRATCH_REG, 4( SELF_RESTORE_ADDR_REG )
+addi SELF_RESTORE_ADDR_REG, SELF_RESTORE_ADDR_REG, 14
+blr
+
+#--------------------------------- End Thread Launcher ---------------
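The thread launcher in the file above decodes PIR bits 57:63 into a chip-relative core id and a thread id (with a separate path for fused cores), then uses them to index the per-core self-restore/self-save region: roughly core_id * 4 KB past the 9216-byte interrupt region, with 512 B thread restore slots, thread save slots starting at +2048, and core-scope areas at +0xC00/+0xE00. A compact C sketch of just the PIR decode, assuming the big-endian bit numbering documented in the comments (bit 63 is the least significant bit); the helper names are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Extract 'width' bits starting at big-endian bit 'start' (bit 63 = LSB), like extrdi. */
static uint64_t bits(uint64_t v, unsigned start, unsigned width)
{
    return (v >> (63 - (start + width - 1))) & ((1ULL << width) - 1);
}

static void decode_pir(uint64_t pir, int fused, unsigned* core_id, unsigned* thread_id)
{
    unsigned quad = (unsigned)bits(pir, 57, 3);          /* quad select within the chip */

    if (fused)
    {
        /* fused: core = 2*bit60 + bit63, thread = bits 61:62 */
        *core_id   = 2u * (unsigned)bits(pir, 60, 1) + (unsigned)bits(pir, 63, 1);
        *thread_id = (unsigned)bits(pir, 61, 2);
    }
    else
    {
        /* normal: core = bits 60:61, thread = bits 62:63 */
        *core_id   = (unsigned)bits(pir, 60, 2);
        *thread_id = (unsigned)bits(pir, 62, 2);
    }

    *core_id += 4u * quad;                               /* chip-relative core id */
}

int main(void)
{
    unsigned core, thread;
    decode_pir(0x3ull /* example PIR low bits */, 0, &core, &thread);
    printf("core %u thread %u\n", core, thread);         /* prints "core 0 thread 3" */
    return 0;
}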
diff --git a/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.s b/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.s
new file mode 100644
index 00000000..77d5ca8f
--- /dev/null
+++ b/import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.s
@@ -0,0 +1,860 @@
+# IBM_PROLOG_BEGIN_TAG
+# This is an automatically generated prolog.
+#
+# $Source: import/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.s $
+#
+# OpenPOWER HCODE Project
+#
+# COPYRIGHT 2015,2020
+# [+] International Business Machines Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# IBM_PROLOG_END_TAG
+# 1 "/esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S"
+# 1 "<built-in>"
+# 1 "<command-line>"
+# 1 "/esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.S"
+
+ .set r0, 0
+ .set r1, 1
+ .set r2, 2
+ .set r3, 3
+ .set r4, 4
+ .set r5, 5
+ .set r6, 6
+ .set r7, 7
+ .set r8, 8
+ .set r9, 9
+ .set r10, 10
+ .set r11, 11
+ .set r12, 12
+ .set r13, 13
+ .set r14, 14
+ .set r15, 15
+ .set r16, 16
+ .set r17, 17
+ .set r18, 18
+ .set r19, 19
+ .set r20, 20
+ .set r21, 21
+ .set r22, 22
+ .set r23, 23
+ .set r24, 24
+ .set r25, 25
+ .set r26, 26
+ .set r27, 27
+ .set r28, 28
+ .set r29, 29
+ .set r30, 30
+ .set r31, 31
+
+ .set f0, 0
+ .set f1, 1
+ .set f2, 2
+ .set f3, 3
+ .set f4, 4
+ .set f5, 5
+ .set f6, 6
+ .set f7, 7
+ .set f8, 8
+ .set f9, 9
+ .set f10, 10
+ .set f11, 11
+ .set f12, 12
+ .set f13, 13
+ .set f14, 14
+ .set f15, 15
+ .set f16, 16
+ .set f17, 17
+ .set f18, 18
+ .set f19, 19
+ .set f20, 20
+ .set f21, 21
+ .set f22, 22
+ .set f23, 23
+ .set f24, 24
+ .set f25, 25
+ .set f26, 26
+ .set f27, 27
+ .set f28, 28
+ .set f29, 29
+ .set f30, 30
+ .set f31, 31
+
+#--------------------------------------------------------------------#
+# SPR Constants #
+#--------------------------------------------------------------------#
+
+ .set XER, 1
+ .set LR, 8
+ .set CTR, 9
+ .set DSISR, 18
+ .set DAR, 19
+ .set DEC, 22
+ .set SDR1, 25
+ .set SRR0, 26
+ .set SRR1, 27
+ .set CFAR, 28
+ .set HFSCR, 190
+ .set TB, 268
+ .set SPRG0, 272
+ .set SPRG1, 273
+ .set SPRG2, 274
+ .set SPRG3, 275
+ .set SPRC, 276
+ .set SPRD, 277
+ .set EAR, 282
+ .set TBL, 284
+ .set TBU, 285
+ .set PVR, 287
+ .set HSPRG0, 304
+ .set HSPRG1, 305
+ .set HDSISR, 306
+ .set HDEC, 310
+ .set HRMOR, 313
+ .set HSRR0, 314
+ .set HSRR1, 315
+ .set HMER, 336
+ .set URMOR, 505 # Ultravisor
+ .set USRR0, 506
+ .set USRR1, 507
+ .set SMFCTRL, 511 # Ultravisor
+ .set HID, 1008
+ .set PIR, 1023
+
+#--------------------SPR definition ends---------------------------------------
+
+#--------------------constants begin ------------------------------------------
+
+ #offset wrt to start of HOMER at which thread launcher code
+ #is located.
+ .set THREAD_LAUNCHER_START_OFFSET, 0x2000
+ .set THREAD_LAUNCHER_SIZE_OFFSET, 1024
+ .set CORE_SPR_OFFSET, 1024
+ .set SPR_RESTORE_OFFSET, THREAD_LAUNCHER_START_OFFSET + THREAD_LAUNCHER_SIZE_OFFSET
+ .set HRMOR_RESTORE_OFFSET, 0x1200
+ .set URMOR_RESTORE_OFFSET, 0x1280
+ .set SKIP_HRMOR_UPDATE_OFFSET, 4
+ .set SKIP_URMOR_UPDATE_OFFSET, 8
+ .set SPR_SAVE_ROUTINE_OFFSET, 0x2300
+
+ .set STOP, 0x4C0002E4
+ .set ATTN, 0x00000200
+ .set urfid, 0x4C000264
+ .set SECURE_MODE_CONST, 0x0040 # bit 41, note: must be shifted left 16 bits
+ .set MACHINE_CHECK_ENABLE_CONST, 0x1000 # bit 51
+ .set ERR_CODE_SMF_E_NOT_SET, 0x0001 # Core is SMF capable but SMF[E] not SET
+ .set ERR_CODE_SMF_BAD_B62_63_CFG, 0x0002 # Core is not configured to exit UV mode
+ .set SMFCTRL_ENABLE_BIT, 0
+ .set MSR_SECURITY_BIT, 41
+ .set SCRATCH_RUNTIME_MODE_BIT, 59
+
+ .set OTHER_THREADS_STOPPED, 0x07
+ .set CORE_THREAD_STATE_REG_ID, 0x01E0
+ .set CONFIG_CORE_SCRATCH_REG0, 0x0000
+ .set CONFIG_CORE_SCRATCH_REG1, 0x0008
+ .set SECURE_THREAD_SPR_REGION_SIZE, 9216 # 9216 Bytes ( Interrupt Region ) + 1024 Bytes ( Thread Launch Size )
+ .set CORE_SELF_RESTORE_OFFSET, 0xC00
+ .set CORE_SELF_SAVE_OFFSET, 0xE00 # 3.5KB
+ .set THREAD_SELF_SAVE_SIZE, 256
+ .set SELF_REST_VER_INFO_OFFSET, 0x1C
+ .set SMF_SIGNATURE_OFFSET, 0x1300
+ .set SMF_SIGNATURE_CONST, 0x5f534d46 # '_SMF'
+ .set HILE_BIT_POS, 4
+ .set LE_BIT_POS, 63
+ .set MF_HRMOR_R1, 0xa64a397c
+ .set CLEAR_MSR_LE, 0xa407b57a
+ .set MT_SRR1, 0xa603bb7e
+ .set ADDI_R1_32, 0x20012138
+ .set MT_SRR0_R1, 0xa6033a7c
+ .set RFID, 0x2400004c
+ .set TRAP_LE, 0x0800e07f
+ .set MFMSR_R21, 0xa600a07e
+
+
+ .set SPR_SAVE_SCRATCH_REG, r0
+ .set SPR_DATA_REG, r1
+ .set PIR_VAL_REG, r2
+ .set CPMR_BASE_REG, r3
+ .set FUSED_STATUS_REG, r4
+ .set CORE_ID_REG, r5
+ .set THREAD_ID_REG, r6
+ .set BASE_ADDR_REG, r7
+ .set TEMP_REG1, r8
+ .set URMOR_RESTORE_REG, r9
+ .set HRMOR_RESTORE_REG, r10
+ .set THREAD_ACTIVE_STATE_REG, r11
+ .set CORE_SCOPE_RESTORE_ADDR_REG, r12
+ .set THREAD_SCOPE_RESTORE_ADDR_REG, r13
+ .set THREAD_SELF_SAVE_BASE_ADDR, r14
+ .set CORE_SELF_SAVE_BASE_ADDR, r15
+ .set SMF_VAL_REG, r16
+ .set TEMP_REG2, r17
+ .set THREAD_SCRATCH_VAL_REG, r18
+ .set RMOR_INIT_REG, r20
+ .set MSR_INIT_REG, r21
+ .set MSR_SECURITY_ENABLE_REG, r23
+ .set TEST_REG, r24
+ .set SELF_REST_ERR_REG, r25
+ .set SELF_REST_VER_REG, r26
+ .set SELF_SAVE_ADDR_REG, r30
+ .set SELF_RESTORE_ADDR_REG, r31
+
+#--------------------------------------------------------------------#
+
+# Interrupt Vectors
+
+#-----------------------------------------------------------------------#
+ .set SRESET, 0x0100
+
+#--------------------------------------------------------------------#
+
+# CR Register Constants
+
+#--------------------------------------------------------------------#
+
+ .set cr0, 0
+ .set cr1, 1
+ .set cr2, 2
+ .set cr3, 3
+ .set cr4, 4
+ .set cr5, 5
+ .set cr6, 6
+ .set cr7, 7
+ .set lt, 0
+ .set gt, 1
+ .set eq, 2
+ .set so, 3
+
+#--------------------------------------------------------------------#
+.section ".selfRestore" , "ax"
+.global _start
+
+#There is a CPMR header just before the SRESET handler. Below is its layout.
+#------------------------------CPMR Header ------------------------------------
+# Address Offset Contents
+#-----------------------------------------------------------------------------
+# 0x00 ATTN Opcode ATTN Opcode
+#------------------------------------------------------------------------------
+# 0x08 Magic Number
+#------------------------------------------------------------------------------
+# 0x10 Build Date Version
+#-------------------------------------------------------------------------------
+# 0x18 Resvd|Resvd|Resvd|Resvd|Resvd|Resvd|Resvd|Fused Flag
+#-------------------------------------------------------------------------------
+# 0x20 CME Hcode Offset | CME Hcode Length
+#-------------------------------------------------------------------------------
+# 0x28 CME Common Rings | CME Common Rings Section
+# Section Offset | Length
+#-------------------------------------------------------------------------------
+# 0x30 CME Quad Pstate Region | CME Quad Pstate Region
+# Offset Length
+#-------------------------------------------------------------------------------
+# 0x38-0xF8 Reserved( Filled with ATTN instructions )
+#-------------------------------------------------------------------------------#
+_start:
+
+#--------------------------------------------------------------------#
+
+#SRESET handler routine
+#In the wakeup and STOP paths, the CME generates a reset signal for the P9 core, which raises
+#an SRESET interrupt for all threads of the core.
+
+#At the beginning of SRESET, the thread executing this code determines its privilege level.
+#Once the privilege level is known, execution is steered towards the common thread launcher.
+
+#If the thread executing the code is running with Hyp privilege, the thread launcher address is
+#calculated using the contents of HRMOR, whereas if the thread is executing code as Ultravisor,
+#the thread prepares to exit ultravisor mode using the trampoline sequence. The thread launcher
+#address is computed using URMOR. Refer to the table below.
+
+#----------------------------------------------------------------------------
+# Privilege SPR Src SPR Dest
+#----------------------------------------------------------------------------
+# Hyp* HRMOR SRR0
+# MSR SRR1
+#---------------------------------------------------------------------------
+# Ultravisor** URMOR USRR0
+# MSR USRR1
+#---------------------------------------------------------------------------
+
+# * Copy is initiated by rfid instruction
+# ** Copy is initiated by urfid instruction
+#---------------------------------------------------------------------------
+
+# Assume scan init: MSR[Secure]==1 and MSR[HV]=1 and SMFCTRL[E]==0
+
+.org _start + SRESET
+
+_sreset_hndlr:
+
+b big_endian_start
+
+little_endian_start:
+.long MF_HRMOR_R1
+.long MFMSR_R21
+.long CLEAR_MSR_LE
+.long MT_SRR1
+.long ADDI_R1_32
+.long MT_SRR0_R1
+.long RFID
+
+#Note: below are instructions for swizzled machine code used above for
+#LE core entering STOP
+#mfspr r1, HRMOR
+#mfmsr MSR_INIT_REG
+#clrrdi MSR_INIT_REG, MSR_INIT_REG, 1
+#mtsrr1 MSR_INIT_REG
+#addi r1, r1, 288
+#mtsrr0 r1
+#rfid
+
+
+
+big_endian_start:
+mfspr SPR_DATA_REG, HID
+li TEMP_REG1, 0
+insrdi SPR_DATA_REG, TEMP_REG1, 1, HILE_BIT_POS
+mtspr HID, SPR_DATA_REG # Cleared HILE bit position
+mfmsr MSR_INIT_REG
+ori MSR_INIT_REG, MSR_INIT_REG, MACHINE_CHECK_ENABLE_CONST # Set the ME bit
+extrdi. MSR_SECURITY_ENABLE_REG, MSR_INIT_REG, 1, MSR_SECURITY_BIT # read Secure Bit (S) of MSR
+beq hv_core_init # it is a non-secure mode system
+
+uv_core_check:
+#Check For SMF enable bit
+#SMFCTRL[E]=1?
+
+li TEMP_REG2, ERR_CODE_SMF_E_NOT_SET
+mfspr SMF_VAL_REG, SMFCTRL
+extrdi. TEMP_REG1, SMF_VAL_REG, 1, SMFCTRL_ENABLE_BIT
+beq uv_init_error              # Core initialization is not consistent
+
+li TEMP_REG2, ERR_CODE_SMF_BAD_B62_63_CFG
+extrdi TEMP_REG1, SMF_VAL_REG, 2, 62
+cmpwi TEMP_REG1, 0x02
+beq uv_core_init
+
+uv_init_error:
+
+#Put error code in a specific GPR
+#SPATTN to halt as the inits and the mode are not consistent
+
+mr SELF_REST_ERR_REG, TEMP_REG2
+.long ATTN # Error out and block self restore completion
+
+hv_core_init:
+mfspr RMOR_INIT_REG, HRMOR # Get Stop_HRMOR (placed by CME)
+addi TEMP_REG2, RMOR_INIT_REG, 0x2000 # Thread Launcher offset
+mtsrr0 TEMP_REG2 # Save Thread Launcher address to SRR0
+mtsrr1 MSR_INIT_REG # Save MSR to SRR1
+rfid # Invoke Thread Launcher with ME=1 in HV mode
+
+uv_core_init:
+mfspr RMOR_INIT_REG, URMOR
+addi TEMP_REG1, RMOR_INIT_REG, 0x2000 # Thread Launcher offset
+mtspr USRR0, TEMP_REG1 # Save Thread Launcher address to USRR0
+mtspr USRR1, MSR_INIT_REG # Save MSR to USRR1
+.long urfid # Invoke Thread Launcher with ME=1 in UV mode
+
+#--------------------------------------------------------------------#
+
+#Error handling for other interrupt vectors.
+
+#--------------------------------------------------------------------#
+# Machine Check
+#--------------------------------------------------------------------#
+.org _start + 0x0200
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Data Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0300
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Data Segment
+#--------------------------------------------------------------------#
+.org _start + 0x0380
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Instruction Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0400
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Instruction Segment
+#--------------------------------------------------------------------#
+.org _start + 0x0480
+.long ATTN
+
+#--------------------------------------------------------------------#
+# External
+#--------------------------------------------------------------------#
+.org _start + 0x0500
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Alignment
+#--------------------------------------------------------------------#
+.org _start + 0x0600
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Program
+#--------------------------------------------------------------------#
+.org _start + 0x0700
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Floating Point Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0800
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Decrementer
+#--------------------------------------------------------------------#
+.org _start + 0x0900
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Decrementer
+#--------------------------------------------------------------------#
+.org _start + 0x0980
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Directed Privileged Doorbell
+#--------------------------------------------------------------------#
+.org _start + 0x0A00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0B00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# System Call
+#--------------------------------------------------------------------#
+.org _start + 0x0C00
+b _sreset_hndlr
+
+#--------------------------------------------------------------------#
+# Trace
+#--------------------------------------------------------------------#
+.org _start + 0x0D00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Data Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0E00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Instruction Storage
+#--------------------------------------------------------------------#
+.org _start + 0x0E20
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Emulation Assistance
+#--------------------------------------------------------------------#
+.org _start + 0x0E40
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Maintenance
+#--------------------------------------------------------------------#
+.org _start + 0x0E60
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Directed Ultravisor Doorbell
+#--------------------------------------------------------------------#
+.org _start + 0x0E80
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EA0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EC0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Reserved
+#--------------------------------------------------------------------#
+.org _start + 0x0EE0
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Performance Monitoring
+#--------------------------------------------------------------------#
+.org _start + 0x0F00
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Vector Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F20
+.long ATTN
+
+#--------------------------------------------------------------------#
+# VSX Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F40
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Facility Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F60
+.long ATTN
+
+#--------------------------------------------------------------------#
+# Ultravisor Facility Unavailable
+#--------------------------------------------------------------------#
+.org _start + 0x0F80
+.long ATTN
+
+
+#--------------------------------------------------------------------#
+# Self Restore Completion
+#--------------------------------------------------------------------#
+.org _start + HRMOR_RESTORE_OFFSET
+
+#restore the URMOR/HRMOR to the value needed by the Ultravisor upon wakeup
+#there can be no future I-fetches after this point, so no more than 7
+#instructions after this
+
+mtspr HRMOR, HRMOR_RESTORE_REG
+
+#necessary to invalidate stale translations in the ERATs that were created
+#during the self-restore code execution.
+
+save_restore_done:
+slbia
+.long STOP # Core entering STOP state from HV state
+.long ATTN
+#--------------------------------------------------------------------
+
+.org _start + URMOR_RESTORE_OFFSET
+mtspr HRMOR, HRMOR_RESTORE_REG
+mtspr URMOR, URMOR_RESTORE_REG
+slbia
+.long STOP # Core entering STOP from UV state
+.long ATTN
+
+#---------------------------------------------------------------------
+
+.org _start + SMF_SIGNATURE_OFFSET
+## A signature indicating that the self save-restore image supports SMF.
+.long SMF_SIGNATURE_CONST
+#---------------------------------------------------------------------
+
+# common code for thread restoration
+
+#---------------------------------------------------------------------
+
+.org _start + THREAD_LAUNCHER_START_OFFSET
+
+thread_launcher_start:
+mfspr PIR_VAL_REG, PIR #Processor Identification Register
+#Select to read the Thread State Status register in PC using SPRC & SPRD
+li TEMP_REG1, CORE_THREAD_STATE_REG_ID
+mtspr SPRC, TEMP_REG1
+mfspr FUSED_STATUS_REG, SPRD
+extrdi. FUSED_STATUS_REG, FUSED_STATUS_REG, 1, 63
+beq core_is_not_fused
+
+core_is_fused:
+#core is fused. Find physical core number from PIR bits.
+#Bit 60: Fuse Core Select within the Quad
+#Bit 61:62 Thread select within a core chiplet
+#Bit 63 chip select within the fused core
+
+#Multiply the fused core select bit (bit 60) by 2 since there are two core chiplets in
+#a pair forming the fused core. Bit 63 selects even or odd core within the pair.
+#Physical core id = 2 * (bit 60) + (bit 63)
+
+extrdi CORE_ID_REG, PIR_VAL_REG, 1, 60
+sldi CORE_ID_REG, CORE_ID_REG, 1
+extrdi TEMP_REG1, PIR_VAL_REG, 1, 63
+add CORE_ID_REG, CORE_ID_REG, TEMP_REG1
+
+# thread id = 2 * (bit 61 ) + bit 62
+extrdi THREAD_ID_REG, PIR_VAL_REG, 2, 61
+b thread_restore
+
+core_is_not_fused:
+#core is not fused. Find relative id within Quad
+#bit 60:61 core chiplet select within a Quad.
+#bit 62:63 thread select within a core chiplet.
+
+extrdi CORE_ID_REG, PIR_VAL_REG, 2, 60
+extrdi THREAD_ID_REG, PIR_VAL_REG, 2, 62
+
+#********* Determining core id relative to P9 chip by using quad info **************
+# bit 57:59 Quad Select within the P9 Chip
+
+thread_restore:
+extrdi TEMP_REG1, PIR_VAL_REG, 3, 57 # get quad bits
+sldi TEMP_REG1, TEMP_REG1, 2 # quad id * 4 core chiplets per quad
+add CORE_ID_REG, CORE_ID_REG, TEMP_REG1 # P9 core id = 4 * quad id + index within Quad
+
+#***************** find address where restore instructions are present **************
+
+#Found core id and thread id. Calculate the offset associated with the restore area.
+
+#Below is a representation of UV & HV register restore section layout
+#************************************************************ core base address ( b )
+#*********************************End of core interrupt region ********************** b
+# Core 0 Thread 0 Self Restore
+#------------------------------------------------------------------------------------ b + 512B
+# Core 0 Thread 1 Self Restore
+#------------------------------------------------------------------------------------ b + 1024B
+# Core 0 Thread 2 Self Restore
+#------------------------------------------------------------------------------------ b + 1536B
+# Core 0 Thread 3 Self Restore
+#------------------------------------------------------------------------------------ b + 2048B
+# Core 0 Thread 0 Self Save
+#------------------------------------------------------------------------------------ b + 2304B
+# Core 0 Thread 1 Self Save
+#------------------------------------------------------------------------------------ b + 2560B
+# Core 0 Thread 2 Self Save
+#------------------------------------------------------------------------------------ b + 2816B
+# Core 0 Thread 3 Self Save
+#------------------------------------------------------------------------------------ b + 3072B
+# Core 0 Self Restore 256
+#------------------------------------------------------------------------------------ b + 3584B
+# Core 0 Self Save 128
+#------------------------------------------------------------------------------------ b + 4096B
+# Core 1 Thread 0 Self Restore
+#------------------------------------------------------------------------------------ b + 4608B
+# Core 1 Thread 1 Self Restore
+#------------------------------------------------------------------------------------ b + 5120B
+# .
+# .
+# .
+# .
+# .
+# .
+#------------------------------------------------------------------------------------
+
+sldi BASE_ADDR_REG, CORE_ID_REG, 12                     # times 4K = 2^12
+addi BASE_ADDR_REG, BASE_ADDR_REG, SECURE_THREAD_SPR_REGION_SIZE
+add BASE_ADDR_REG, BASE_ADDR_REG, RMOR_INIT_REG # plus CPMR Base
+
+calculate_thread_save_addr: # 256 * thread id
+sldi THREAD_SELF_SAVE_BASE_ADDR, THREAD_ID_REG, 8
+add THREAD_SELF_SAVE_BASE_ADDR, THREAD_SELF_SAVE_BASE_ADDR, BASE_ADDR_REG
+addi THREAD_SELF_SAVE_BASE_ADDR, THREAD_SELF_SAVE_BASE_ADDR, 2048
+
+calculate_core_self_save_addr:
+mr CORE_SELF_SAVE_BASE_ADDR, BASE_ADDR_REG
+addi CORE_SELF_SAVE_BASE_ADDR, CORE_SELF_SAVE_BASE_ADDR, CORE_SELF_SAVE_OFFSET
+
+calculate_self_restore_address:
+mr THREAD_SCOPE_RESTORE_ADDR_REG, BASE_ADDR_REG
+sldi TEMP_REG1, THREAD_ID_REG, 9
+add THREAD_SCOPE_RESTORE_ADDR_REG, THREAD_SCOPE_RESTORE_ADDR_REG, TEMP_REG1
+addi CORE_SCOPE_RESTORE_ADDR_REG, BASE_ADDR_REG, CORE_SELF_RESTORE_OFFSET
+
+##read register scratch0 for even core and scratch1 for odd core
+mr TEMP_REG1, CORE_ID_REG
+andi. TEMP_REG1, TEMP_REG1, 0x01
+cmplwi TEMP_REG1, 0x00
+beq set_scratch_reg0
+
+set_scratch_reg1:
+li TEMP_REG1, CONFIG_CORE_SCRATCH_REG1
+mtspr SPRC, TEMP_REG1
+b read_scratch_reg
+
+set_scratch_reg0:
+li TEMP_REG1, CONFIG_CORE_SCRATCH_REG0
+mtspr SPRC, TEMP_REG1
+
+read_scratch_reg:
+mfspr THREAD_SCRATCH_VAL_REG, SPRD
+li TEMP_REG2, 0x01
+and TEMP_REG1, THREAD_SCRATCH_VAL_REG, TEMP_REG2
+cmpwi TEMP_REG1, 0x00
+bne find_self_save
+
+thread_restore_base:
+li TEMP_REG1, CORE_THREAD_STATE_REG_ID
+mtspr SPRC, TEMP_REG1
+mtlr THREAD_SCOPE_RESTORE_ADDR_REG
+blrl ## branch to thread register restore area
+
+# return here after thread register restoration
+thread_restore_return:
+cmpwi THREAD_ID_REG, 0 # if thread in question is not 0, skip core shared reg restore
+bne restore_done # else wait for other threads to be stopped again
+
+
+wait_until_single_thread:
+mfspr TEMP_REG1, SPRD
+extrdi TEMP_REG1, TEMP_REG1, 4, 56
+cmpwi TEMP_REG1, OTHER_THREADS_STOPPED
+bne wait_until_single_thread # wait until all threads are done restoring
+
+mtlr CORE_SCOPE_RESTORE_ADDR_REG
+blrl # branch to core shared register restore area
+
+
+# return here after shared core register restoration
+restore_done: # now all regs are restored except URMOR & MSR
+
+# Use RFID to restore the requested MSR and, if thread0, to finally restore the URMOR
+# before executing STOP again to indicate completion to CME. Always override the given
+# MSR value to remain in Big Endian and Secure Mode so we can complete the self restore
+# (although override is really only needed for the case of non-secure HV-only Linux
+# systems, since the Hypervisor cannot set the Secure bit and Linux will set LE)
+# Note: SRESET on the eventual wakeup will properly configure the LE and S bits in the MSR
+
+addi TEMP_REG1, 0, -2 # = 0xF...FFFE, create a mask excluding bit 63
+and. MSR_INIT_REG, MSR_INIT_REG, TEMP_REG1 # to clear LE bit
+cmplwi MSR_SECURITY_ENABLE_REG, 0
+beq initiate_hv_compatibility_mode                      # HV compatibility mode, SMFCTRL[E] is 0b0
+
+extrdi. TEMP_REG2, THREAD_SCRATCH_VAL_REG, 1, SCRATCH_RUNTIME_MODE_BIT # Check Run-Time Wakeup Mode
+beq initiate_urmor_restore
+
+initiate_runtime_hv_wakeup:
+
+#------------------------------------------------------------------------------------
+# HW Bug Workaround: With the MSR[S] bit ON, clearing the SMFCTRL[E] bit leads to a
+# Checkstop. Clearing SMFCTRL[E] is a must for exiting as HV. In order to
+# accomplish it, the following workaround has been implemented:
+# (1). Slave threads should not attempt to clear SMFCTRL[E]. Only the master thread
+#      should do that.
+# (2). Before clearing SMFCTRL[E], clear bit 41 in SPRs HSRR1 and SRR1.
+#------------------------------------------------------------------------------------
+li TEMP_REG1, 0
+mfsrr1 TEMP_REG2
+insrdi TEMP_REG2, TEMP_REG1, 1, MSR_SECURITY_BIT
+mtsrr1 TEMP_REG2
+mfspr TEMP_REG2, HSRR1
+insrdi TEMP_REG2, TEMP_REG1, 1, MSR_SECURITY_BIT
+mtspr HSRR1, TEMP_REG2
+mfspr SMF_VAL_REG, SMFCTRL
+insrdi SMF_VAL_REG, TEMP_REG1, 1, SMFCTRL_ENABLE_BIT
+insrdi MSR_INIT_REG, TEMP_REG1, 1, MSR_SECURITY_BIT
+
+addi TEMP_REG1, RMOR_INIT_REG, HRMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_usrrx
+addi TEMP_REG1, TEMP_REG1, SKIP_HRMOR_UPDATE_OFFSET # restore HRMOR only if thread0
+
+update_usrrx:
+mtspr USRR0, TEMP_REG1
+mtspr USRR1, MSR_INIT_REG
+cmpwi THREAD_ID_REG, 0
+bne exit_to_thread_stop
+
+#------------------------------ Trampoline Sequence Start -------------------------------
+
+mtspr SMFCTRL, SMF_VAL_REG #SMFCTRL[E] = 0b0 for HV exit and 0b1 for UV exit
+
+exit_to_thread_stop:
+isync
+.long urfid
+#------------------------------ Trampoline Sequence End ----------------------------------
+.long ATTN
+
+
+initiate_hv_compatibility_mode:
+
+addi TEMP_REG1, RMOR_INIT_REG, HRMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_srrx
+addi TEMP_REG1, TEMP_REG1, SKIP_HRMOR_UPDATE_OFFSET # restore HRMOR only if thread0
+
+update_srrx:
+mtsrr0 TEMP_REG1
+mtsrr1 MSR_INIT_REG
+rfid
+.long ATTN
+
+initiate_urmor_restore:
+addi TEMP_REG1, RMOR_INIT_REG, URMOR_RESTORE_OFFSET
+cmplwi THREAD_ID_REG, 0
+beq update_uv_exit
+addi TEMP_REG1, TEMP_REG1, SKIP_URMOR_UPDATE_OFFSET # restore URMOR only if thread0
+
+update_uv_exit:
+mtspr USRR0, TEMP_REG1
+mtspr USRR1, MSR_INIT_REG
+.long urfid
+.long ATTN
+
+# THREAD_LAUNCHER_SIZE_OFFSET must be >= (4 * number of instructions between
+# here and thread_launcher_start)
+
+find_self_save:
+addi SELF_RESTORE_ADDR_REG, THREAD_SCOPE_RESTORE_ADDR_REG, 8
+mtlr THREAD_SELF_SAVE_BASE_ADDR
+blrl
+cmpwi THREAD_ID_REG, 0 # if thread in question is 0, also save core SPRs
+bne save_restore_done # else saving of SPRs is done
+
+save_core_spr:
+# 8B for mflr r30
+# 32B for skipping HRMOR restore entry
+# Self save should start at an offset 8B + 32B = 40B
+addi SELF_RESTORE_ADDR_REG, CORE_SCOPE_RESTORE_ADDR_REG, 40
+mtlr CORE_SELF_SAVE_BASE_ADDR
+blrl
+
+b save_restore_done
+
+#-------------------- Self Save Routine --------------------------------
+
+## This is a common routine which can edit the SPR restore entry for an SPR
+## of core or thread scope. It edits the parts of the SPR restore
+## entry that contain the data to which the given SPR needs to be restored.
+
+.org _start + SPR_SAVE_ROUTINE_OFFSET
+
+li SPR_SAVE_SCRATCH_REG, 0x0278
+oris SPR_SAVE_SCRATCH_REG, SPR_SAVE_SCRATCH_REG, 0x7c00
+stw SPR_SAVE_SCRATCH_REG, -4 (SELF_RESTORE_ADDR_REG)
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 0
+oris SPR_SAVE_SCRATCH_REG, SPR_SAVE_SCRATCH_REG, 0x6400
+sthu SPR_SAVE_SCRATCH_REG, 2( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 16
+sthu SPR_SAVE_SCRATCH_REG, 4( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG, 16, 32
+sthu SPR_SAVE_SCRATCH_REG, 8( SELF_RESTORE_ADDR_REG )
+extrdi SPR_SAVE_SCRATCH_REG, SPR_DATA_REG , 16, 48
+sthu SPR_SAVE_SCRATCH_REG, 4( SELF_RESTORE_ADDR_REG )
+addi SELF_RESTORE_ADDR_REG, SELF_RESTORE_ADDR_REG, 14
+blr
+
+#--------------------------------- End Thread Launcher ---------------
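Editor's note: the store sequence above patches the 16-bit immediate fields of an SPR restore entry with the four halves of the 64-bit SPR value, extracted in big-endian bit order (bits 0:15, 16:31, 32:47, 48:63, matching the extrdi operands). A minimal C sketch of that split, assuming the same big-endian bit numbering; the surrounding entry layout is defined by the restore template and is not reproduced here:

    #include <stdint.h>

    /* Split a 64-bit SPR value into four 16-bit halves using PowerPC
     * big-endian bit numbering: half[0] holds bits 0:15 (the most
     * significant 16 bits), as extrdi DATA, 16, 0 does above. */
    static void split_spr_value( uint64_t i_sprData, uint16_t o_half[4] )
    {
        o_half[0] = (uint16_t)( i_sprData >> 48 );  /* bits  0:15  */
        o_half[1] = (uint16_t)( i_sprData >> 32 );  /* bits 16:31  */
        o_half[2] = (uint16_t)( i_sprData >> 16 );  /* bits 32:47  */
        o_half[3] = (uint16_t)( i_sprData );        /* bits 48:63  */
    }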
diff --git a/import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H b/import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H
index a7a87519..6f647eb4 100755
--- a/import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H
+++ b/import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2020 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -76,7 +76,8 @@ enum
SKIP_SPR_REST_INST = 0x4800001c, //b . +0x01c
MFLR_R30 = 0x7fc802a6,
SKIP_SPR_SELF_SAVE = 0x3bff0020, //addi r31 r31, 0x20
- MTLR_INST = 0x7fc803a6 //mtlr r30
+ MTLR_INST = 0x7fc803a6, //mtlr r30
+ BRANCH_BE_INST = 0x48000020,
};
#ifdef __cplusplus
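Editor's note: the new BRANCH_BE_INST constant (0x48000020) is the I-form unconditional branch `b .+0x20` (primary opcode 18, LI = 0x20, AA = LK = 0). It is the word the capability check later expects at the start of the self-restore code, letting a big-endian entry stub hop over the little-endian prologue, as the selfRest.list disassembly further below shows at offset 0x100. A small, purely illustrative helper for the encoding (not part of this change):

    #include <stdint.h>

    /* Encode a PowerPC I-form relative branch: opcode 18 in bits 0:5,
     * 24-bit word-aligned displacement, AA = 0, LK = 0.
     * encode_branch_rel(0x20) yields 0x48000020, i.e. b .+0x20. */
    static uint32_t encode_branch_rel( int32_t i_byteOffset )
    {
        return 0x48000000u | ( (uint32_t)i_byteOffset & 0x03FFFFFCu );
    }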
diff --git a/import/chips/p9/procedures/utils/stopreg/p9_stop_api.C b/import/chips/p9/procedures/utils/stopreg/p9_stop_api.C
index 295dbb17..dcf92d82 100755
--- a/import/chips/p9/procedures/utils/stopreg/p9_stop_api.C
+++ b/import/chips/p9/procedures/utils/stopreg/p9_stop_api.C
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2020 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -80,7 +80,7 @@ const uint32_t LEGACY_QUAD_SCOM_SUPPORTED = 63;
//-----------------------------------------------------------------------------
/**
- * @brief vaildated input arguments passed to p9_stop_save_cpureg_control.
+ * @brief validates input arguments passed to p9_stop_save_cpureg_control.
* @param[in] i_pImage point to start of HOMER
* @param[in] i_coreId id of the core
* @param[in] i_threadId id of the thread
@@ -651,138 +651,9 @@ StopReturnCode_t p9_stop_save_cpureg( void* const i_pImage,
const uint64_t i_regData,
const uint64_t i_pir )
{
- StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; // procedure return code
- HomerSection_t* chipHomer = NULL;
- SmfHomerSection_t* smfChipHomer = NULL;
-
- do
- {
- uint32_t threadId = 0;
- uint32_t coreId = 0;
- uint32_t lookUpKey = 0;
- void* pSprEntryLocation = NULL; // an offset w.r.t. to start of image
- void* pThreadLocation = NULL;
- bool threadScopeReg = false;
- uint8_t l_urmorFix = false;
- uint64_t l_sprValue = 0;
- uint8_t l_selfRestVer = 0;
-
- MY_INF(">> p9_stop_save_cpureg" );
-
- l_rc = getCoreAndThread( i_pImage, i_pir, &coreId, &threadId );
-
- if( l_rc )
- {
- MY_ERR("Failed to determine Core Id and Thread Id from PIR 0x%016llx",
- i_pir);
- break;
- }
-
- MY_INF( " PIR 0x%016llx coreId %d threadid %d "
- " registerId %d", i_pir, coreId,
- threadId, i_regId );
-
- // First of all let us validate all input arguments.
- l_rc = validateSprImageInputs( i_pImage,
- i_regId,
- coreId,
- &threadId,
- &threadScopeReg );
-
- if( l_rc )
- {
- // Error: bad argument traces out error code
- MY_ERR("Bad input argument rc %d", l_rc );
-
- break;
- }
-
- l_urmorFix = *(uint8_t*)((uint8_t*)i_pImage + CPMR_HOMER_OFFSET + CPMR_URMOR_FIX_BYTE);
- l_selfRestVer = *(uint8_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_SELF_RESTORE_VER_BYTE );
-
- if( l_selfRestVer )
- {
- smfChipHomer = ( SmfHomerSection_t*)i_pImage;
-
- if( threadScopeReg )
- {
- pThreadLocation =
- &(smfChipHomer->iv_coreThreadRestore[coreId].iv_threadRestoreArea[threadId][0]);
- }
- else
- {
- pThreadLocation =
- &(smfChipHomer->iv_coreThreadRestore[coreId].iv_coreRestoreArea[0]);
- }
- }
- else //Old fips or OPAL release that doesn't support SMF
- {
- chipHomer = (HomerSection_t*)i_pImage;
-
- if( threadScopeReg )
- {
- pThreadLocation =
- &(chipHomer->iv_coreThreadRestore[coreId][threadId].iv_threadArea[0]);
- }
- else
- {
- pThreadLocation =
- &(chipHomer->iv_coreThreadRestore[coreId][threadId].iv_coreArea[0]);
- }
- }
-
- if( ( SWIZZLE_4_BYTE(BLR_INST) == *(uint32_t*)pThreadLocation ) ||
- ( SWIZZLE_4_BYTE(ATTN_OPCODE) == *(uint32_t*) pThreadLocation ) )
- {
- // table for given core id doesn't exit. It needs to be
- // defined.
- pSprEntryLocation = pThreadLocation;
- }
- else
- {
- // an SPR restore section for given core already exists
- lookUpKey = genKeyForSprLookup( i_regId );
- l_rc = lookUpSprInImage( (uint32_t*)pThreadLocation,
- lookUpKey,
- threadScopeReg,
- &pSprEntryLocation,
- l_selfRestVer );
- }
-
- if( l_rc )
- {
- MY_ERR("Invalid or corrupt SPR entry. CoreId 0x%08x threadId ",
- "0x%08x regId 0x%08x lookUpKey 0x%08x pThreadLocation 0x%08x"
- , coreId, threadId, i_regId, lookUpKey, pThreadLocation );
- break;
- }
-
- if( ( P9_STOP_SPR_URMOR == i_regId ) && ( l_urmorFix ) )
- {
- l_sprValue = i_regData - URMOR_CORRECTION;
- }
- else
- {
- l_sprValue = i_regData;
- }
-
- l_rc = updateSprEntryInImage( (uint32_t*) pSprEntryLocation,
- i_regId,
- l_sprValue,
- UPDATE_SPR_ENTRY );
-
- if( l_rc )
- {
- MY_ERR( " Failed to update the SPR entry of PIR 0x%08x reg"
- "0x%08x", i_pir, i_regId );
- break;
- }
-
- }
- while(0);
+ MY_INF(">> p9_stop_save_cpureg" );
- MY_INF("<< p9_stop_save_cpureg" );
- return l_rc;
+ return proc_stop_save_cpureg( i_pImage, i_regId, i_regData, i_pir );
}
//-----------------------------------------------------------------------------
@@ -1008,6 +879,239 @@ StopReturnCode_t p9_stop_save_scom( void* const i_pImage,
const ScomOperation_t i_operation,
const ScomSection_t i_section )
{
+ MY_INF(">> p9_stop_save_scom");
+
+ return proc_stop_save_scom( i_pImage, i_scomAddress,
+ i_scomData, i_operation, i_section );
+}
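Editor's note: with this change the legacy p9_stop_save_scom entry point becomes a thin wrapper around the project-agnostic proc_stop_save_scom. A hedged usage sketch based only on the signature and enumerators declared in p9_stop_api.H by this patch; the HOMER pointer and the SCOM address/data are placeholders:

    /* Illustrative caller: append a core SCOM restore entry to HOMER. */
    StopReturnCode_t l_rc = proc_stop_save_scom( l_pHomer,               /* placeholder HOMER image  */
                                                 0x20010A40,             /* placeholder SCOM address */
                                                 0x8000000000000000ull,  /* placeholder SCOM data    */
                                                 PROC_STOP_SCOM_APPEND,
                                                 PROC_STOP_SECTION_CORE_SCOM );

    if( STOP_SAVE_SUCCESS != l_rc )
    {
        /* handle or propagate the error */
    }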
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief searches for the self-save entry of an SPR in the self-save segment.
+ * @param[in] i_sprBitPos bit position associated with the SPR in the save mask vector.
+ * @param[in] l_pSprSaveStart start location of the SPR save segment.
+ * @param[in] i_searchLength length of the SPR save segment.
+ * @param[out] i_pSaveSprLoc returns the location of the save entry for the given SPR.
+ * @return STOP_SAVE_SUCCESS if the lookup succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t lookUpSelfSaveSpr( uint32_t i_sprBitPos, uint32_t* l_pSprSaveStart,
+ uint32_t i_searchLength, uint32_t** i_pSaveSprLoc )
+{
+ int32_t l_saveWordLength = (int32_t)(i_searchLength >> 2);
+ uint32_t l_oriInst = getOriInstruction( 0, 0, i_sprBitPos );
+ StopReturnCode_t l_rc = STOP_SAVE_FAIL;
+
+ while( l_saveWordLength > 0 )
+ {
+ if( l_oriInst == *l_pSprSaveStart )
+ {
+ *i_pSaveSprLoc = l_pSprSaveStart;
+ l_rc = STOP_SAVE_SUCCESS;
+ break;
+ }
+
+ l_pSprSaveStart++;
+ l_saveWordLength--;
+ }
+
+ return l_rc;
+}
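Editor's note: lookUpSelfSaveSpr scans the self-save segment one 32-bit word at a time for a marker produced by getOriInstruction( 0, 0, i_sprBitPos ). Assuming that helper emits a standard `ori r0,r0,UI` (primary opcode 24), the marker word would simply be 0x60000000 ORed with the bit position; a sketch of that assumption (the real helper lives elsewhere in the stopreg sources):

    #include <stdint.h>

    /* Assumed marker encoding: ori rS=0, rA=0, UI=i_sprBitPos. */
    static uint32_t self_save_marker( uint32_t i_sprBitPos )
    {
        return 0x60000000u | ( i_sprBitPos & 0xFFFFu );
    }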
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief updates the self-save entry of an SPR in the self-save segment.
+ * @param[in] i_pSaveReg start of the editable location of an SPR save entry.
+ * @param[in] i_sprNum Id of the SPR for which the entry needs to be edited.
+ * @return STOP_SAVE_SUCCESS if the update succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t updateSelfSaveEntry( uint32_t* i_pSaveReg, uint16_t i_sprNum )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+
+ do
+ {
+ if( !i_pSaveReg )
+ {
+ l_rc = STOP_SAVE_FAIL;
+ MY_ERR( "Failed to update self save area for SPR 0x%04x", i_sprNum );
+ break;
+ }
+
+ if( P9_STOP_SPR_MSR == i_sprNum )
+ {
+ *i_pSaveReg = getMfmsrInstruction( 1 );
+ }
+ else
+ {
+ *i_pSaveReg = getMfsprInstruction( 1, i_sprNum );
+ }
+
+ i_pSaveReg++;
+
+ *i_pSaveReg = getBranchLinkRegInstruction( );
+ }
+ while(0);
+
+ return l_rc;
+}
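Editor's note: updateSelfSaveEntry turns a reserved save slot into a two-instruction stub: an mfmsr/mfspr into a scratch GPR followed by a branch-to-link-register back to the caller. The helpers getMfmsrInstruction, getMfsprInstruction and getBranchLinkRegInstruction are defined elsewhere in the stopreg sources; assuming they emit the standard PowerPC encodings (consistent with constants such as MFLR_R30 = 0x7fc802a6 above), the generated words would look like this:

    #include <stdint.h>

    /* Assumed encodings, for illustration only. */
    static uint32_t mfspr_inst( uint32_t i_rt, uint32_t i_spr )
    {
        return 0x7C0002A6u | ( i_rt << 21 )
                           | ( ( i_spr & 0x1Fu ) << 16 )            /* low 5 bits of SPR number  */
                           | ( ( ( i_spr >> 5 ) & 0x1Fu ) << 11 );  /* high 5 bits of SPR number */
    }

    static uint32_t mfmsr_inst( uint32_t i_rt ) { return 0x7C0000A6u | ( i_rt << 21 ); }
    static uint32_t blr_inst( void )            { return 0x4E800020u; }

For example, mfspr_inst( 30, 8 ) reproduces the MFLR_R30 constant (0x7fc802a6), which is what makes the assumption plausible.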
+
+//-----------------------------------------------------------------------------
+
+StopReturnCode_t p9_stop_save_cpureg_control( void* i_pImage,
+ const uint64_t i_pir,
+ const uint32_t i_saveRegVector )
+{
+ MY_INF( ">> p9_stop_save_cpureg_control" );
+
+ return proc_stop_save_cpureg_control( i_pImage, i_pir, i_saveRegVector );
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t p9_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos )
+{
+ MY_INF( ">> p9_stop_init_cpureg" );
+
+ return proc_stop_init_cpureg( i_pImage, i_corePos );
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t p9_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos )
+{
+ MY_INF( ">> p9_stop_init_self_save" );
+
+ return proc_stop_init_self_save( i_pImage, i_corePos );
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos )
+{
+
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t* l_pRestoreStart = NULL;
+ void* l_pTempLoc = NULL;
+ SmfHomerSection_t* l_pHomer = NULL;
+ uint32_t l_threadPos = 0;
+ uint32_t l_lookUpKey = 0;
+ uint32_t l_sprIndex = 0;
+ uint8_t l_selfRestVer = 0;
+
+ MY_INF( ">> proc_stop_init_cpureg" );
+
+ do
+ {
+ if( !i_pImage )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ if( i_corePos > MAX_CORE_ID_SUPPORTED )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ break;
+ }
+
+ l_pHomer = ( SmfHomerSection_t * ) i_pImage;
+ l_selfRestVer = *(uint8_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_SELF_RESTORE_VER_BYTE );
+
+ for( l_sprIndex = 0; l_sprIndex < MAX_SPR_SUPPORTED; l_sprIndex++ )
+ {
+ //Check if a given SPR needs to be self-saved each time on STOP entry
+
+ l_lookUpKey = genKeyForSprLookup( ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId );
+
+ if( g_sprRegister[l_sprIndex].iv_isThreadScope )
+ {
+ for( l_threadPos = 0; l_threadPos < MAX_THREADS_PER_CORE; l_threadPos++ )
+ {
+ l_pRestoreStart =
+ (uint32_t*)&l_pHomer->iv_coreThreadRestore[i_corePos].iv_threadRestoreArea[l_threadPos][0];
+
+ l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
+ g_sprRegister[l_sprIndex].iv_isThreadScope,
+ &l_pTempLoc,
+ l_selfRestVer );
+
+ if( l_rc )
+ {
+ MY_ERR( "Thread SPR lookup failed in p9_stop_init_cpureg SPR %d Core %d Thread %d Index %d",
+ g_sprRegister[l_sprIndex].iv_sprId, i_corePos, l_threadPos, l_sprIndex );
+ break;
+ }
+
+ l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
+ ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId,
+ 0x00,
+ INIT_SPR_REGION );
+
+ if( l_rc )
+ {
+ MY_ERR( "Thread SPR region init failed. Core %d SPR Id %d",
+ i_corePos, g_sprRegister[l_sprIndex].iv_sprId );
+ break;
+ }
+
+ }//end for thread
+
+ if( l_rc )
+ {
+ break;
+ }
+
+ }//end if SPR threadscope
+ else
+ {
+ l_pRestoreStart = (uint32_t*)&l_pHomer->iv_coreThreadRestore[i_corePos].iv_coreRestoreArea[0];
+
+ l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
+ g_sprRegister[l_sprIndex].iv_isThreadScope,
+ &l_pTempLoc, l_selfRestVer );
+
+ if( l_rc )
+ {
+ MY_ERR( "Core SPR lookup failed in p9_stop_init_cpureg" );
+ break;
+ }
+
+ l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
+ ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId,
+ 0x00,
+ INIT_SPR_REGION );
+
+ if( l_rc )
+ {
+ MY_ERR( "Core SPR region init failed. Core %d SPR Id %d SPR Index %d",
+ i_corePos, g_sprRegister[l_sprIndex].iv_sprId, l_sprIndex );
+ break;
+ }
+
+ }// end else
+
+ }// end for l_sprIndex
+
+ }
+ while(0);
+
+ MY_INF( "<< proc_stop_init_cpureg" );
+
+ return l_rc;
+}
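Editor's note: proc_stop_init_cpureg works on one physical core at a time, so a HOMER build would typically call it once per core before any SPR restore entries are edited. A minimal sketch of that loop; l_pHomer is a placeholder and MAX_CORE_ID_SUPPORTED comes from the stopreg headers:

    /* Hypothetical caller: seed the SPR restore region of every core in HOMER. */
    StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
    uint32_t l_corePos = 0;

    for( l_corePos = 0; l_corePos <= MAX_CORE_ID_SUPPORTED; l_corePos++ )
    {
        l_rc = proc_stop_init_cpureg( l_pHomer, l_corePos );

        if( STOP_SAVE_SUCCESS != l_rc )
        {
            break;  /* stop at the first failure */
        }
    }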
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_save_scom( void* const i_pImage,
+ const uint32_t i_scomAddress,
+ const uint64_t i_scomData,
+ const ScomOperation_t i_operation,
+ const ScomSection_t i_section )
+{
+ MY_INF( ">> proc_stop_save_scom" );
+
StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
uint32_t entryLimit = 0;
uint8_t chipletId = 0;
@@ -1028,7 +1132,6 @@ StopReturnCode_t p9_stop_save_scom( void* const i_pImage,
uint32_t swizzleBlr = SWIZZLE_4_BYTE(BLR_INST);
bool cacheEntry = true;
- MY_INF(">> p9_stop_save_scom");
//Reads SGPE image version info from QPMR Header in HOMER
//For backward compatibility, for base version of SGPE Hcode,
@@ -1323,87 +1426,16 @@ StopReturnCode_t p9_stop_save_scom( void* const i_pImage,
updateEntryHeader( pEditScomHeader, imageVer, l_maxScomRestoreEntry );
}
- MY_INF("<< p9_stop_save_scom");
- return l_rc;
-}
-
-//-----------------------------------------------------------------------------
-
-/**
- * @brief searches a self save entry of an SPR in self-save segment.
- * @param[in] i_sprBitPos bit position associated with SPR in save mask vector.
- * @param[in] l_pSprSaveStart start location of SPR save segment
- * @param[in] i_searchLength length of SPR save segment
- * @param[in] i_pSaveSprLoc start location of save entry for a given SPR.
- * @return STOP_SAVE_SUCCESS if look up succeeds, error code otherwise.
- */
-STATIC StopReturnCode_t lookUpSelfSaveSpr( uint32_t i_sprBitPos, uint32_t* l_pSprSaveStart,
- uint32_t i_searchLength, uint32_t** i_pSaveSprLoc )
-{
- int32_t l_saveWordLength = (int32_t)(i_searchLength >> 2);
- uint32_t l_oriInst = getOriInstruction( 0, 0, i_sprBitPos );
- StopReturnCode_t l_rc = STOP_SAVE_FAIL;
-
- while( l_saveWordLength > 0 )
- {
- if( l_oriInst == *l_pSprSaveStart )
- {
- *i_pSaveSprLoc = l_pSprSaveStart;
- l_rc = STOP_SAVE_SUCCESS;
- break;
- }
-
- l_pSprSaveStart++;
- l_saveWordLength--;
- }
-
- return l_rc;
-}
-
-//-----------------------------------------------------------------------------
-
-/**
- * @brief searches a self save entry of an SPR in self-save segment.
- * @param[in] i_pSaveReg start of editable location of a SPR save entry.
- * @param[in] i_sprNum Id of the SPR for which entry needs to be edited.
- * @return STOP_SAVE_SUCCESS if look up succeeds, error code otherwise.
- */
-STATIC StopReturnCode_t updateSelfSaveEntry( uint32_t* i_pSaveReg, uint16_t i_sprNum )
-{
- StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
-
- do
- {
- if( !i_pSaveReg )
- {
- l_rc = STOP_SAVE_FAIL;
- MY_ERR( "Failed to update self save area for SPR 0x%04x", i_sprNum );
- break;
- }
-
- if( P9_STOP_SPR_MSR == i_sprNum )
- {
- *i_pSaveReg = getMfmsrInstruction( 1 );
- }
- else
- {
- *i_pSaveReg = getMfsprInstruction( 1, i_sprNum );
- }
-
- i_pSaveReg++;
-
- *i_pSaveReg = getBranchLinkRegInstruction( );
- }
- while(0);
+ MY_INF( "<< proc_stop_save_scom" );
return l_rc;
}
-//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------------------------------
-StopReturnCode_t p9_stop_save_cpureg_control( void* i_pImage,
- const uint64_t i_pir,
- const uint32_t i_saveRegVector )
+StopReturnCode_t proc_stop_save_cpureg_control( void* i_pImage,
+ const uint64_t i_pir,
+ const uint32_t i_saveRegVector )
{
StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
uint32_t l_coreId = 0;
@@ -1419,6 +1451,7 @@ StopReturnCode_t p9_stop_save_cpureg_control( void* i_pImage,
uint32_t * l_pTempWord = NULL;
SmfHomerSection_t* l_pHomer = NULL;
uint8_t l_selfRestVer = 0;
+ MY_INF(">> proc_stop_save_cpureg_control" );
do
{
@@ -1522,134 +1555,168 @@ StopReturnCode_t p9_stop_save_cpureg_control( void* i_pImage,
}
while(0);
+ MY_INF("<< proc_stop_save_cpureg_control" );
+
return l_rc;
+
}
//-----------------------------------------------------------------------------------------------------
-StopReturnCode_t p9_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos )
+StopReturnCode_t proc_stop_save_cpureg( void* const i_pImage,
+ const CpuReg_t i_regId,
+ const uint64_t i_regData,
+ const uint64_t i_pir )
{
- StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
- uint32_t* l_pRestoreStart = NULL;
- void* l_pTempLoc = NULL;
- SmfHomerSection_t* l_pHomer = NULL;
- uint32_t l_threadPos = 0;
- uint32_t l_lookUpKey = 0;
- uint32_t l_sprIndex = 0;
- uint8_t l_selfRestVer = 0;
+ MY_INF(">> proc_stop_save_cpureg" );
- MY_INF( ">> p9_stop_init_cpureg" );
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; // procedure return code
+ HomerSection_t* chipHomer = NULL;
+ SmfHomerSection_t* smfChipHomer = NULL;
do
{
- if( !i_pImage )
+ uint32_t threadId = 0;
+ uint32_t coreId = 0;
+ uint32_t lookUpKey = 0;
+ void* pSprEntryLocation = NULL; // an offset w.r.t. to start of image
+ void* pThreadLocation = NULL;
+ bool threadScopeReg = false;
+ uint8_t l_urmorFix = false;
+ uint64_t l_sprValue = 0;
+ uint8_t l_selfRestVer = 0;
+
+
+ l_rc = getCoreAndThread( i_pImage, i_pir, &coreId, &threadId );
+
+ if( l_rc )
{
- l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ MY_ERR("Failed to determine Core Id and Thread Id from PIR 0x%016llx",
+ i_pir);
break;
}
- if( i_corePos > MAX_CORE_ID_SUPPORTED )
+ MY_INF( " PIR 0x%016llx coreId %d threadid %d "
+ " registerId %d", i_pir, coreId,
+ threadId, i_regId );
+
+ // First of all let us validate all input arguments.
+ l_rc = validateSprImageInputs( i_pImage,
+ i_regId,
+ coreId,
+ &threadId,
+ &threadScopeReg );
+
+ if( l_rc )
{
- l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ // Error: bad argument traces out error code
+ MY_ERR("Bad input argument rc %d", l_rc );
+
break;
}
- l_pHomer = ( SmfHomerSection_t * ) i_pImage;
+ l_urmorFix = *(uint8_t*)((uint8_t*)i_pImage + CPMR_HOMER_OFFSET + CPMR_URMOR_FIX_BYTE);
l_selfRestVer = *(uint8_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_SELF_RESTORE_VER_BYTE );
- for( l_sprIndex = 0; l_sprIndex < MAX_SPR_SUPPORTED; l_sprIndex++ )
+ if( l_selfRestVer )
{
- //Check if a given SPR needs to be self-saved each time on STOP entry
-
- l_lookUpKey = genKeyForSprLookup( ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId );
+ smfChipHomer = ( SmfHomerSection_t*)i_pImage;
- if( g_sprRegister[l_sprIndex].iv_isThreadScope )
+ if( threadScopeReg )
{
- for( l_threadPos = 0; l_threadPos < MAX_THREADS_PER_CORE; l_threadPos++ )
- {
- l_pRestoreStart =
- (uint32_t*)&l_pHomer->iv_coreThreadRestore[i_corePos].iv_threadRestoreArea[l_threadPos][0];
-
- l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
- g_sprRegister[l_sprIndex].iv_isThreadScope,
- &l_pTempLoc,
- l_selfRestVer );
-
- if( l_rc )
- {
- MY_ERR( "Thread SPR lookup failed in p9_stop_init_cpureg SPR %d Core %d Thread %d Index %d",
- g_sprRegister[l_sprIndex].iv_sprId, i_corePos, l_threadPos, l_sprIndex );
- break;
- }
-
- l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
- ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId,
- 0x00,
- INIT_SPR_REGION );
-
- if( l_rc )
- {
- MY_ERR( "Thread SPR region init failed. Core %d SPR Id %d",
- i_corePos, g_sprRegister[l_sprIndex].iv_sprId );
- break;
- }
-
- }//end for thread
-
- if( l_rc )
- {
- break;
- }
-
- }//end if SPR threadscope
+ pThreadLocation =
+ &(smfChipHomer->iv_coreThreadRestore[coreId].iv_threadRestoreArea[threadId][0]);
+ }
else
{
- l_pRestoreStart = (uint32_t*)&l_pHomer->iv_coreThreadRestore[i_corePos].iv_coreRestoreArea[0];
+ pThreadLocation =
+ &(smfChipHomer->iv_coreThreadRestore[coreId].iv_coreRestoreArea[0]);
+ }
+ }
+ else //Old fips or OPAL release that doesn't support SMF
+ {
+ chipHomer = (HomerSection_t*)i_pImage;
- l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
- g_sprRegister[l_sprIndex].iv_isThreadScope,
- &l_pTempLoc, l_selfRestVer );
+ if( threadScopeReg )
+ {
+ pThreadLocation =
+ &(chipHomer->iv_coreThreadRestore[coreId][threadId].iv_threadArea[0]);
+ }
+ else
+ {
+ pThreadLocation =
+ &(chipHomer->iv_coreThreadRestore[coreId][threadId].iv_coreArea[0]);
+ }
+ }
- if( l_rc )
- {
- MY_ERR( "Core SPR lookup failed in p9_stop_init_cpureg" );
- break;
- }
+ if( ( SWIZZLE_4_BYTE(BLR_INST) == *(uint32_t*)pThreadLocation ) ||
+ ( SWIZZLE_4_BYTE(ATTN_OPCODE) == *(uint32_t*) pThreadLocation ) )
+ {
+ // table for the given core id doesn't exist. It needs to be
+ // defined.
+ pSprEntryLocation = pThreadLocation;
+ }
+ else
+ {
+ // an SPR restore section for given core already exists
+ lookUpKey = genKeyForSprLookup( i_regId );
+ l_rc = lookUpSprInImage( (uint32_t*)pThreadLocation,
+ lookUpKey,
+ threadScopeReg,
+ &pSprEntryLocation,
+ l_selfRestVer );
+ }
- l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
- ( CpuReg_t )g_sprRegister[l_sprIndex].iv_sprId,
- 0x00,
- INIT_SPR_REGION );
+ if( l_rc )
+ {
+ MY_ERR("Invalid or corrupt SPR entry. CoreId 0x%08x threadId ",
+ "0x%08x regId 0x%08x lookUpKey 0x%08x pThreadLocation 0x%08x"
+ , coreId, threadId, i_regId, lookUpKey, pThreadLocation );
+ break;
+ }
- if( l_rc )
- {
- MY_ERR( "Core SPR region init failed. Core %d SPR Id %d SPR Index %d",
- i_corePos, g_sprRegister[l_sprIndex].iv_sprId, l_sprIndex );
- break;
- }
+ if( ( P9_STOP_SPR_URMOR == i_regId ) && ( l_urmorFix ) )
+ {
+ l_sprValue = i_regData - URMOR_CORRECTION;
+ }
+ else
+ {
+ l_sprValue = i_regData;
+ }
- }// end else
+ l_rc = updateSprEntryInImage( (uint32_t*) pSprEntryLocation,
+ i_regId,
+ l_sprValue,
+ UPDATE_SPR_ENTRY );
- }// end for l_sprIndex
+ if( l_rc )
+ {
+ MY_ERR( " Failed to update the SPR entry of PIR 0x%08x reg"
+ "0x%08x", i_pir, i_regId );
+ break;
+ }
}
while(0);
- MY_INF( "<< p9_stop_init_cpureg" );
+ MY_INF("<< proc_stop_save_cpureg" );
+
return l_rc;
}
//-----------------------------------------------------------------------------------------------------
-StopReturnCode_t p9_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos )
+StopReturnCode_t proc_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos )
{
+
StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
uint32_t* l_pSaveStart = NULL;
SmfHomerSection_t * l_pHomer = NULL;
uint32_t l_threadPos = 0;
uint32_t l_sprBitPos = 0;
uint32_t l_sprIndexAdj = 0;
- MY_INF( ">> p9_stop_init_self_save" );
+
+ MY_INF(">> proc_stop_init_self_save" );
do
{
@@ -1756,12 +1823,74 @@ StopReturnCode_t p9_stop_init_self_save( void* const i_pImage, const uint32_t i
}
while(0);
- MY_INF( "<< p9_stop_init_self_save" );
+ MY_INF("<< proc_stop_init_self_save" );
+
return l_rc;
}
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_api_discover_capability( void* const i_pImage, uint64_t * o_inCompVector )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint64_t l_incompVector = 0;
+ uint32_t l_tempWord = 0;
+ *o_inCompVector = 0;
+
+ do
+ {
+ if( !i_pImage )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ l_tempWord =
+ *(uint32_t*)((uint8_t*)i_pImage + CPMR_HOMER_OFFSET + SMF_SUPPORT_SIGNATURE_OFFSET);
+
+ if( l_tempWord != SWIZZLE_4_BYTE(SMF_SELF_SIGNATURE) )
+ {
+ l_incompVector |= SMF_SUPPORT_MISSING_IN_HOMER;
+ }
+
+ l_tempWord = *(uint32_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_HEADER_SIZE );
+
+ if( l_tempWord != SWIZZLE_4_BYTE(BRANCH_BE_INST) )
+ {
+ l_incompVector |= SELF_SUPPORT_MISSING_FOR_LE_HYP;
+ }
+
+ l_tempWord = *(uint8_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_SELF_RESTORE_VER_BYTE );
+
+ if( l_tempWord < SELF_SAVE_RESTORE_VER )
+ {
+ l_incompVector |= SELF_RESTORE_VER_MISMATCH;
+ }
+
+ l_tempWord = *(uint8_t *)((uint8_t *)i_pImage + CPMR_HOMER_OFFSET + CPMR_STOP_API_VER_BYTE );
+
+ if( l_tempWord < STOP_API_CPU_SAVE_VER )
+ {
+ l_incompVector |= IPL_RUNTIME_CPU_SAVE_VER_MISMATCH;
+ }
+
+ *o_inCompVector = l_incompVector;
+
+ if( l_incompVector )
+ {
+ l_rc = STOP_SAVE_API_IMG_INCOMPATIBLE;
+ }
+
+ }while(0);
+
+ return l_rc;
+}
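Editor's note: proc_stop_api_discover_capability lets a caller detect, before using any other entry point, whether the HOMER image and the self-restore binary match what this API level expects. A hedged usage sketch; l_pHomer is a placeholder and the bit names come from VersionIncompList_t in p9_stop_api.H:

    uint64_t l_incompat = 0;
    StopReturnCode_t l_rc = proc_stop_api_discover_capability( l_pHomer, &l_incompat );

    if( STOP_SAVE_API_IMG_INCOMPATIBLE == l_rc )
    {
        if( l_incompat & SMF_SUPPORT_MISSING_IN_HOMER )
        {
            /* HOMER was built without SMF support */
        }

        if( l_incompat & SELF_RESTORE_VER_MISMATCH )
        {
            /* self-restore binary is older than this API expects */
        }
    }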
+
+} //extern "C"
+
#ifdef __cplusplus
} //namespace stopImageSection ends
-} //extern "C"
+//-----------------------------------------------------------------------------------------------------
+
#endif
diff --git a/import/chips/p9/procedures/utils/stopreg/p9_stop_api.H b/import/chips/p9/procedures/utils/stopreg/p9_stop_api.H
index 1bcede92..d64fe145 100755
--- a/import/chips/p9/procedures/utils/stopreg/p9_stop_api.H
+++ b/import/chips/p9/procedures/utils/stopreg/p9_stop_api.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2020 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -70,6 +70,26 @@ typedef enum
P9_STOP_SPR_PMCR = 884, // core register
P9_STOP_SPR_HID = 1008, // core register
P9_STOP_SPR_MSR = 2000, // thread register
+
+ //enum members which are project agnostic
+ PROC_STOP_SPR_DAWR = 180, // thread register
+ PROC_STOP_SPR_CIABR = 187, // thread register
+ PROC_STOP_SPR_DAWRX = 188, // thread register
+ PROC_STOP_SPR_HSPRG0 = 304, // thread register
+ PROC_STOP_SPR_HRMOR = 313, // core register
+ PROC_STOP_SPR_LPCR = 318, // thread register
+ PROC_STOP_SPR_HMEER = 337, // core register
+ PROC_STOP_SPR_PTCR = 464, // core register
+ PROC_STOP_SPR_USPRG0 = 496, // thread register
+ PROC_STOP_SPR_USPRG1 = 497, // thread register
+ PROC_STOP_SPR_URMOR = 505, // core register
+ PROC_STOP_SPR_SMFCTRL = 511, // thread register
+ PROC_STOP_SPR_LDBAR = 850, // thread register
+ PROC_STOP_SPR_PSSCR = 855, // thread register
+ PROC_STOP_SPR_PMCR = 884, // core register
+ PROC_STOP_SPR_HID = 1008, // core register
+ PROC_STOP_SPR_MSR = 2000, // thread register
+
} CpuReg_t;
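Editor's note: the new PROC_STOP_SPR_* enumerators reuse the numeric values of the existing P9_STOP_SPR_* members, so either name selects the same SPR restore entry; the duplication only removes the chip name from project-agnostic callers. A one-line illustration (l_pHomer, l_msrValue and l_pir are placeholders):

    /* PROC_STOP_SPR_MSR == P9_STOP_SPR_MSR == 2000, so either name works here. */
    StopReturnCode_t l_rc = proc_stop_save_cpureg( l_pHomer, PROC_STOP_SPR_MSR, l_msrValue, l_pir );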
/**
@@ -94,6 +114,7 @@ typedef enum
STOP_SAVE_FAIL = 14, // for internal failure within firmware.
STOP_SAVE_SPR_ENTRY_MISSING = 15,
STOP_SAVE_SPR_BIT_POS_RESERVE = 16,
+ STOP_SAVE_API_IMG_INCOMPATIBLE = 18,
} StopReturnCode_t;
/**
@@ -110,7 +131,20 @@ typedef enum
P9_STOP_SCOM_RESET = 6,
P9_STOP_SCOM_OR_APPEND = 7,
P9_STOP_SCOM_AND_APPEND = 8,
- P9_STOP_SCOM_OP_MAX = 9
+ P9_STOP_SCOM_OP_MAX = 9,
+
+ //enum members which are project agnostic
+ PROC_STOP_SCOM_OP_MIN = 0,
+ PROC_STOP_SCOM_APPEND = 1,
+ PROC_STOP_SCOM_REPLACE = 2,
+ PROC_STOP_SCOM_OR = 3,
+ PROC_STOP_SCOM_AND = 4,
+ PROC_STOP_SCOM_NOOP = 5,
+ PROC_STOP_SCOM_RESET = 6,
+ PROC_STOP_SCOM_OR_APPEND = 7,
+ PROC_STOP_SCOM_AND_APPEND = 8,
+ PROC_STOP_SCOM_OP_MAX = 9,
+
} ScomOperation_t;
/**
@@ -123,7 +157,15 @@ typedef enum
P9_STOP_SECTION_EQ_SCOM = 2,
P9_STOP_SECTION_L2 = 3,
P9_STOP_SECTION_L3 = 4,
- P9_STOP_SECTION_MAX = 5
+ P9_STOP_SECTION_MAX = 5,
+
+ //enum members which are project agnostic
+ PROC_STOP_SECTION_MIN = 0,
+ PROC_STOP_SECTION_CORE_SCOM = 1,
+ PROC_STOP_SECTION_EQ_SCOM = 2,
+ PROC_STOP_SECTION_L2 = 3,
+ PROC_STOP_SECTION_L3 = 4,
+ PROC_STOP_SECTION_MAX = 5,
} ScomSection_t;
/**
@@ -157,6 +199,21 @@ typedef enum
BIT_POS_USPRG1 = 30,
} SprBitPositionList_t;
+/**
+ * @brief List of major incompatibilities between API versions.
+ * @note The STOP APIs assume a specific HOMER layout, a certain
+ * level of CME-SGPE hcode and a certain version of the self-save restore
+ * binary. A mismatch can break the STOP function.
+ */
+
+typedef enum
+{
+ SMF_SUPPORT_MISSING_IN_HOMER = 0x01,
+ SELF_SUPPORT_MISSING_FOR_LE_HYP = 0x02,
+ IPL_RUNTIME_CPU_SAVE_VER_MISMATCH = 0x04,
+ SELF_RESTORE_VER_MISMATCH = 0x08,
+} VersionIncompList_t;
+
#ifdef __cplusplus
extern "C" {
@@ -227,13 +284,87 @@ p9_stop_save_cpureg_control( void* i_pImage, const uint64_t i_pir,
* @brief initializes self-save region with specific instruction.
* @param[in] i_pImage start address of homer image of P9 chip.
* @param[in] i_corePos physical core's relative position within processor chip.
- * @return STOP_SAVE_SUCCESS SUCCESS if self-save is initialized successfully,
+ * @return STOP_SAVE_SUCCESS if self-save is initialized successfully,
* error code otherwise.
* @note API is intended only for use case of HOMER build. There is no explicit
* effort to support any other use case.
*/
StopReturnCode_t p9_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos );
+/**
+ * @brief creates a SCOM restore entry for a given SCOM address in HOMER.
+ * @param i_pImage points to start address of HOMER image.
+ * @param i_scomAddress address associated with SCOM restore entry.
+ * @param i_scomData data associated with SCOM restore entry.
+ * @param i_operation operation type requested for API.
+ * @param i_section section of HOMER in which restore entry needs to be created.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for creating a SCOM restore entry in HOMER. It is agnostic to
+ * the generation of the POWER processor.
+ */
+
+StopReturnCode_t proc_stop_save_scom( void* const i_pImage,
+ const uint32_t i_scomAddress,
+ const uint64_t i_scomData,
+ const ScomOperation_t i_operation,
+ const ScomSection_t i_section );
+
+/**
+ * @brief initializes the self-restore region of HOMER.
+ * @param[in] i_pImage points to base of HOMER image.
+ * @param[in] i_corePos position of the physical core.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for initializing the self-restore region in HOMER. It is agnostic to
+ * the generation of the POWER processor.
+ */
+StopReturnCode_t proc_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos );
+
+/**
+ * @brief enables self-save for a given set of SPRs.
+ * @param[in] i_pImage points to start address of HOMER image.
+ * @param[in] i_pir PIR value associated with core and thread.
+ * @param[in] i_saveRegVector bit vector representing the SPRs that need to be self-saved.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for enabling self-save of SPRs and it is agnostic to
+ * the generation of the POWER processor.
+ */
+StopReturnCode_t proc_stop_save_cpureg_control( void* i_pImage,
+ const uint64_t i_pir,
+ const uint32_t i_saveRegVector );
+
+/**
+ * @brief creates an SPR restore entry in HOMER.
+ * @param[in] i_pImage points to start address of HOMER image.
+ * @param[in] i_regId id of the SPR for which a restore entry is requested.
+ * @param[in] i_regData value to which the SPR needs to be restored.
+ * @param[in] i_pir PIR value associated with core and thread.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for creating an SPR restore entry and it is agnostic to
+ * the generation of the POWER processor.
+ */
+StopReturnCode_t proc_stop_save_cpureg( void* const i_pImage,
+ const CpuReg_t i_regId,
+ const uint64_t i_regData,
+ const uint64_t i_pir );
+
+/**
+ * @brief initializes self-save region with specific instruction.
+ * @param[in] i_pImage start address of homer image.
+ * @param[in] i_corePos physical core's relative position within processor chip.
+ * @return STOP_SAVE_SUCCESS if self-save is initialized successfully,
+ * error code otherwise.
+ * @note API is project agnostic and is intended only for use case of HOMER build.
+ * There is no explicit effort to support any other use case.
+ */
+StopReturnCode_t proc_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos );
+
+/**
+ * @brief verifies if the API is compatible with the current HOMER image.
+ * @param[in] i_pImage points to the start of the HOMER image of the P9 chip.
+ * @param[out] o_inCompVector list of incompatibilities found.
+ * @return STOP_SAVE_SUCCESS if the API succeeds, error code otherwise.
+ */
+StopReturnCode_t proc_stop_api_discover_capability( void* const i_pImage, uint64_t* o_inCompVector );
+
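Editor's note: taken together, the new project-agnostic entry points mirror the legacy p9_* flow. A possible ordering for a HOMER build, sketched under the assumption that l_pHomer, l_corePos, l_pir, l_lpcrValue and a pre-built l_saveRegVector (derived from SprBitPositionList_t positions) are supplied by the caller; error handling is elided:

    uint64_t l_incompat = 0;

    if( STOP_SAVE_SUCCESS == proc_stop_api_discover_capability( l_pHomer, &l_incompat ) )
    {
        proc_stop_init_cpureg( l_pHomer, l_corePos );      /* seed the SPR restore region */
        proc_stop_init_self_save( l_pHomer, l_corePos );   /* seed the self-save region   */

        /* request hcode self-save of the selected SPRs on every STOP entry */
        proc_stop_save_cpureg_control( l_pHomer, l_pir, l_saveRegVector );

        /* create or update an SPR restore entry with an explicit value */
        proc_stop_save_cpureg( l_pHomer, PROC_STOP_SPR_LPCR, l_lpcrValue, l_pir );
    }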
#ifdef __cplusplus
} // extern "C"
}; // namespace stopImageSection ends
diff --git a/import/chips/p9/procedures/utils/stopreg/selfRest.bin b/import/chips/p9/procedures/utils/stopreg/selfRest.bin
index dfc7e80c..688b6917 100755
--- a/import/chips/p9/procedures/utils/stopreg/selfRest.bin
+++ b/import/chips/p9/procedures/utils/stopreg/selfRest.bin
Binary files differ
diff --git a/import/chips/p9/procedures/utils/stopreg/selfRest.list b/import/chips/p9/procedures/utils/stopreg/selfRest.list
index c9ce9ea1..f3b99851 100644
--- a/import/chips/p9/procedures/utils/stopreg/selfRest.list
+++ b/import/chips/p9/procedures/utils/stopreg/selfRest.list
@@ -1,5 +1,5 @@
-/esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o: file format elf64-powerpc
+/esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o: file format elf64-powerpc
Disassembly of section .selfRestore:
@@ -8,38 +8,54 @@ Disassembly of section .selfRestore:
...
0000000000000100 <_sreset_hndlr>:
- 100: 7e a0 00 a6 mfmsr r21
- 104: 62 b5 10 00 ori r21,r21,4096
- 108: 7a b7 57 e3 rldicl. r23,r21,42,63
- 10c: 41 82 00 2c beq 138 <hv_core_init>
-
-0000000000000110 <uv_core_check>:
- 110: 3a 20 00 01 li r17,1
- 114: 7e 1f 7a a6 mfspr r16,511
- 118: 7a 08 0f e1 rldicl. r8,r16,1,63
- 11c: 41 82 00 14 beq 130 <uv_init_error>
- 120: 3a 20 00 02 li r17,2
- 124: 7a 08 07 a0 clrldi r8,r16,62
- 128: 2c 08 00 02 cmpwi r8,2
- 12c: 41 82 00 20 beq 14c <uv_core_init>
-
-0000000000000130 <uv_init_error>:
- 130: 7e 39 8b 78 mr r25,r17
- 134: 00 00 02 00 attn
-
-0000000000000138 <hv_core_init>:
- 138: 7e 99 4a a6 mfspr r20,313
- 13c: 3a 34 20 00 addi r17,r20,8192
- 140: 7e 3a 03 a6 mtsrr0 r17
- 144: 7e bb 03 a6 mtsrr1 r21
- 148: 4c 00 00 24 rfid
-
-000000000000014c <uv_core_init>:
- 14c: 7e 99 7a a6 mfspr r20,505
- 150: 39 14 20 00 addi r8,r20,8192
- 154: 7d 1a 7b a6 mtspr 506,r8
- 158: 7e bb 7b a6 mtspr 507,r21
- 15c: 4c 00 02 64 .long 0x4c000264
+ 100: 48 00 00 20 b 120 <big_endian_start>
+
+0000000000000104 <little_endian_start>:
+ 104: a6 4a 39 7c lhzu r18,14716(r10)
+ 108: a6 00 a0 7e lhzu r16,-24450(0)
+ 10c: a4 07 b5 7a lhzu r0,-19078(r7)
+ 110: a6 03 bb 7e lhzu r16,-17538(r3)
+ 114: 20 01 21 38 subfic r0,r1,8504
+ 118: a6 03 3a 7c lhzu r16,14972(r3)
+ 11c: 24 00 00 4c dozi r0,r0,76
+
+0000000000000120 <big_endian_start>:
+ 120: 7c 30 fa a6 mfspr r1,1008
+ 124: 39 00 00 00 li r8,0
+ 128: 79 01 d9 0e rldimi r1,r8,59,4
+ 12c: 7c 30 fb a6 mtspr 1008,r1
+ 130: 7e a0 00 a6 mfmsr r21
+ 134: 62 b5 10 00 ori r21,r21,4096
+ 138: 7a b7 57 e3 rldicl. r23,r21,42,63
+ 13c: 41 82 00 2c beq 168 <hv_core_init>
+
+0000000000000140 <uv_core_check>:
+ 140: 3a 20 00 01 li r17,1
+ 144: 7e 1f 7a a6 mfspr r16,511
+ 148: 7a 08 0f e1 rldicl. r8,r16,1,63
+ 14c: 41 82 00 14 beq 160 <uv_init_error>
+ 150: 3a 20 00 02 li r17,2
+ 154: 7a 08 07 a0 clrldi r8,r16,62
+ 158: 2c 08 00 02 cmpwi r8,2
+ 15c: 41 82 00 20 beq 17c <uv_core_init>
+
+0000000000000160 <uv_init_error>:
+ 160: 7e 39 8b 78 mr r25,r17
+ 164: 00 00 02 00 attn
+
+0000000000000168 <hv_core_init>:
+ 168: 7e 99 4a a6 mfspr r20,313
+ 16c: 3a 34 20 00 addi r17,r20,8192
+ 170: 7e 3a 03 a6 mtsrr0 r17
+ 174: 7e bb 03 a6 mtsrr1 r21
+ 178: 4c 00 00 24 rfid
+
+000000000000017c <uv_core_init>:
+ 17c: 7e 99 7a a6 mfspr r20,505
+ 180: 39 14 20 00 addi r8,r20,8192
+ 184: 7d 1a 7b a6 mtspr 506,r8
+ 188: 7e bb 7b a6 mtspr 507,r21
+ 18c: 4c 00 02 64 .long 0x4c000264
...
200: 00 00 02 00 attn
...
@@ -67,7 +83,7 @@ Disassembly of section .selfRestore:
...
b00: 00 00 02 00 attn
...
- c00: 00 00 02 00 attn
+ c00: 4b ff f5 00 b 100 <_sreset_hndlr>
...
d00: 00 00 02 00 attn
...
@@ -262,7 +278,7 @@ Disassembly of section .selfRestore:
219c: 40 82 f0 68 bne 1204 <save_restore_done>
00000000000021a0 <save_core_spr>:
- 21a0: 3b ec 00 08 addi r31,r12,8
+ 21a0: 3b ec 00 28 addi r31,r12,40
21a4: 7d e8 03 a6 mtlr r15
21a8: 4e 80 00 21 blrl
21ac: 4b ff f0 58 b 1204 <save_restore_done>
diff --git a/import/chips/p9/procedures/utils/stopreg/selfRest.map b/import/chips/p9/procedures/utils/stopreg/selfRest.map
index 205d6895..11eee235 100644
--- a/import/chips/p9/procedures/utils/stopreg/selfRest.map
+++ b/import/chips/p9/procedures/utils/stopreg/selfRest.map
@@ -13,16 +13,16 @@ Linker script and memory map
.selfRestore 0x0000000000000000 0x2338
*(.selfRestore)
- .selfRestore 0x0000000000000000 0x2338 /esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
+ .selfRestore 0x0000000000000000 0x2338 /esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
0x0000000000000000 _start
-LOAD /esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
-OUTPUT(/esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/selfRest.bin binary)
+LOAD /esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
+OUTPUT(/esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/selfRest.bin binary)
.text 0x0000000000002338 0x0
- .text 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
+ .text 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
.data 0x0000000000002338 0x0
- .data 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
+ .data 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
.bss 0x0000000000002338 0x0
- .bss 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
+ .bss 0x0000000000002338 0x0 /esw/san2/premjha2/ekbTest/p9_ekb/ekb/chips/p9/procedures/utils/stopreg/p9_core_save_restore_routines.o
diff --git a/import/chips/p9/xip/p9_xip_image.h b/import/chips/p9/xip/p9_xip_image.h
index 5c541e69..05133204 100644
--- a/import/chips/p9/xip/p9_xip_image.h
+++ b/import/chips/p9/xip/p9_xip_image.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HCODE Project */
/* */
-/* COPYRIGHT 2015,2018 */
+/* COPYRIGHT 2015,2019 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -1619,6 +1619,7 @@ p9_xip_decode_toc_dump(void* i_image, void* i_dump,
.macro .xip_toc, index:req, type:req, address:req, elements=1
.if (((\type) < 1) || ((\type) > P9_XIP_MAX_TYPE_INDEX))
+ // cppcheck-suppress syntaxError
.error ".xip_toc : Illegal type index"
.endif
diff --git a/rings/p9a.hw.overlays.bin b/rings/p9a.hw.overlays.bin
new file mode 100644
index 00000000..ede5efc7
--- /dev/null
+++ b/rings/p9a.hw.overlays.bin
Binary files differ
diff --git a/rings/p9a.hw.rings.bin b/rings/p9a.hw.rings.bin
new file mode 100644
index 00000000..3722ac80
--- /dev/null
+++ b/rings/p9a.hw.rings.bin
Binary files differ
diff --git a/rings/p9n.hw.rings.bin b/rings/p9n.hw.rings.bin
index 361878dc..b4eb5b7f 100644
--- a/rings/p9n.hw.rings.bin
+++ b/rings/p9n.hw.rings.bin
Binary files differ
diff --git a/tools/build/release_tag.txt b/tools/build/release_tag.txt
index dc917fa6..e81a6459 100644
--- a/tools/build/release_tag.txt
+++ b/tools/build/release_tag.txt
@@ -1 +1 @@
-hw042519a.940
+hw021820a.opmst
diff --git a/tools/build/rules.dir/chips.env.mk b/tools/build/rules.dir/chips.env.mk
index ba1e6014..a959708f 100644
--- a/tools/build/rules.dir/chips.env.mk
+++ b/tools/build/rules.dir/chips.env.mk
@@ -25,9 +25,12 @@
# Lists of chip subdirectories.
CHIPS += p9
+CHIPS += p9a
p9_CHIPID += p9n
+p9_CHIPID += p9a
p9n_EC += 20 21 22 23
+p9a_EC += 10
HW_IMAGE_VARIATIONS = hw
diff --git a/tools/imageProcs/hw_image.mk b/tools/imageProcs/hw_image.mk
index 0281aecb..feb6ced6 100644
--- a/tools/imageProcs/hw_image.mk
+++ b/tools/imageProcs/hw_image.mk
@@ -5,7 +5,7 @@
#
# OpenPOWER HCODE Project
#
-# COPYRIGHT 2016,2018
+# COPYRIGHT 2016,2019
# [+] International Business Machines Corp.
#
#
@@ -97,6 +97,9 @@ define VERIFY_SBE_RING_SECTION
$(eval $(call XIP_TOOL,check-sbe-ring-section,,$($(IMAGE)_PATH)/.$(IMAGE).report, $1 $2,$3))
endef
-$(eval $(call BUILD_HW_IMAGE,hw,p9n))
+$(eval P9_CHIP=$(filter p9,$(CHIPS)))
+$(foreach chip,$(P9_CHIP),\
+ $(foreach chipId, $($(chip)_CHIPID),\
+ $(eval $(call BUILD_HW_IMAGE,hw,$(chipId)))))