Diffstat (limited to 'hw/fsp')
-rw-r--r--  hw/fsp/Makefile.inc        |    9
-rw-r--r--  hw/fsp/fsp-codeupdate.c    | 1197
-rw-r--r--  hw/fsp/fsp-console.c       |  922
-rw-r--r--  hw/fsp/fsp-diag.c          |   58
-rw-r--r--  hw/fsp/fsp-dump.c          |  917
-rw-r--r--  hw/fsp/fsp-elog-read.c     |  520
-rw-r--r--  hw/fsp/fsp-elog-write.c    |  643
-rw-r--r--  hw/fsp/fsp-leds.c          | 1080
-rw-r--r--  hw/fsp/fsp-mdst-table.c    |  252
-rw-r--r--  hw/fsp/fsp-mem-err.c       |  415
-rw-r--r--  hw/fsp/fsp-nvram.c         |  414
-rw-r--r--  hw/fsp/fsp-op-panel.c      |  249
-rw-r--r--  hw/fsp/fsp-rtc.c           |  572
-rw-r--r--  hw/fsp/fsp-sensor.c        |  788
-rw-r--r--  hw/fsp/fsp-surveillance.c  |  209
-rw-r--r--  hw/fsp/fsp-sysparam.c      |  454
-rw-r--r--  hw/fsp/fsp.c               | 2147
17 files changed, 10846 insertions(+), 0 deletions(-)
diff --git a/hw/fsp/Makefile.inc b/hw/fsp/Makefile.inc
new file mode 100644
index 00000000..c16d0603
--- /dev/null
+++ b/hw/fsp/Makefile.inc
@@ -0,0 +1,9 @@
+SUBDIRS += hw/fsp
+
+FSP_OBJS = fsp.o fsp-console.o fsp-rtc.o fsp-nvram.o fsp-sysparam.o
+FSP_OBJS += fsp-surveillance.o fsp-codeupdate.o fsp-sensor.o
+FSP_OBJS += fsp-diag.o fsp-leds.o fsp-mem-err.o fsp-op-panel.o
+FSP_OBJS += fsp-elog-read.o fsp-elog-write.o
+FSP_OBJS += fsp-dump.o fsp-mdst-table.o
+FSP = hw/fsp/built-in.o
+$(FSP): $(FSP_OBJS:%=hw/fsp/%)
diff --git a/hw/fsp/fsp-codeupdate.c b/hw/fsp/fsp-codeupdate.c
new file mode 100644
index 00000000..be705a48
--- /dev/null
+++ b/hw/fsp/fsp-codeupdate.c
@@ -0,0 +1,1197 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <fsp-sysparam.h>
+#include <lock.h>
+#include <codeupdate.h>
+#include <device.h>
+#include <ccan/endian/endian.h>
+#include <fsp-elog.h>
+
+enum flash_state {
+ FLASH_STATE_ABSENT,
+ FLASH_STATE_INVALID, /* IPL side marker lid is invalid */
+ FLASH_STATE_READING,
+ FLASH_STATE_READ,
+};
+
+enum lid_fetch_side {
+ FETCH_T_SIDE_ONLY,
+ FETCH_P_SIDE_ONLY,
+ FETCH_BOTH_SIDE,
+};
+
+static enum flash_state flash_state = FLASH_STATE_INVALID;
+static enum lid_fetch_side lid_fetch_side = FETCH_BOTH_SIDE;
+
+/* Image buffers */
+static struct opal_sg_list *image_data;
+static uint32_t tce_start;
+static void *lid_data;
+static char validate_buf[VALIDATE_BUF_SIZE];
+
+/* TCE buffer lock */
+static struct lock flash_lock = LOCK_UNLOCKED;
+
+/* FW VPD data */
+static struct fw_image_vpd fw_vpd[2];
+
+/* Code update related sys parameters */
+static uint32_t ipl_side;
+static uint32_t hmc_managed;
+static uint32_t update_policy;
+static uint32_t in_flight_params;
+
+/* If non-NULL, this gets called just before rebooting */
+int (*fsp_flash_term_hook)(void);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_GENERAL, OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_FLASH, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_SG_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_COMMIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_MSG, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_NOTIFY, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_MARKER_LID, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static inline void code_update_tce_map(uint32_t tce_offset,
+ void *buffer, uint32_t size)
+{
+ uint32_t tlen = ALIGN_UP(size, TCE_PSIZE);
+
+ fsp_tce_map(PSI_DMA_CODE_UPD + tce_offset, buffer, tlen);
+}
+
+static inline void code_update_tce_unmap(uint32_t size)
+{
+ fsp_tce_unmap(PSI_DMA_CODE_UPD, size);
+}
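
Aside: the two helpers above round every mapping up to a whole number of TCE pages. A minimal standalone sketch of that rounding, assuming TCE_PSIZE is 4KB and the usual power-of-two ALIGN_UP definition (not part of this patch):

#include <assert.h>

#define TCE_PSIZE	0x1000
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	assert(ALIGN_UP(1, TCE_PSIZE) == 0x1000);	/* partial page -> one page */
	assert(ALIGN_UP(0x1000, TCE_PSIZE) == 0x1000);	/* exact multiple unchanged */
	assert(ALIGN_UP(0x1001, TCE_PSIZE) == 0x2000);	/* spills into a second page */
	return 0;
}
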
+
+static inline void set_def_fw_version(uint32_t side)
+{
+ strncpy(fw_vpd[side].MI_keyword, FW_VERSION_UNKNOWN, MI_KEYWORD_SIZE);
+ strncpy(fw_vpd[side].ext_fw_id, FW_VERSION_UNKNOWN, ML_KEYWORD_SIZE);
+}
+
+/*
+ * Get IPL side
+ */
+static void get_ipl_side(void)
+{
+ struct dt_node *iplp;
+ const char *side = NULL;
+
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ side = dt_prop_get_def(iplp, "cec-ipl-side", NULL);
+	printf("CUPD: IPL SIDE = %s\n", side ? side : "unknown");
+
+ if (!side || !strcmp(side, "temp"))
+ ipl_side = FW_IPL_SIDE_TEMP;
+ else
+ ipl_side = FW_IPL_SIDE_PERM;
+}
+
+
+/*
+ * Helper routines to retrieve code update related
+ * system parameters from FSP.
+ */
+
+static void inc_in_flight_param(void)
+{
+ lock(&flash_lock);
+ in_flight_params++;
+ unlock(&flash_lock);
+}
+
+static void dec_in_flight_param(void)
+{
+ lock(&flash_lock);
+ assert(in_flight_params > 0);
+ in_flight_params--;
+ unlock(&flash_lock);
+}
+
+static void got_code_update_policy(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error "
+ "retrieving code update policy: %d\n", err_len);
+ } else
+ printf("CUPD: Code update policy from FSP: %d\n",
+ update_policy);
+
+ dec_in_flight_param();
+}
+
+static void get_code_update_policy(void)
+{
+ int rc;
+
+ inc_in_flight_param();
+ rc = fsp_get_sys_param(SYS_PARAM_FLASH_POLICY, &update_policy, 4,
+ got_code_update_policy, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+ "CUPD: Error %d queueing param request\n", rc);
+ dec_in_flight_param();
+ }
+}
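
Aside: inc_in_flight_param()/dec_in_flight_param() implement a simple in-flight counter around the asynchronous parameter fetches: bump the counter before queueing, then drop it either in the completion callback or on the queueing-failure path, never both. A self-contained toy of the same pattern; all names here are stand-ins, and queue_request() invokes the callback synchronously, unlike the real fsp_get_sys_param():

#include <stdio.h>

static int in_flight;				/* mirrors in_flight_params */

static void on_done(void)
{
	in_flight--;				/* completion path */
}

/* Stand-in for fsp_get_sys_param(); non-zero means queueing failed */
static int queue_request(void (*cb)(void))
{
	cb();
	return 0;
}

static void fire_request(void)
{
	in_flight++;				/* bump before queueing, so a fast
						 * completion cannot underflow */
	if (queue_request(on_done) != 0)
		in_flight--;			/* callback will never run */
}

int main(void)
{
	fire_request();
	printf("in flight: %d\n", in_flight);	/* prints 0 */
	return 0;
}
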
+
+static void got_platform_hmc_managed(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error "
+ "retrieving hmc managed status: %d\n", err_len);
+ } else
+ printf("CUPD: HMC managed status from FSP: %d\n", hmc_managed);
+
+ dec_in_flight_param();
+}
+
+static void get_platform_hmc_managed(void)
+{
+ int rc;
+
+ inc_in_flight_param();
+ rc = fsp_get_sys_param(SYS_PARAM_HMC_MANAGED, &hmc_managed, 4,
+ got_platform_hmc_managed, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+			"CUPD: Error %d queueing param request\n", rc);
+ dec_in_flight_param();
+ }
+}
+
+static int64_t code_update_check_state(void)
+{
+ switch(flash_state) {
+ case FLASH_STATE_ABSENT:
+ return OPAL_HARDWARE;
+ case FLASH_STATE_INVALID:
+ return OPAL_INTERNAL_ERROR;
+ case FLASH_STATE_READING:
+ return OPAL_BUSY;
+ default:
+ break;
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Get common marker LID additional data section
+ */
+static void *get_adf_sec_data(struct com_marker_adf_sec *adf_sec,
+ uint32_t name)
+{
+ struct com_marker_adf_header *adf_header;
+ int i;
+
+ adf_header = (void *)adf_sec->adf_data;
+ for (i = 0; i < be32_to_cpu(adf_sec->adf_cnt); i++) {
+ if (be32_to_cpu(adf_header->name) == name)
+ return adf_header;
+
+ adf_header = (void *)adf_header + be32_to_cpu(adf_header->size);
+ }
+ return NULL;
+}
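
Aside: get_adf_sec_data() walks variable-length records, each carrying its own big-endian name and total size, so the next record starts size bytes after the current one. A host-endian toy of the same walk (the real code byte-swaps with be32_to_cpu); struct rec is an invented layout, not the real com_marker_adf_header:

#include <stdint.h>
#include <stddef.h>

struct rec {
	uint32_t name;		/* record identifier */
	uint32_t size;		/* total record size, header included */
	/* payload follows */
};

struct rec *find_rec(void *base, uint32_t count, uint32_t name)
{
	struct rec *r = base;
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (r->name == name)
			return r;
		/* hop over the whole record, payload and all */
		r = (struct rec *)((char *)r + r->size);
	}
	return NULL;
}
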
+
+/*
+ * Parse common marker LID to get FW version details
+ *
+ * Note:
+ * At present, we are parsing "Service Pack Nomenclature ADF"
+ * section only. If we are adding FW IP support, then we have
+ * to parse "Firmware IP Protection ADF" as well.
+ */
+static void parse_marker_lid(uint32_t side)
+{
+ struct com_marker_header *header;
+ struct com_marker_mi_section *mi_sec;
+ struct com_marker_adf_sec *adf_sec;
+ struct com_marker_adf_sp *adf_sp;
+
+ header = (void *)lid_data;
+
+ /* Get MI details */
+ mi_sec = (void *)header + be32_to_cpu(header->MI_offset);
+ /*
+ * If Marker LID is invalid, then FSP will return a Marker
+ * LID with ASCII zeros for the entire MI keyword.
+ */
+ if (mi_sec->MI_keyword[0] == '0')
+ return;
+
+ strncpy(fw_vpd[side].MI_keyword, mi_sec->MI_keyword, MI_KEYWORD_SIZE);
+ fw_vpd[side].MI_keyword[MI_KEYWORD_SIZE - 1] = '\0';
+ printf("CUPD: %s side MI Keyword = %s\n",
+ side == 0x00 ? "P" : "T", fw_vpd[side].MI_keyword);
+
+ /* Get ML details */
+ adf_sec = (void *)header + be32_to_cpu(mi_sec->adf_offset);
+ adf_sp = get_adf_sec_data(adf_sec, ADF_NAME_SP);
+ if (!adf_sp)
+ return;
+
+ strncpy(fw_vpd[side].ext_fw_id,
+ (void *)adf_sp + be32_to_cpu(adf_sp->sp_name_offset),
+ ML_KEYWORD_SIZE);
+ fw_vpd[side].ext_fw_id[ML_KEYWORD_SIZE - 1] = '\0';
+ printf("CUPD: %s side ML Keyword = %s\n",
+ side == 0x00 ? "P" : "T", fw_vpd[side].ext_fw_id);
+}
+
+static void validate_com_marker_lid(void)
+{
+ if (!strncmp(fw_vpd[ipl_side].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN))) {
+ log_simple_error(&e_info(OPAL_RC_CU_MARKER_LID),
+ "CUPD: IPL side Marker LID is not valid\n");
+ flash_state = FLASH_STATE_INVALID;
+ return;
+ }
+
+ flash_state = FLASH_STATE_READ;
+}
+
+static void fetch_lid_data_complete(struct fsp_msg *msg)
+{
+ void *buffer;
+ size_t length, chunk;
+ uint32_t lid_id, offset;
+ uint16_t id;
+ uint8_t flags, status;
+
+ status = (msg->resp->word1 >> 8) & 0xff;
+ flags = (msg->data.words[0] >> 16) & 0xff;
+ id = msg->data.words[0] & 0xffff;
+ lid_id = msg->data.words[1];
+ offset = msg->resp->data.words[1];
+ length = msg->resp->data.words[2];
+
+ printf("CUPD: Marker LID id : size : status = 0x%x : 0x%x : 0x%x\n",
+ msg->data.words[1], msg->resp->data.words[2], status);
+
+ fsp_freemsg(msg);
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS: /* Read complete, parse VPD */
+ parse_marker_lid(lid_id == P_COM_MARKER_LID_ID ? 0 : 1);
+ break;
+ case FSP_STATUS_MORE_DATA: /* More data left */
+ offset += length;
+ chunk = MARKER_LID_SIZE - offset;
+ if (chunk > 0) {
+ buffer = (void *)PSI_DMA_CODE_UPD + offset;
+ fsp_fetch_data_queue(flags, id, lid_id,
+ offset, buffer, &chunk,
+ fetch_lid_data_complete);
+ return;
+ }
+ break;
+ default: /* Fetch LID call failed */
+ break;
+ }
+
+ /* If required, fetch T side marker LID */
+ if (lid_id == P_COM_MARKER_LID_ID &&
+ lid_fetch_side == FETCH_BOTH_SIDE) {
+ length = MARKER_LID_SIZE;
+ fsp_fetch_data_queue(flags, id, T_COM_MARKER_LID_ID,
+ 0, (void *)PSI_DMA_CODE_UPD,
+ &length, fetch_lid_data_complete);
+ return;
+ }
+
+ lock(&flash_lock);
+
+ /* Validate marker LID data */
+ validate_com_marker_lid();
+ /* TCE unmap */
+ code_update_tce_unmap(MARKER_LID_SIZE);
+
+ unlock(&flash_lock);
+}
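
Aside: fetch_lid_data_complete() uses the usual firmware continuation pattern: each completion either re-queues the next chunk at offset + length or declares the transfer done. A self-contained toy, where fetch_chunk() stands in for fsp_fetch_data_queue() and a 32-byte chunk limit plays the role of the FSP's partial reads:

#include <stdio.h>

#define TOTAL	100

static void chunk_done(unsigned int offset, unsigned int len);

/* Stand-in for fsp_fetch_data_queue(): returns at most 32 bytes */
static void fetch_chunk(unsigned int offset, unsigned int len,
			void (*done)(unsigned int, unsigned int))
{
	if (len > 32)
		len = 32;
	done(offset, len);
}

static void chunk_done(unsigned int offset, unsigned int len)
{
	offset += len;
	if (offset < TOTAL) {
		/* more data left: re-queue from the new offset */
		fetch_chunk(offset, TOTAL - offset, chunk_done);
		return;
	}
	printf("fetched all %u bytes\n", offset);	/* parse here */
}

int main(void)
{
	fetch_chunk(0, TOTAL, chunk_done);
	return 0;
}
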
+
+static void fetch_com_marker_lid(void)
+{
+ size_t length = MARKER_LID_SIZE;
+ uint32_t lid_id;
+ int rc;
+
+ /* Read in progress? */
+ rc = code_update_check_state();
+ if (rc == OPAL_HARDWARE || rc == OPAL_BUSY)
+ return;
+
+ if (lid_fetch_side == FETCH_T_SIDE_ONLY) {
+ lid_id = T_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_TEMP);
+ } else if (lid_fetch_side == FETCH_P_SIDE_ONLY) {
+ lid_id = P_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_PERM);
+ } else {
+ lid_id = P_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_PERM);
+ set_def_fw_version(FW_IPL_SIDE_TEMP);
+ }
+
+ code_update_tce_map(0, lid_data, length);
+ rc = fsp_fetch_data_queue(0x00, 0x05, lid_id, 0,
+ (void *)PSI_DMA_CODE_UPD, &length,
+ fetch_lid_data_complete);
+ if (!rc)
+ flash_state = FLASH_STATE_READING;
+ else
+ flash_state = FLASH_STATE_INVALID;
+}
+
+/*
+ * Add MI and ML keyword details into DT
+ */
+#define FW_VER_SIZE 64
+static void add_opal_firmware_version(void)
+{
+ struct dt_node *dt_fw;
+ char buffer[FW_VER_SIZE];
+ int offset;
+
+ dt_fw = dt_find_by_path(dt_root, "ibm,opal/firmware");
+ if (!dt_fw)
+ return;
+
+ /* MI version */
+ offset = snprintf(buffer, FW_VER_SIZE, "MI %s %s",
+ fw_vpd[FW_IPL_SIDE_TEMP].MI_keyword,
+ fw_vpd[FW_IPL_SIDE_PERM].MI_keyword);
+ if (ipl_side == FW_IPL_SIDE_TEMP)
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_TEMP].MI_keyword);
+ else
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_PERM].MI_keyword);
+
+ dt_add_property(dt_fw, "mi-version", buffer, strlen(buffer));
+
+ /* ML version */
+ offset = snprintf(buffer, FW_VER_SIZE, "ML %s %s",
+ fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id,
+ fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id);
+ if (ipl_side == FW_IPL_SIDE_TEMP)
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id);
+ else
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id);
+
+ dt_add_property(dt_fw, "ml-version", buffer, strlen(buffer));
+}
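
Aside: each property built above holds three space-separated entries: the T-side level, the P-side level, then the booted side repeated. A toy rendering of the same snprintf chaining; the keyword values are invented examples:

#include <stdio.h>

int main(void)
{
	char buf[64];
	const char *t = "SV810_075", *p = "SV810_068";	/* hypothetical */
	int boot_is_temp = 1;
	int off;

	off = snprintf(buf, sizeof(buf), "MI %s %s", t, p);
	snprintf(buf + off, sizeof(buf) - off, " %s",
		 boot_is_temp ? t : p);
	puts(buf);	/* MI SV810_075 SV810_068 SV810_075 */
	return 0;
}
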
+
+/*
+ * This is called right before starting the payload (Linux) to
+ * ensure the common marker LID read and parsing has happened
+ * before we transfer control.
+ */
+void fsp_code_update_wait_vpd(bool is_boot)
+{
+ if (!fsp_present())
+ return;
+
+	printf("CUPD: Waiting for marker LID read completion...\n");
+
+ while(flash_state == FLASH_STATE_READING)
+ fsp_poll();
+
+	printf("CUPD: Waiting for in-flight params completion...\n");
+ while(in_flight_params)
+ fsp_poll();
+
+ if (is_boot)
+ add_opal_firmware_version();
+}
+
+static int code_update_start(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+ uint16_t comp = 0x00; /* All components */
+ uint8_t side = OPAL_COMMIT_TMP_SIDE; /* Temporary side */
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_START, 1, side << 16 | comp);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_START message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_write_lid(uint32_t lid_id, uint32_t size)
+{
+ struct fsp_msg *msg;
+ int rc, n_pairs = 1;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_WRITE, 5, lid_id,
+ n_pairs, 0, tce_start, size);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_WRITE message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_del_lid(uint32_t lid_id)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_DEL, 1, lid_id);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_DEL message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_complete(uint32_t cmd)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(cmd, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CUPD COMPLETE message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_swap_side(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_SWAP, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_SWAP message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_set_ipl_side(void)
+{
+ struct fsp_msg *msg;
+ uint8_t side = FW_IPL_SIDE_TEMP; /* Next IPL side */
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_SET_IPL_SIDE, 1, side << 16);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_SET_IPL_SIDE message allocation failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: Setting next IPL side failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static void code_update_commit_complete(struct fsp_msg *msg)
+{
+ int rc;
+ uint8_t type;
+
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ type = (msg->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_COMMIT),
+ "CUPD: Code update commit failed, err 0x%x\n", rc);
+ return;
+ }
+
+ /* Reset cached VPD data */
+ lock(&flash_lock);
+
+ /* Find commit type */
+ if (type == 0x01) {
+ lid_fetch_side = FETCH_P_SIDE_ONLY;
+ } else if (type == 0x02)
+ lid_fetch_side = FETCH_T_SIDE_ONLY;
+ else
+ lid_fetch_side = FETCH_BOTH_SIDE;
+
+ fetch_com_marker_lid();
+
+ unlock(&flash_lock);
+}
+
+static int code_update_commit(uint32_t cmd)
+{
+ struct fsp_msg *msg;
+
+ msg = fsp_mkmsg(cmd, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: COMMIT message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(msg, code_update_commit_complete)) {
+ log_simple_error(&e_info(OPAL_RC_CU_COMMIT),
+ "CUPD: Failed to queue code update commit message\n");
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Is inband code update allowed?
+ */
+static int64_t validate_inband_policy(void)
+{
+ /* Quirk:
+ * If the code update policy is out-of-band, but the system
+ * is not HMC-managed, then inband update is allowed.
+ */
+ if (hmc_managed != PLATFORM_HMC_MANAGED)
+ return 0;
+ if (update_policy == INBAND_UPDATE_ALLOWED)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * Validate magic Number
+ */
+static int64_t validate_magic_num(uint16_t magic)
+{
+ if (magic != IMAGE_MAGIC_NUMBER)
+ return -1;
+ return 0;
+}
+
+/*
+ * Compare MI keyword to make sure candidate image
+ * is valid for this platform.
+ */
+static int64_t validate_image_version(struct update_image_header *header,
+ uint32_t *result)
+{
+ struct fw_image_vpd vpd;
+ int t_valid = 0, p_valid = 0, cton_ver = -1, ptot_ver = -1;
+
+ /* Valid flash image level? */
+ if (strncmp(fw_vpd[0].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN)) != 0)
+ p_valid = 1;
+
+ if (strncmp(fw_vpd[1].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN)) != 0)
+ t_valid = 1;
+
+ /* Validate with IPL side image */
+ vpd = fw_vpd[ipl_side];
+
+ /* Validate platform identifier (first two char of MI keyword) */
+ if (strncmp(vpd.MI_keyword, header->MI_keyword_data, 2) != 0) {
+ *result = VALIDATE_INVALID_IMG;
+ return OPAL_SUCCESS;
+ }
+
+ /* Don't flash different FW series (like P7 image on P8) */
+ if (vpd.MI_keyword[2] != header->MI_keyword_data[2]) {
+ *result = VALIDATE_INVALID_IMG;
+ return OPAL_SUCCESS;
+ }
+
+ /* Get current to new version difference */
+ cton_ver = strncmp(vpd.MI_keyword + 3, header->MI_keyword_data + 3, 6);
+
+ /* Get P to T version difference */
+ if (t_valid && p_valid)
+ ptot_ver = strncmp(fw_vpd[0].MI_keyword + 3,
+ fw_vpd[1].MI_keyword + 3, 6);
+
+ /* Update validation result */
+ if (ipl_side == FW_IPL_SIDE_TEMP) {
+ if (!ptot_ver && cton_ver > 0) /* downgrade T side */
+ *result = VALIDATE_TMP_UPDATE_DL;
+ else if (!ptot_ver && cton_ver <= 0) /* upgrade T side */
+ *result = VALIDATE_TMP_UPDATE;
+ else if (cton_ver > 0) /* Implied commit & downgrade T side */
+ *result = VALIDATE_TMP_COMMIT_DL;
+ else /* Implied commit & upgrade T side */
+ *result = VALIDATE_TMP_COMMIT;
+ } else {
+ if (!t_valid) /* Current unknown */
+ *result = VALIDATE_CUR_UNKNOWN;
+ else if (cton_ver > 0) /* downgrade FW version */
+ *result = VALIDATE_TMP_UPDATE_DL;
+ else /* upgrade FW version */
+ *result = VALIDATE_TMP_UPDATE;
+ }
+ return OPAL_SUCCESS;
+}
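
Aside: the offsets used above imply the MI keyword layout: two characters of platform identifier, one character of FW series, then a six-character level that strncmp() orders lexically, with a negative result meaning the candidate is newer (the same sense as cton_ver above). A toy with invented keyword values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cur = "SV810_075";	/* hypothetical running level */
	const char *cand = "SV810_081";	/* hypothetical candidate */

	printf("platform ok: %d\n", strncmp(cur, cand, 2) == 0);
	printf("series ok  : %d\n", cur[2] == cand[2]);
	/* < 0: upgrade, 0: same, > 0: downgrade */
	printf("cton_ver   : %d\n", strncmp(cur + 3, cand + 3, 6));
	return 0;
}
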
+
+/*
+ * Validate candidate image
+ */
+static int validate_candidate_image(uint64_t buffer,
+ uint32_t size, uint32_t *result)
+{
+ struct update_image_header *header;
+ int rc = OPAL_PARAMETER;
+
+ if (size < VALIDATE_BUF_SIZE)
+ goto out;
+
+ rc = code_update_check_state();
+ if (rc != OPAL_SUCCESS)
+ goto out;
+
+ if (validate_inband_policy() != 0) {
+ *result = VALIDATE_FLASH_AUTH;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ memcpy(validate_buf, (void *)buffer, VALIDATE_BUF_SIZE);
+ header = (struct update_image_header *)validate_buf;
+
+	if (validate_magic_num(be16_to_cpu(header->magic)) != 0) {
+ *result = VALIDATE_INVALID_IMG;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+ rc = validate_image_version(header, result);
+out:
+ return rc;
+}
+
+static int validate_out_buf_mi_data(void *buffer, int offset, uint32_t result)
+{
+ struct update_image_header *header = (void *)validate_buf;
+
+ /* Current T & P side MI data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "MI %s %s\n",
+ fw_vpd[1].MI_keyword, fw_vpd[0].MI_keyword);
+
+ /* New T & P side MI data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "MI %s", header->MI_keyword_data);
+ if (result == VALIDATE_TMP_COMMIT_DL ||
+ result == VALIDATE_TMP_COMMIT)
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[1].MI_keyword);
+ else
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[0].MI_keyword);
+ return offset;
+}
+
+static int validate_out_buf_ml_data(void *buffer, int offset, uint32_t result)
+{
+ struct update_image_header *header = (void *)validate_buf;
+ /* Candidate image ML data */
+ char *ext_fw_id = (void *)header->data;
+
+ /* Current T & P side ML data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "ML %s %s\n",
+ fw_vpd[1].ext_fw_id, fw_vpd[0].ext_fw_id);
+
+ /* New T & P side ML data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "ML %s", ext_fw_id);
+ if (result == VALIDATE_TMP_COMMIT_DL ||
+ result == VALIDATE_TMP_COMMIT)
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[1].ext_fw_id);
+ else
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[0].ext_fw_id);
+
+ return offset;
+}
+
+/*
+ * Copy LID data to TCE buffer
+ */
+static int get_lid_data(struct opal_sg_list *list,
+ int lid_size, int lid_offset)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *entry;
+ int length, num_entries, i, buf_pos = 0;
+ int map_act, map_size;
+ bool last = false;
+
+ /* Reset TCE start address */
+ tce_start = 0;
+
+ for (sg = list; sg; sg = sg->next) {
+ length = (sg->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return -1;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+ /*
+			 * Continue until we reach the data block which
+			 * contains the LID data
+ */
+ if (lid_offset > entry->length) {
+ lid_offset -= entry->length;
+ continue;
+ }
+
+ /*
+ * SG list entry size can be more than 4k.
+ * Map only required pages, instead of
+ * mapping entire entry.
+ */
+ map_act = entry->length;
+ map_size = entry->length;
+
+ /* First TCE mapping */
+ if (!tce_start) {
+ tce_start = PSI_DMA_CODE_UPD +
+ (lid_offset & 0xfff);
+ map_act = entry->length - lid_offset;
+ lid_offset &= ~0xfff;
+ map_size = entry->length - lid_offset;
+ }
+
+ /* Check pending LID size to map */
+ if (lid_size <= map_act) {
+				/* (map_size - map_act) gives the
+				 * offset of the LID data from the
+				 * start of the mapped page. This is
+				 * required when the LID size is <= 4k.
+				 */
+ map_size = (map_size - map_act) + lid_size;
+ last = true;
+ }
+
+			/* Adjust remaining size to map */
+ lid_size -= map_act;
+
+ /* TCE mapping */
+ code_update_tce_map(buf_pos, entry->data + lid_offset,
+ map_size);
+ buf_pos += map_size;
+ /* Reset LID offset count */
+ lid_offset = 0;
+
+ if (last)
+ return OPAL_SUCCESS;
+ }
+ } /* outer loop */
+ return -1;
+}
+
+/*
+ * If IPL side is T, then swap P & T sides to add
+ * new fix to T side.
+ */
+static int validate_ipl_side(void)
+{
+ if (ipl_side == FW_IPL_SIDE_PERM)
+ return 0;
+ return code_update_swap_side();
+}
+
+static int64_t fsp_opal_validate_flash(uint64_t buffer,
+ uint32_t *size, uint32_t *result)
+{
+ int64_t rc = 0;
+ int offset;
+
+ lock(&flash_lock);
+
+ rc = validate_candidate_image(buffer, *size, result);
+ /* Fill output buffer
+ *
+ * Format:
+ * MI<sp>current-T-image<sp>current-P-image<0x0A>
+ * MI<sp>new-T-image<sp>new-P-image<0x0A>
+ * ML<sp>current-T-image<sp>current-P-image<0x0A>
+ * ML<sp>new-T-image<sp>new-P-image<0x0A>
+ */
+ if (!rc && (*result != VALIDATE_FLASH_AUTH &&
+ *result != VALIDATE_INVALID_IMG)) {
+ /* Clear output buffer */
+ memset((void *)buffer, 0, VALIDATE_BUF_SIZE);
+
+ offset = validate_out_buf_mi_data((void *)buffer, 0, *result);
+ offset += validate_out_buf_ml_data((void *)buffer,
+ offset, *result);
+ *size = offset;
+ }
+
+ unlock(&flash_lock);
+ return rc;
+}
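
Aside: instantiating the format comment above, a successful validate of a T-side update would leave something like the following in the output buffer (each line 0x0A-terminated; all version strings here are invented):

	MI SV810_075 SV810_068		current T, current P
	MI SV810_081 SV810_068		new T, new P
	ML FW810.20 FW810.00		current T, current P
	ML FW810.21 FW810.00		new T, new P
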
+
+/* Commit/Reject T side image */
+static int64_t fsp_opal_manage_flash(uint8_t op)
+{
+ uint32_t cmd;
+ int rc;
+
+ lock(&flash_lock);
+ rc = code_update_check_state();
+ unlock(&flash_lock);
+
+ if (rc != OPAL_SUCCESS)
+ return rc;
+
+ if (op != OPAL_REJECT_TMP_SIDE && op != OPAL_COMMIT_TMP_SIDE)
+ return OPAL_PARAMETER;
+
+ if ((op == OPAL_COMMIT_TMP_SIDE && ipl_side == FW_IPL_SIDE_PERM) ||
+ (op == OPAL_REJECT_TMP_SIDE && ipl_side == FW_IPL_SIDE_TEMP))
+ return OPAL_ACTIVE_SIDE_ERR;
+
+ if (op == OPAL_COMMIT_TMP_SIDE)
+ cmd = FSP_CMD_FLASH_NORMAL;
+ else
+ cmd = FSP_CMD_FLASH_REMOVE;
+
+ return code_update_commit(cmd);
+}
+
+static int fsp_flash_firmware(void)
+{
+ struct update_image_header *header;
+ struct lid_index_entry *idx_entry;
+ struct opal_sg_list *list;
+ struct opal_sg_entry *entry;
+ int rc, i;
+
+ lock(&flash_lock);
+
+ /* Make sure no outstanding LID read is in progress */
+ rc = code_update_check_state();
+ if (rc == OPAL_BUSY)
+ fsp_code_update_wait_vpd(false);
+
+ /* Get LID Index */
+ list = image_data;
+ if (!list)
+ goto out;
+ entry = &list->entry[0];
+ header = (struct update_image_header *)entry->data;
+ idx_entry = (void *)header + be16_to_cpu(header->lid_index_offset);
+
+ /* FIXME:
+ * At present we depend on FSP to validate CRC for
+ * individual LIDs. Calculate and validate individual
+ * LID CRC here.
+ */
+
+ if (validate_ipl_side() != 0)
+ goto out;
+
+ /* Set next IPL side */
+ if (code_update_set_ipl_side() != 0)
+ goto out;
+
+ /* Start code update process */
+ if (code_update_start() != 0)
+ goto out;
+
+ /*
+ * Delete T side LIDs before writing.
+ *
+ * Note:
+ * - Applicable for FWv >= 760.
+ * - Current Code Update design is to ignore
+ * any delete lid failure, and continue with
+ * the update.
+ */
+ rc = code_update_del_lid(DEL_UPD_SIDE_LIDS);
+
+ for (i = 0; i < be16_to_cpu(header->number_lids); i++) {
+ if (be32_to_cpu(idx_entry->size) > LID_MAX_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: "
+				"LID size 0x%x is > max LID size\n",
+ be32_to_cpu(idx_entry->size));
+
+ goto abort_update;
+ }
+
+ rc = get_lid_data(list, be32_to_cpu(idx_entry->size),
+ be32_to_cpu(idx_entry->offset));
+ if (rc)
+ goto abort_update;
+
+ rc = code_update_write_lid(be32_to_cpu(idx_entry->id),
+ be32_to_cpu(idx_entry->size));
+ if (rc)
+ goto abort_update;
+
+ /* Unmap TCE */
+ code_update_tce_unmap(PSI_DMA_CODE_UPD_SIZE);
+
+ /* Next LID index */
+ idx_entry = (void *)idx_entry + sizeof(struct lid_index_entry);
+ }
+
+ /* Code update completed */
+ rc = code_update_complete(FSP_CMD_FLASH_COMPLETE);
+
+ unlock(&flash_lock);
+ return rc;
+
+abort_update:
+	log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: LID update failed. "
+		     "Aborting code update! rc:%d\n", rc);
+ rc = code_update_complete(FSP_CMD_FLASH_ABORT);
+out:
+ unlock(&flash_lock);
+ return -1;
+}
+
+static int64_t validate_sglist(struct opal_sg_list *list)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *prev_entry, *entry;
+ int length, num_entries, i;
+
+ prev_entry = NULL;
+ for (sg = list; sg; sg = sg->next) {
+ length = (sg->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return -1;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+ /* All entries must be aligned */
+ if (((uint64_t)entry->data) & 0xfff)
+ return OPAL_PARAMETER;
+
+ /* All non-terminal entries size must be aligned */
+ if (prev_entry && (prev_entry->length & 0xfff))
+ return OPAL_PARAMETER;
+
+ prev_entry = entry;
+ }
+ }
+ return OPAL_SUCCESS;
+}
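
Aside: the two checks above amount to this rule: every entry's data pointer must be 4KB-aligned, and every entry except the last must also have a 4KB-aligned length, since only the tail of the image may be ragged. A standalone restatement of the same rule:

#include <stdint.h>
#include <stdbool.h>

struct ent {
	uint64_t addr;
	uint64_t len;
};

static bool sg_ok(const struct ent *e, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (e[i].addr & 0xfff)
			return false;	/* misaligned data pointer */
		if (i != n - 1 && (e[i].len & 0xfff))
			return false;	/* only the tail may be ragged */
	}
	return true;
}
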
+
+static int64_t fsp_opal_update_flash(struct opal_sg_list *list)
+{
+ struct opal_sg_entry *entry;
+ int length, num_entries, result = 0, rc = OPAL_PARAMETER;
+
+ /* Ensure that the sg list honors our alignment requirements */
+ rc = validate_sglist(list);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_SG_LIST),
+ "CUPD: sglist fails alignment requirements\n");
+ return rc;
+ }
+
+ lock(&flash_lock);
+ if (!list) { /* Cancel update request */
+ fsp_flash_term_hook = NULL;
+ image_data = NULL;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+ length = (list->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ goto out;
+
+ /* Validate image header */
+ entry = &list->entry[0];
+ rc = validate_candidate_image((uint64_t)entry->data,
+ VALIDATE_BUF_SIZE, &result);
+ if (!rc && (result != VALIDATE_FLASH_AUTH &&
+ result != VALIDATE_INVALID_IMG)) {
+ image_data = list;
+ fsp_flash_term_hook = fsp_flash_firmware;
+ goto out;
+ }
+
+ /* Adjust return code */
+ if (result == VALIDATE_FLASH_AUTH)
+ rc = OPAL_FLASH_NO_AUTH;
+ else if (result == VALIDATE_INVALID_IMG)
+ rc = OPAL_INVALID_IMAGE;
+
+out:
+ unlock(&flash_lock);
+ return rc;
+}
+
+/*
+ * Code Update notifications
+ *
+ * Note: At present we just ACK these notifications.
+ * Reset cached VPD data if we are going to support
+ *       concurrent image maintenance in the future.
+ */
+static bool code_update_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ int rc;
+ uint32_t cmd;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_FLASH_CACHE:
+ cmd = FSP_CMD_FLASH_CACHE_RSP;
+ printf("CUPD: Update LID cache event [data = 0x%x]\n",
+ msg->data.words[0]);
+ break;
+ case FSP_CMD_FLASH_OUTC:
+ case FSP_CMD_FLASH_OUTR:
+ case FSP_CMD_FLASH_OUTS:
+ cmd = FSP_CMD_FLASH_OUT_RSP;
+ printf("CUPD: Out of band commit notify [Type = 0x%x]\n",
+ (msg->word1 >> 8) & 0xff);
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Unknown "
+ "notification [cmd = 0x%x]\n", cmd_sub_mod);
+ return false;
+ }
+
+ rc = fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ if (rc)
+ log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Failed to "
+ "queue code update notification response :%d\n", rc);
+
+ return true;
+}
+
+static struct fsp_client fsp_get_notify = {
+ .message = code_update_notify,
+};
+
+void fsp_code_update_init(void)
+{
+ if (!fsp_present()) {
+ flash_state = FLASH_STATE_ABSENT;
+ return;
+ }
+
+ /* OPAL interface */
+ opal_register(OPAL_FLASH_VALIDATE, fsp_opal_validate_flash, 3);
+ opal_register(OPAL_FLASH_MANAGE, fsp_opal_manage_flash, 1);
+ opal_register(OPAL_FLASH_UPDATE, fsp_opal_update_flash, 1);
+
+ /* register Code Update Class D3 */
+ fsp_register_client(&fsp_get_notify, FSP_MCLASS_CODE_UPDATE);
+
+ /* Flash hook */
+ fsp_flash_term_hook = NULL;
+
+ /* Fetch various code update related sys parameters */
+ get_ipl_side();
+ get_code_update_policy();
+ get_platform_hmc_managed();
+
+ /* Fetch common marker LID */
+ lid_data = memalign(TCE_PSIZE, MARKER_LID_SIZE);
+ if (!lid_data) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+ "CUPD: Failed to allocate memory for marker LID\n");
+ flash_state = FLASH_STATE_ABSENT;
+ return;
+ }
+ fetch_com_marker_lid();
+}
diff --git a/hw/fsp/fsp-console.c b/hw/fsp/fsp-console.c
new file mode 100644
index 00000000..725edcc2
--- /dev/null
+++ b/hw/fsp/fsp-console.c
@@ -0,0 +1,922 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Service Processor serial console handling code
+ */
+#include <skiboot.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <console.h>
+#include <opal.h>
+#include <timebase.h>
+#include <device.h>
+
+struct fsp_serbuf_hdr {
+ u16 partition_id;
+ u8 session_id;
+ u8 hmc_id;
+ u16 data_offset;
+ u16 last_valid;
+ u16 ovf_count;
+ u16 next_in;
+ u8 flags;
+ u8 reserved;
+ u16 next_out;
+ u8 data[];
+};
+#define SER_BUF_DATA_SIZE (0x10000 - sizeof(struct fsp_serbuf_hdr))
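
Aside: in_buf/out_buf are classic single-producer circular buffers: next_in is the producer cursor, next_out the consumer cursor, and one byte is kept empty so that the full and empty states are distinguishable. A toy of the free-space computation used by fsp_write_vserial() and fsp_console_write_buffer_space() below:

#include <stdio.h>

#define SZ	16	/* stands in for SER_BUF_DATA_SIZE */

static unsigned int space(unsigned int next_in, unsigned int next_out)
{
	return (next_out + SZ - next_in - 1) % SZ;
}

int main(void)
{
	printf("%u\n", space(0, 0));	/* empty: SZ - 1 = 15 usable bytes */
	printf("%u\n", space(15, 0));	/* full: 0 */
	printf("%u\n", space(4, 9));	/* wrapped consumer: 4 */
	return 0;
}
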
+
+struct fsp_serial {
+ bool available;
+ bool open;
+ bool has_part0;
+ bool has_part1;
+ bool log_port;
+ bool out_poke;
+ char loc_code[LOC_CODE_SIZE];
+ u16 rsrc_id;
+ struct fsp_serbuf_hdr *in_buf;
+ struct fsp_serbuf_hdr *out_buf;
+ struct fsp_msg *poke_msg;
+};
+
+#define SER_BUFFER_SIZE 0x00040000UL
+#define MAX_SERIAL 4
+
+static struct fsp_serial fsp_serials[MAX_SERIAL];
+static bool got_intf_query;
+static bool got_assoc_resp;
+static bool got_deassoc_resp;
+static struct lock fsp_con_lock = LOCK_UNLOCKED;
+static void* ser_buffer = NULL;
+
+static void fsp_console_reinit(void)
+{
+ int i;
+ void *base;
+
+	/* Initialize our data structure pointers & TCE maps */
+ base = ser_buffer;
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *ser = &fsp_serials[i];
+
+ ser->in_buf = base;
+ ser->out_buf = base + SER_BUFFER_SIZE/2;
+ base += SER_BUFFER_SIZE;
+ }
+ fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer,
+ 4 * PSI_DMA_SER0_SIZE);
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Reassociating HVSI console %d\n", i);
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (fs->rsrc_id << 16) | 1, i), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+static void fsp_close_consoles(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+
+ if (!fs->available)
+ continue;
+
+ if (fs->rsrc_id == 0xffff) /* Get clarity from benh */
+ continue;
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ fs->open = false;
+ fs->out_poke = false;
+ if (fs->poke_msg->state != fsp_msg_unused)
+ fsp_cancelmsg(fs->poke_msg);
+ fsp_freemsg(fs->poke_msg);
+ fs->poke_msg = NULL;
+ }
+ unlock(&fsp_con_lock);
+ }
+ printf("FSPCON: Closed consoles on account of FSP reset/reload\n");
+}
+
+static void fsp_pokemsg_reclaim(struct fsp_msg *msg)
+{
+ struct fsp_serial *fs = msg->user_data;
+
+ /*
+ * The poke_msg might have been "detached" from the console
+ * in vserial_close, so we need to check whether it's current
+ * before touching the state, otherwise, just free it
+ */
+ lock(&fsp_con_lock);
+ if (fs->open && fs->poke_msg == msg) {
+ if (fs->out_poke) {
+ fs->out_poke = false;
+ fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim);
+ } else
+ fs->poke_msg->state = fsp_msg_unused;
+ } else
+ fsp_freemsg(msg);
+ unlock(&fsp_con_lock);
+}
+
+/* Called with the fsp_con_lock held */
+static size_t fsp_write_vserial(struct fsp_serial *fs, const char *buf,
+ size_t len)
+{
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+ u16 old_nin = sb->next_in;
+ u16 space, chunk;
+
+ if (!fs->open)
+ return 0;
+
+ space = (sb->next_out + SER_BUF_DATA_SIZE - old_nin - 1)
+ % SER_BUF_DATA_SIZE;
+ if (space < len)
+ len = space;
+ if (!len)
+ return 0;
+
+ chunk = SER_BUF_DATA_SIZE - old_nin;
+ if (chunk > len)
+ chunk = len;
+ memcpy(&sb->data[old_nin], buf, chunk);
+ if (chunk < len)
+ memcpy(&sb->data[0], buf + chunk, len - chunk);
+ lwsync();
+ sb->next_in = (old_nin + len) % SER_BUF_DATA_SIZE;
+ sync();
+
+ if (sb->next_out == old_nin && fs->poke_msg) {
+ if (fs->poke_msg->state == fsp_msg_unused)
+ fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim);
+ else
+ fs->out_poke = true;
+ }
+#ifndef DISABLE_CON_PENDING_EVT
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT,
+ OPAL_EVENT_CONSOLE_OUTPUT);
+#endif
+ return len;
+}
+
+#ifdef DVS_CONSOLE
+static int fsp_con_port = -1;
+static bool fsp_con_full;
+
+/*
+ * This is called by the code in console.c without the con_lock
+ * held. However, it can be called as the result of any printf,
+ * so any other lock might be held, including possibly the
+ * FSP lock.
+ */
+static size_t fsp_con_write(const char *buf, size_t len)
+{
+ size_t written;
+
+ if (fsp_con_port < 0)
+ return 0;
+
+ lock(&fsp_con_lock);
+ written = fsp_write_vserial(&fsp_serials[fsp_con_port], buf, len);
+ fsp_con_full = (written < len);
+ unlock(&fsp_con_lock);
+
+ return written;
+}
+
+static struct con_ops fsp_con_ops = {
+ .write = fsp_con_write,
+};
+#endif /* DVS_CONSOLE */
+
+static void fsp_open_vserial(struct fsp_msg *msg)
+{
+ u16 part_id = msg->data.words[0] & 0xffff;
+ u16 sess_id = msg->data.words[1] & 0xffff;
+ u8 hmc_sess = msg->data.bytes[0];
+ u8 hmc_indx = msg->data.bytes[1];
+ u8 authority = msg->data.bytes[4];
+ u32 tce_in, tce_out;
+ struct fsp_serial *fs;
+
+ printf("FSPCON: Got VSerial Open\n");
+ printf(" part_id = 0x%04x\n", part_id);
+ printf(" sess_id = 0x%04x\n", sess_id);
+ printf(" hmc_sess = 0x%02x\n", hmc_sess);
+ printf(" hmc_indx = 0x%02x\n", hmc_indx);
+ printf(" authority = 0x%02x\n", authority);
+
+ if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_OPEN_VSERIAL | 0x2f, 0),
+ fsp_freemsg);
+ printf(" NOT AVAILABLE !\n");
+ return;
+ }
+
+ fs = &fsp_serials[sess_id];
+
+ /* Hack ! On blades, the console opened via the mm has partition 1
+ * while the debug DVS generally has partition 0 (though you can
+ * use what you want really).
+	 * We don't want a DVS open/close to stomp on the blademm console,
+	 * so if a raw console gets an open with partition ID 1, we set a
+	 * flag that makes us ignore the close of partition 0.
+ */
+ if (fs->rsrc_id == 0xffff) {
+ if (part_id == 0)
+ fs->has_part0 = true;
+ if (part_id == 1)
+ fs->has_part1 = true;
+ }
+
+ tce_in = PSI_DMA_SER0_BASE + PSI_DMA_SER0_SIZE * sess_id;
+ tce_out = tce_in + SER_BUFFER_SIZE/2;
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ printf(" already open, skipping init !\n");
+ unlock(&fsp_con_lock);
+ goto already_open;
+ }
+
+ fs->open = true;
+
+ fs->poke_msg = fsp_mkmsg(FSP_CMD_VSERIAL_OUT, 2,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff);
+ fs->poke_msg->user_data = fs;
+
+ fs->in_buf->partition_id = fs->out_buf->partition_id = part_id;
+ fs->in_buf->session_id = fs->out_buf->session_id = sess_id;
+ fs->in_buf->hmc_id = fs->out_buf->hmc_id = hmc_indx;
+ fs->in_buf->data_offset = fs->out_buf->data_offset =
+ sizeof(struct fsp_serbuf_hdr);
+ fs->in_buf->last_valid = fs->out_buf->last_valid =
+ SER_BUF_DATA_SIZE - 1;
+ fs->in_buf->ovf_count = fs->out_buf->ovf_count = 0;
+ fs->in_buf->next_in = fs->out_buf->next_in = 0;
+ fs->in_buf->flags = fs->out_buf->flags = 0;
+ fs->in_buf->reserved = fs->out_buf->reserved = 0;
+ fs->in_buf->next_out = fs->out_buf->next_out = 0;
+ unlock(&fsp_con_lock);
+
+ already_open:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_OPEN_VSERIAL, 6,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff,
+ 0, tce_in, 0, tce_out), fsp_freemsg);
+
+#ifdef DVS_CONSOLE
+ printf(" log_port = %d\n", fs->log_port);
+ if (fs->log_port) {
+ fsp_con_port = sess_id;
+ sync();
+ /*
+ * We mark the FSP lock as being in the console
+ * path. We do that only once, we never unmark it
+		 * path. We do that only once; we never unmark it
+		 * (there is really not much point).
+ fsp_used_by_console();
+ fsp_con_lock.in_con_path = true;
+ set_console(&fsp_con_ops);
+ }
+#endif
+}
+
+static void fsp_close_vserial(struct fsp_msg *msg)
+{
+ u16 part_id = msg->data.words[0] & 0xffff;
+ u16 sess_id = msg->data.words[1] & 0xffff;
+ u8 hmc_sess = msg->data.bytes[0];
+ u8 hmc_indx = msg->data.bytes[1];
+ u8 authority = msg->data.bytes[4];
+ struct fsp_serial *fs;
+
+ printf("FSPCON: Got VSerial Close\n");
+ printf(" part_id = 0x%04x\n", part_id);
+ printf(" sess_id = 0x%04x\n", sess_id);
+ printf(" hmc_sess = 0x%02x\n", hmc_sess);
+ printf(" hmc_indx = 0x%02x\n", hmc_indx);
+ printf(" authority = 0x%02x\n", authority);
+
+ if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) {
+ printf(" NOT AVAILABLE !\n");
+ goto skip_close;
+ }
+
+ fs = &fsp_serials[sess_id];
+
+ /* See "HACK" comment in open */
+ if (fs->rsrc_id == 0xffff) {
+ if (part_id == 0)
+ fs->has_part0 = false;
+ if (part_id == 1)
+ fs->has_part1 = false;
+ if (fs->has_part0 || fs->has_part1) {
+ printf(" skipping close !\n");
+ goto skip_close;
+ }
+ }
+
+#ifdef DVS_CONSOLE
+ if (fs->log_port) {
+ fsp_con_port = -1;
+ set_console(NULL);
+ }
+#endif
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ fs->open = false;
+ fs->out_poke = false;
+ if (fs->poke_msg && fs->poke_msg->state == fsp_msg_unused) {
+ fsp_freemsg(fs->poke_msg);
+ fs->poke_msg = NULL;
+ }
+ }
+ unlock(&fsp_con_lock);
+ skip_close:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_CLOSE_VSERIAL, 2,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff),
+ fsp_freemsg);
+}
+
+static bool fsp_con_msg_hmc(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ /* Associate response */
+ if ((cmd_sub_mod >> 8) == 0xe08a) {
+ printf("FSPCON: Got associate response, status 0x%02x\n",
+ cmd_sub_mod & 0xff);
+ got_assoc_resp = true;
+ return true;
+ }
+ if ((cmd_sub_mod >> 8) == 0xe08b) {
+		printf("FSPCON: Got unassociate response, status 0x%02x\n",
+ cmd_sub_mod & 0xff);
+ got_deassoc_resp = true;
+ return true;
+ }
+ switch(cmd_sub_mod) {
+ case FSP_CMD_OPEN_VSERIAL:
+ fsp_open_vserial(msg);
+ return true;
+ case FSP_CMD_CLOSE_VSERIAL:
+ fsp_close_vserial(msg);
+ return true;
+ case FSP_CMD_HMC_INTF_QUERY:
+ printf("FSPCON: Got HMC interface query\n");
+
+		/* Keep this synchronous due to the FSP's fragile ordering
+		 * of the boot sequence
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_HMC_INTF_QUERY, 1,
+ msg->data.words[0] & 0x00ffffff), true);
+ got_intf_query = true;
+ return true;
+ }
+ return false;
+}
+
+static bool fsp_con_msg_vt(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u16 sess_id = msg->data.words[1] & 0xffff;
+
+ if (cmd_sub_mod == FSP_CMD_VSERIAL_IN && sess_id < MAX_SERIAL) {
+ struct fsp_serial *fs = &fsp_serials[sess_id];
+
+ if (!fs->open)
+ return true;
+
+ /* FSP is signaling some incoming data. We take the console
+ * lock to avoid racing with a simultaneous read, though we
+		 * might want to consider simplifying all that locking into
+ * one single lock that covers the console and the pending
+ * events.
+ */
+ lock(&fsp_con_lock);
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT,
+ OPAL_EVENT_CONSOLE_INPUT);
+ unlock(&fsp_con_lock);
+ }
+ return true;
+}
+
+static bool fsp_con_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ fsp_close_consoles();
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ fsp_console_reinit();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_con_client_hmc = {
+ .message = fsp_con_msg_hmc,
+};
+
+static struct fsp_client fsp_con_client_vt = {
+ .message = fsp_con_msg_vt,
+};
+
+static struct fsp_client fsp_con_client_rr = {
+ .message = fsp_con_msg_rr,
+};
+
+static void fsp_serial_add(int index, u16 rsrc_id, const char *loc_code,
+ bool log_port)
+{
+ struct fsp_serial *ser;
+
+ lock(&fsp_con_lock);
+ ser = &fsp_serials[index];
+
+ if (ser->available) {
+ unlock(&fsp_con_lock);
+ return;
+ }
+
+ ser->rsrc_id = rsrc_id;
+ strncpy(ser->loc_code, loc_code, LOC_CODE_SIZE);
+ ser->available = true;
+ ser->log_port = log_port;
+ unlock(&fsp_con_lock);
+
+ /* DVS doesn't have that */
+ if (rsrc_id != 0xffff) {
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (rsrc_id << 16) | 1, index), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+void fsp_console_preinit(void)
+{
+ int i;
+ void *base;
+
+ if (!fsp_present())
+ return;
+
+ ser_buffer = memalign(TCE_PSIZE, SER_BUFFER_SIZE * MAX_SERIAL);
+
+	/* Initialize our data structure pointers & TCE maps */
+ base = ser_buffer;
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *ser = &fsp_serials[i];
+
+ ser->in_buf = base;
+ ser->out_buf = base + SER_BUFFER_SIZE/2;
+ base += SER_BUFFER_SIZE;
+ }
+ fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer,
+ 4 * PSI_DMA_SER0_SIZE);
+
+ /* Register for class E0 and E1 */
+ fsp_register_client(&fsp_con_client_hmc, FSP_MCLASS_HMC_INTFMSG);
+ fsp_register_client(&fsp_con_client_vt, FSP_MCLASS_HMC_VT);
+ fsp_register_client(&fsp_con_client_rr, FSP_MCLASS_RR_EVENT);
+
+	/* Add DVS ports. We currently have sessions 0 and 3: 0 is for
+	 * OS use, 3 is our debug port. We need to add those before
+ * we complete the OPL or we'll potentially miss the
+ * console setup on Firebird blades.
+ */
+ fsp_serial_add(0, 0xffff, "DVS_OS", false);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0001);
+ fsp_serial_add(3, 0xffff, "DVS_FW", true);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0002);
+
+}
+
+static int64_t fsp_console_write(int64_t term_number, int64_t *length,
+ const uint8_t *buffer)
+{
+ struct fsp_serial *fs;
+ size_t written, requested;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ /* Clamp to a reasonable size */
+ requested = *length;
+ if (requested > 0x1000)
+ requested = 0x1000;
+ written = fsp_write_vserial(fs, buffer, requested);
+
+#ifdef OPAL_DEBUG_CONSOLE_IO
+ printf("OPAL: console write req=%ld written=%ld ni=%d no=%d\n",
+ requested, written, fs->out_buf->next_in, fs->out_buf->next_out);
+ printf(" %02x %02x %02x %02x "
+ "%02x \'%c\' %02x \'%c\' %02x \'%c\'.%02x \'%c\'..\n",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[4], buffer[5], buffer[5],
+ buffer[6], buffer[6], buffer[7], buffer[7]);
+#endif /* OPAL_DEBUG_CONSOLE_IO */
+
+ *length = written;
+ unlock(&fsp_con_lock);
+
+ return written ? OPAL_SUCCESS : OPAL_BUSY_EVENT;
+}
+
+static int64_t fsp_console_write_buffer_space(int64_t term_number,
+ int64_t *length)
+{
+ struct fsp_serial *fs;
+ struct fsp_serbuf_hdr *sb;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ sb = fs->out_buf;
+ *length = (sb->next_out + SER_BUF_DATA_SIZE - sb->next_in - 1)
+ % SER_BUF_DATA_SIZE;
+ unlock(&fsp_con_lock);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t fsp_console_read(int64_t term_number, int64_t *length,
+ uint8_t *buffer __unused)
+{
+ struct fsp_serial *fs;
+ struct fsp_serbuf_hdr *sb;
+ bool pending = false;
+ uint32_t old_nin, n, i, chunk, req = *length;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ sb = fs->in_buf;
+ old_nin = sb->next_in;
+ lwsync();
+ n = (old_nin + SER_BUF_DATA_SIZE - sb->next_out)
+ % SER_BUF_DATA_SIZE;
+ if (n > req) {
+ pending = true;
+ n = req;
+ }
+ *length = n;
+
+ chunk = SER_BUF_DATA_SIZE - sb->next_out;
+ if (chunk > n)
+ chunk = n;
+ memcpy(buffer, &sb->data[sb->next_out], chunk);
+ if (chunk < n)
+ memcpy(buffer + chunk, &sb->data[0], n - chunk);
+ sb->next_out = (sb->next_out + n) % SER_BUF_DATA_SIZE;
+
+#ifdef OPAL_DEBUG_CONSOLE_IO
+ printf("OPAL: console read req=%d read=%d ni=%d no=%d\n",
+ req, n, sb->next_in, sb->next_out);
+ printf(" %02x %02x %02x %02x %02x %02x %02x %02x ...\n",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6], buffer[7]);
+#endif /* OPAL_DEBUG_CONSOLE_IO */
+
+ /* Might clear the input pending flag */
+ for (i = 0; i < MAX_SERIAL && !pending; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->in_buf;
+
+ if (fs->log_port || !fs->open)
+ continue;
+ if (sb->next_out != sb->next_in)
+ pending = true;
+ }
+ if (!pending)
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0);
+
+ unlock(&fsp_con_lock);
+
+ return OPAL_SUCCESS;
+}
+
+void fsp_console_poll(void *data __unused)
+{
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ static int debug;
+#endif
+
+ /*
+	 * We don't get messages when our output buffer is consumed, so
+	 * we need to poll. We also defer sending of poke messages from
+	 * the sapphire console to avoid a locking nightmare when being
+	 * called from printf() deep inside an existing lock nesting
+	 * stack.
+ */
+ if (fsp_con_full ||
+ (opal_pending_events & OPAL_EVENT_CONSOLE_OUTPUT)) {
+ unsigned int i;
+ bool pending = false;
+
+ /* We take the console lock. This is somewhat inefficient
+ * but it guarantees we aren't racing with a write, and
+ * thus clearing an event improperly
+ */
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL && !pending; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+
+ if (!fs->open)
+ continue;
+ if (sb->next_out == sb->next_in)
+ continue;
+ if (fs->log_port)
+ __flush_console();
+ else {
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ if (debug < 5) {
+ printf("OPAL: %d still pending"
+ " ni=%d no=%d\n",
+ i, sb->next_in, sb->next_out);
+ debug++;
+ }
+#endif /* OPAL_DEBUG_CONSOLE_POLL */
+ pending = true;
+ }
+ }
+ if (!pending) {
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT, 0);
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ debug = 0;
+#endif
+ }
+ unlock(&fsp_con_lock);
+ }
+}
+
+void fsp_console_init(void)
+{
+ struct dt_node *serials, *ser;
+ int i;
+
+ if (!fsp_present())
+ return;
+
+ opal_register(OPAL_CONSOLE_READ, fsp_console_read, 3);
+ opal_register(OPAL_CONSOLE_WRITE_BUFFER_SPACE,
+ fsp_console_write_buffer_space, 2);
+ opal_register(OPAL_CONSOLE_WRITE, fsp_console_write, 3);
+
+ /* Wait until we got the intf query before moving on */
+ while (!got_intf_query)
+ fsp_poll();
+
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0000);
+
+ /* Register poller */
+ opal_add_poller(fsp_console_poll, NULL);
+
+ /* Parse serial port data */
+ serials = dt_find_by_path(dt_root, "ipl-params/fsp-serial");
+ if (!serials) {
+ prerror("FSPCON: No FSP serial ports in device-tree\n");
+ return;
+ }
+
+ i = 1;
+ dt_for_each_child(serials, ser) {
+ u32 rsrc_id = dt_prop_get_u32(ser, "reg");
+ const void *lc = dt_prop_get(ser, "ibm,loc-code");
+
+ printf("FSPCON: Serial %d rsrc: %04x loc: %s\n",
+ i, rsrc_id, (const char *)lc);
+ fsp_serial_add(i++, rsrc_id, lc, false);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0010 + i);
+ }
+
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0005);
+}
+
+static void flush_all_input(void)
+{
+ unsigned int i;
+
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->in_buf;
+
+ if (fs->log_port)
+ continue;
+
+ sb->next_out = sb->next_in;
+ }
+ unlock(&fsp_con_lock);
+}
+
+static bool send_all_hvsi_close(void)
+{
+ unsigned int i;
+ bool has_hvsi = false;
+ static const uint8_t close_packet[] = { 0xfe, 6, 0, 1, 0, 3 };
+
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+ unsigned int space, timeout = 10;
+
+ if (fs->log_port)
+ continue;
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ has_hvsi = true;
+
+ /* Do we have room ? Wait a bit if not */
+ while(timeout--) {
+ space = (sb->next_out + SER_BUF_DATA_SIZE -
+ sb->next_in - 1) % SER_BUF_DATA_SIZE;
+ if (space >= 6)
+ break;
+ time_wait_ms(500);
+ }
+ fsp_write_vserial(fs, close_packet, 6);
+ }
+ unlock(&fsp_con_lock);
+
+ return has_hvsi;
+}
+
+static void reopen_all_hvsi(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Deassociating HVSI console %d\n", i);
+ got_deassoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_UNASSOC_SERIAL, 1,
+ (i << 16) | 1), true);
+ /* XXX add timeout ? */
+ while(!got_deassoc_resp)
+ fsp_poll();
+ }
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Reassociating HVSI console %d\n", i);
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (fs->rsrc_id << 16) | 1, i), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+void fsp_console_reset(void)
+{
+ printf("FSP: Console reset !\n");
+
+ /* This is called on a fast-reset. To work around issues with HVSI
+ * initial negotiation, before we reboot the kernel, we flush all
+ * input and send an HVSI close packet.
+ */
+ flush_all_input();
+
+ /* Returns false if there is no HVSI console */
+ if (!send_all_hvsi_close())
+ return;
+
+ time_wait_ms(500);
+
+ flush_all_input();
+
+ reopen_all_hvsi();
+
+}
+
+void fsp_console_add_nodes(void)
+{
+ unsigned int i;
+ struct dt_node *consoles;
+
+ consoles = dt_new(opal_node, "consoles");
+ dt_add_property_cells(consoles, "#address-cells", 1);
+ dt_add_property_cells(consoles, "#size-cells", 0);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct dt_node *fs_node;
+ char name[32];
+
+ if (fs->log_port || !fs->available)
+ continue;
+
+ snprintf(name, sizeof(name), "serial@%d", i);
+ fs_node = dt_new(consoles, name);
+ if (fs->rsrc_id == 0xffff)
+ dt_add_property_string(fs_node, "compatible",
+ "ibm,opal-console-raw");
+ else
+ dt_add_property_string(fs_node, "compatible",
+ "ibm,opal-console-hvsi");
+ dt_add_property_cells(fs_node,
+ "#write-buffer-size", SER_BUF_DATA_SIZE);
+ dt_add_property_cells(fs_node, "reg", i);
+ dt_add_property_string(fs_node, "device_type", "serial");
+ }
+}
+
+void fsp_console_select_stdout(void)
+{
+ struct dt_node *iplp;
+ u32 ipl_mode = 0;
+
+ if (!fsp_present())
+ return;
+
+ /*
+ * We hijack the "os-ipl-mode" setting in iplparams to select
+	 * our output console. This is the "i5/OS partition mode boot"
+ * setting in ASMI converted to an integer: 0=A, 1=B, ...
+ */
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ ipl_mode = dt_prop_get_u32_def(iplp, "os-ipl-mode", 0);
+
+ /*
+ * Now, if ipl_mode is 1 or 2, we set the corresponding serial
+	 * port, if it exists (i.e. is open), as the default console.
+ *
+ * In any other case, we set the default console to serial0
+ * which is DVS or IPMI
+ */
+ if (ipl_mode == 1 && fsp_serials[1].open) {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@1");
+ printf("FSPCON: default console 1\n");
+ } else if (ipl_mode == 2 && fsp_serials[2].open) {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@2");
+ printf("FSPCON: default console 2\n");
+ } else {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@0");
+ printf("FSPCON: default console 0\n");
+ }
+}
+
diff --git a/hw/fsp/fsp-diag.c b/hw/fsp/fsp-diag.c
new file mode 100644
index 00000000..5f588af9
--- /dev/null
+++ b/hw/fsp/fsp-diag.c
@@ -0,0 +1,58 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Code for handling FSP_MCLASS_DIAG messages (cmd 0xee)
+ * Receiving a high level ack timeout is likely indicative of a firmware bug
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <processor.h>
+#include <timebase.h>
+#include <opal.h>
+#include <fsp-sysparam.h>
+
+static bool fsp_diag_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+
+ if (cmd_sub_mod == FSP_RSP_DIAG_LINK_ERROR) {
+ printf("FIXME: Unhandled FSP_MCLASS_DIAG Link Error Report\n");
+ return false;
+ }
+
+ if (cmd_sub_mod != FSP_RSP_DIAG_ACK_TIMEOUT) {
+ printf("BUG: Unhandled subcommand: 0x%x (New FSP spec?)\n",
+ cmd_sub_mod);
+ return false;
+ }
+
+ printf("BUG: High Level ACK timeout (FSP_MCLASS_DIAG) for 0x%x\n",
+ msg->data.words[0] & 0xffff0000);
+
+ return true;
+}
+
+static struct fsp_client fsp_diag = {
+ .message = fsp_diag_msg,
+};
+
+/* This is called at boot time */
+void fsp_init_diag(void)
+{
+ /* Register for the diag event */
+ fsp_register_client(&fsp_diag, FSP_MCLASS_DIAG);
+}
diff --git a/hw/fsp/fsp-dump.c b/hw/fsp/fsp-dump.c
new file mode 100644
index 00000000..be1aa7c2
--- /dev/null
+++ b/hw/fsp/fsp-dump.c
@@ -0,0 +1,917 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * Dump support:
+ * We get dump notification from different sources:
+ * - During system initialization via HDAT
+ * - During FSP reset/reload (FipS dump)
+ * - Dump available notification MBOX command (0xCE, 0x78, 0x00)
+ *
+ * To avoid complications, we keep the dumps in a list and fetch
+ * them serially.
+ *
+ * Dump retrieve process:
+ * - Once we get notification from FSP we enqueue the dump ID and notify
+ * Linux via OPAL event notification.
+ * - Linux reads dump info and allocates required memory to fetch the dump
+ * and makes dump read call.
+ * - Sapphire fetches dump data from FSP.
+ * - Linux writes dump to disk and sends acknowledgement.
+ * - Sapphire acknowledges FSP.
+ */
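+
+/*
+ * A minimal sketch of the host-side flow implied above (editor's
+ * illustration; alloc_dump_sg_list() and write_dump_to_disk() are
+ * hypothetical helpers, error handling and the asynchronous
+ * completion states (OPAL_BUSY_EVENT / OPAL_PARTIAL) are elided):
+ *
+ *	uint32_t id, size, type;
+ *
+ *	if (opal_dump_info2(&id, &size, &type) == OPAL_SUCCESS) {
+ *		struct opal_sg_list *sg = alloc_dump_sg_list(size);
+ *
+ *		if (opal_dump_read(id, sg) == OPAL_SUCCESS) {
+ *			write_dump_to_disk(sg, size);
+ *			opal_dump_ack(id);	// allow the next dump
+ *		}
+ *	}
+ */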
+
+#include <fsp.h>
+#include <psi.h>
+#include <lock.h>
+#include <device.h>
+#include <skiboot.h>
+#include <fsp-elog.h>
+
+/*
+ * Max outstanding dumps to retrieve
+ *
+ * Note:
+ * Dumps are serialized: we don't get a notification for a second
+ * dump of a given type until we acknowledge the first one. But we
+ * may get notifications for different dump types, and our dump
+ * retrieval code is serialized as well. Hence we use a list to
+ * keep track of outstanding dumps to be retrieved.
+ */
+#define MAX_DUMP_RECORD 0x04
+
+/* Max retry */
+#define FIPS_DUMP_MAX_RETRY 0x03
+
+/* Dump type */
+#define DUMP_TYPE_FSP 0x01
+#define DUMP_TYPE_SYS 0x02
+#define DUMP_TYPE_SMA 0x03
+
+/* Dump fetch size */
+#define DUMP_FETCH_SIZE_FSP 0x500000
+#define DUMP_FETCH_SIZE_SYS 0x400000
+#define DUMP_FETCH_SIZE_RES 0x200000
+
+/* Params for Fips dump */
+#define FSP_DUMP_TOOL_TYPE "SYS "
+#define FSP_DUMP_CLIENT_ID "SAPPHIRE_CLIENT"
+
+enum dump_state {
+ DUMP_STATE_ABSENT, /* No FSP dump */
+ DUMP_STATE_NONE, /* No dump to retrieve */
+ DUMP_STATE_NOTIFY, /* Notified Linux */
+ DUMP_STATE_FETCHING, /* Dump retrieval is in progress */
+ DUMP_STATE_FETCH, /* Dump retrieve complete */
+ DUMP_STATE_PARTIAL, /* Partial read */
+ DUMP_STATE_ABORTING, /* Aborting due to kexec */
+};
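+
+/*
+ * Typical state flow (editor's summary of the code below):
+ *
+ *	NONE -> NOTIFY	 (dump queued, Linux notified)
+ *	     -> FETCHING (dump read in progress)
+ *	     -> FETCH / PARTIAL (retrieval complete / partial)
+ *	     -> NONE	 (on ack)
+ *
+ * ABORTING is entered from FETCHING on kexec or FSP reset/reload.
+ */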
+
+/* Pending dump list */
+struct dump_record {
+ uint8_t type;
+ uint32_t id;
+ uint32_t size;
+ struct list_node link;
+};
+
+/* List definitions */
+static LIST_HEAD(dump_pending);
+static LIST_HEAD(dump_free);
+
+/* Dump retrieve state */
+static enum dump_state dump_state = DUMP_STATE_NONE;
+
+/* Dump buffer SG list */
+static struct opal_sg_list *dump_data;
+static struct dump_record *dump_entry;
+static int64_t dump_offset;
+static size_t fetch_remain;
+
+/* FipS dump retry count */
+static int retry_cnt;
+
+/* Protect list and dump retrieve state */
+static struct lock dump_lock = LOCK_UNLOCKED;
+
+/* Forward declaration */
+static int64_t fsp_opal_dump_init(uint8_t dump_type);
+static int64_t fsp_dump_read(void);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_ACK, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+/*
+ * Helper functions
+ */
+static inline void update_dump_state(enum dump_state state)
+{
+ dump_state = state;
+}
+
+static int64_t check_dump_state(void)
+{
+ switch (dump_state) {
+ case DUMP_STATE_ABSENT:
+ return OPAL_HARDWARE;
+ case DUMP_STATE_NONE:
+ case DUMP_STATE_NOTIFY:
+		/* Fetch not yet started: wrong state for a read */
+ return OPAL_WRONG_STATE;
+ case DUMP_STATE_FETCHING:
+ case DUMP_STATE_ABORTING:
+ return OPAL_BUSY_EVENT;
+ case DUMP_STATE_FETCH:
+ return OPAL_SUCCESS;
+ case DUMP_STATE_PARTIAL:
+ return OPAL_PARTIAL;
+ }
+ return OPAL_SUCCESS;
+}
+
+static inline void dump_tce_map(uint32_t tce_offset,
+ void *buffer, uint32_t size)
+{
+ uint32_t tlen = ALIGN_UP(size, TCE_PSIZE);
+ fsp_tce_map(PSI_DMA_DUMP_DATA + tce_offset, buffer, tlen);
+}
+
+static inline void dump_tce_unmap(uint32_t size)
+{
+ fsp_tce_unmap(PSI_DMA_DUMP_DATA, size);
+}
+
+/*
+ * Returns Data set ID for the given dump type
+ */
+static inline uint16_t get_dump_data_set_id(uint8_t type)
+{
+ switch (type) {
+ case DUMP_TYPE_FSP:
+ return FSP_DATASET_SP_DUMP;
+ case DUMP_TYPE_SYS:
+ return FSP_DATASET_HW_DUMP;
+ default:
+ break;
+ }
+ return OPAL_INTERNAL_ERROR;
+}
+
+/*
+ * Returns max data we can fetch from FSP fetch data call
+ */
+static inline int64_t get_dump_fetch_max_size(uint8_t type)
+{
+ switch (type) {
+ case DUMP_TYPE_FSP:
+ return DUMP_FETCH_SIZE_FSP;
+ case DUMP_TYPE_SYS:
+ return DUMP_FETCH_SIZE_SYS;
+ default:
+ break;
+ }
+ return OPAL_INTERNAL_ERROR;
+}
+
+/*
+ * Get dump record from pending list
+ */
+static inline struct dump_record *get_dump_rec_from_list(uint32_t id)
+{
+ struct dump_record *record;
+
+ list_for_each(&dump_pending, record, link) {
+ if (record->id == id)
+ return record;
+ }
+ return NULL;
+}
+
+/*
+ * New dump available notification to Linux
+ */
+static void update_opal_dump_notify(void)
+{
+ /*
+	 * Wait until the current dump retrieval completes
+	 * before notifying again.
+ */
+ if (dump_state != DUMP_STATE_NONE)
+ return;
+
+	/* More dumps to retrieve */
+ if (!list_empty(&dump_pending)) {
+ update_dump_state(DUMP_STATE_NOTIFY);
+ opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL,
+ OPAL_EVENT_DUMP_AVAIL);
+ }
+}
+
+static int64_t remove_dump_id_from_list(uint32_t dump_id)
+{
+ struct dump_record *record, *nxt_record;
+ int rc = OPAL_SUCCESS;
+ bool found = false;
+
+ /* Remove record from pending list */
+ list_for_each_safe(&dump_pending, record, nxt_record, link) {
+ if (record->id != dump_id)
+ continue;
+
+ found = true;
+ list_del(&record->link);
+ list_add(&dump_free, &record->link);
+ break;
+ }
+
+ /*
+	 * Continue to update_opal_dump_notify() even if we failed
+	 * to remove the ID, so that we can resend the notification
+	 * for the same dump ID to Linux.
+ */
+ if (!found) { /* List corrupted? */
+ log_simple_error(&e_info(OPAL_RC_DUMP_LIST),
+ "DUMP: ID 0x%x not found in list!\n",
+ dump_id);
+ rc = OPAL_PARAMETER;
+ }
+
+ /* Update state */
+ update_dump_state(DUMP_STATE_NONE);
+ /* Notify next available dump to retrieve */
+ update_opal_dump_notify();
+
+ return rc;
+}
+
+static int64_t add_dump_id_to_list(uint8_t dump_type,
+ uint32_t dump_id, uint32_t dump_size)
+{
+ struct dump_record *record;
+ int rc = OPAL_SUCCESS;
+
+ lock(&dump_lock);
+
+ rc = check_dump_state();
+ if (rc == OPAL_HARDWARE)
+ goto out;
+
+ /* List is full ? */
+ if (list_empty(&dump_free)) {
+ printf("DUMP: Dump ID 0x%x is not queued.\n", dump_id);
+ rc = OPAL_RESOURCE;
+ goto out;
+ }
+
+ /* Already queued? */
+ record = get_dump_rec_from_list(dump_id);
+ if (record) {
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ /* Add to list */
+ record = list_pop(&dump_free, struct dump_record, link);
+ record->type = dump_type;
+ record->id = dump_id;
+ record->size = dump_size;
+ list_add_tail(&dump_pending, &record->link);
+
+ /* OPAL notification */
+ update_opal_dump_notify();
+ rc = OPAL_SUCCESS;
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static void dump_init_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ printf("DUMP: FipS dump init status = 0x%x\n", status);
+ fsp_freemsg(msg);
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS:
+ printf("DUMP: Initiated FipS dump.\n");
+ break;
+ case FSP_STATUS_BUSY: /* Retry, if FSP is busy */
+ if (retry_cnt++ < FIPS_DUMP_MAX_RETRY)
+ if (fsp_opal_dump_init(DUMP_TYPE_FSP) == OPAL_SUCCESS)
+ return;
+ break;
+ default:
+ break;
+ }
+ /* Reset max retry count */
+ retry_cnt = 0;
+}
+
+/*
+ * Initiate new FipS dump
+ */
+static int64_t fsp_opal_dump_init(uint8_t dump_type)
+{
+ struct fsp_msg *msg;
+ int rc = OPAL_SUCCESS;
+ uint32_t *tool_type = (void *)FSP_DUMP_TOOL_TYPE;
+ uint32_t *client_id = (void *)FSP_DUMP_CLIENT_ID;
+
+ /* Only FipS dump generate request is supported */
+ if (dump_type != DUMP_TYPE_FSP)
+ return OPAL_PARAMETER;
+
+ msg = fsp_mkmsg(FSP_CMD_FSP_DUMP_INIT, 6, *tool_type,
+ sizeof(FSP_DUMP_CLIENT_ID), *client_id,
+ *(client_id + 1), *(client_id + 2), *(client_id + 3));
+
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Message allocation failed.\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, dump_init_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Failed to queue FipS dump init request.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+
+ return rc;
+}
+
+/*
+ * OPAL interface to send dump information to Linux.
+ */
+static int64_t fsp_opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size,
+ uint32_t *dump_type)
+{
+ struct dump_record *record;
+ int rc = OPAL_SUCCESS;
+
+ lock(&dump_lock);
+
+ /* Clear notification */
+ opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL, 0);
+
+ record = list_top(&dump_pending, struct dump_record, link);
+ if (!record) { /* List corrupted? */
+ update_dump_state(DUMP_STATE_NONE);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+ *dump_id = record->id;
+ *dump_size = record->size;
+ *dump_type = record->type;
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static int64_t fsp_opal_dump_info(uint32_t *dump_id, uint32_t *dump_size)
+{
+ uint32_t dump_type;
+ return fsp_opal_dump_info2(dump_id, dump_size, &dump_type);
+}
+
+static int64_t validate_dump_sglist(struct opal_sg_list *list,
+ int64_t *size)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *prev_entry, *entry;
+ int length, num_entries, i;
+
+ prev_entry = NULL;
+ *size = 0;
+ for (sg = list; sg; sg = sg->next) {
+ length = sg->length - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return OPAL_PARAMETER;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+ *size += entry->length;
+
+ /* All entries must be aligned */
+ if (((uint64_t)entry->data) & 0xfff)
+ return OPAL_PARAMETER;
+
+ /* All non-terminal entries size must be aligned */
+ if (prev_entry && (prev_entry->length & 0xfff))
+ return OPAL_PARAMETER;
+
+ prev_entry = entry;
+ }
+ }
+ return OPAL_SUCCESS;
+}
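+
+/*
+ * In other words (editor's note): every entry must start on a 4K
+ * boundary, and every entry except the last must also be a 4K
+ * multiple in length, so the buffer can be TCE-mapped as one
+ * contiguous DMA window.
+ */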
+
+/*
+ * Map dump buffer to TCE buffer
+ */
+static int64_t map_dump_buffer(void)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *entry;
+ int64_t fetch_max;
+ int length, num_entries, i;
+ int buf_off, fetch_off, tce_off, sg_off;
+ bool last = false;
+
+ /* FSP fetch max size */
+ fetch_max = get_dump_fetch_max_size(dump_entry->type);
+ if (fetch_max > (dump_entry->size - dump_offset))
+ fetch_remain = dump_entry->size - dump_offset;
+ else
+ fetch_remain = fetch_max;
+
+ /* offsets */
+ fetch_off = fetch_remain;
+ tce_off = sg_off = 0;
+
+ for (sg = dump_data; sg; sg = sg->next) {
+ num_entries = (sg->length - 16) /
+ sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return OPAL_PARAMETER;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+			/* Skip entries until we reach the dump offset */
+ if ((sg_off + entry->length) < dump_offset) {
+ sg_off += entry->length;
+ continue;
+ }
+
+ /*
+ * SG list entry size can be more than 4k.
+ * Map only required pages, instead of
+ * mapping entire entry.
+ */
+ if (!tce_off) {
+ buf_off = (dump_offset - sg_off) & ~0xfff;
+ length = entry->length - buf_off;
+ } else {
+ buf_off = 0;
+ length = entry->length;
+ }
+
+ /* Adjust length for last mapping */
+ if (fetch_off <= length) {
+ length = fetch_off;
+ last = true;
+ }
+
+ /* Adjust offset */
+ sg_off += entry->length;
+ fetch_off -= length;
+
+ /* TCE mapping */
+ dump_tce_map(tce_off, entry->data + buf_off, length);
+ tce_off += length;
+
+ /* TCE mapping complete */
+ if (last)
+ return OPAL_SUCCESS;
+ }
+ } /* outer loop */
+ return OPAL_PARAMETER;
+}
+
+static void dump_read_complete(struct fsp_msg *msg)
+{
+ void *buffer;
+ size_t length, offset;
+ int rc;
+ uint32_t dump_id;
+ uint16_t id;
+ uint8_t flags, status;
+ bool compl = false;
+
+ status = (msg->resp->word1 >> 8) & 0xff;
+ flags = (msg->data.words[0] >> 16) & 0xff;
+ id = msg->data.words[0] & 0xffff;
+ dump_id = msg->data.words[1];
+ offset = msg->resp->data.words[1];
+ length = msg->resp->data.words[2];
+
+ fsp_freemsg(msg);
+
+ lock(&dump_lock);
+
+ if (dump_state == DUMP_STATE_ABORTING) {
+ printf("DUMP: Fetch dump aborted, ID = 0x%x\n", dump_id);
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+ update_dump_state(DUMP_STATE_NONE);
+ goto bail;
+ }
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS: /* Fetch next dump block */
+ if (dump_offset < dump_entry->size) {
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+ rc = fsp_dump_read();
+ if (rc == OPAL_SUCCESS)
+ goto bail;
+ } else { /* Dump read complete */
+ compl = true;
+ }
+ break;
+ case FSP_STATUS_MORE_DATA: /* More data to read */
+ offset += length;
+ buffer = (void *)PSI_DMA_DUMP_DATA + offset;
+ fetch_remain -= length;
+
+ rc = fsp_fetch_data_queue(flags, id, dump_id, offset, buffer,
+ &fetch_remain, dump_read_complete);
+ if (rc == OPAL_SUCCESS)
+ goto bail;
+ break;
+ default:
+ break;
+ }
+
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+
+ /* Update state */
+ if (compl) {
+ printf("DUMP: Fetch dump success. ID = 0x%x\n", dump_id);
+ update_dump_state(DUMP_STATE_FETCH);
+ } else {
+ printf("DUMP: Fetch dump partial. ID = 0x%x\n", dump_id);
+ update_dump_state(DUMP_STATE_PARTIAL);
+ }
+ bail:
+ unlock(&dump_lock);
+}
+
+/*
+ * Fetch dump data from FSP
+ */
+static int64_t fsp_dump_read(void)
+{
+ int64_t rc;
+ uint16_t data_set;
+ uint8_t flags = 0x00;
+
+ /* Get data set ID */
+ data_set = get_dump_data_set_id(dump_entry->type);
+
+ /* Map TCE buffer */
+ rc = map_dump_buffer();
+ if (rc != OPAL_SUCCESS) {
+ printf("DUMP: TCE mapping failed\n");
+ return rc;
+ }
+
+ printf("DUMP: Fetch Dump. ID = %02x, sub ID = %08x, len = %ld\n",
+ data_set, dump_entry->id, fetch_remain);
+
+ /* Fetch data */
+ rc = fsp_fetch_data_queue(flags, data_set, dump_entry->id,
+ dump_offset, (void *)PSI_DMA_DUMP_DATA,
+ &fetch_remain, dump_read_complete);
+
+ /* Adjust dump fetch offset */
+ dump_offset += fetch_remain;
+
+ return rc;
+}
+
+static int64_t fsp_opal_dump_read(uint32_t dump_id,
+ struct opal_sg_list *list)
+{
+ struct dump_record *record;
+ int64_t rc, size;
+
+ lock(&dump_lock);
+
+ /* Check state */
+ if (dump_state != DUMP_STATE_NOTIFY) {
+ rc = check_dump_state();
+ goto out;
+ }
+
+ /* Validate dump ID */
+ record = get_dump_rec_from_list(dump_id);
+ if (!record) { /* List corrupted? */
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ /* Validate dump buffer and size */
+ rc = validate_dump_sglist(list, &size);
+ if (rc != OPAL_SUCCESS) {
+ printf("DUMP: SG list validation failed\n");
+ goto out;
+ }
+
+	if (size < record->size) { /* Insufficient buffer */
+ printf("DUMP: Insufficient buffer\n");
+ rc = OPAL_PARAMETER;
+ goto out;
+ }
+
+ /* Update state */
+ update_dump_state(DUMP_STATE_FETCHING);
+
+ /* Fetch dump data */
+ dump_entry = record;
+ dump_data = list;
+ dump_offset = 0;
+ rc = fsp_dump_read();
+ if (rc != OPAL_SUCCESS)
+ goto out;
+
+ /* Check status after initiating fetch data */
+ rc = check_dump_state();
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static void dump_ack_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ if (status)
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+ "DUMP: ACK failed for ID: 0x%x\n",
+ msg->data.words[0]);
+ else
+ printf("DUMP: ACKed dump ID: 0x%x\n", msg->data.words[0]);
+
+ fsp_freemsg(msg);
+}
+
+/*
+ * Acknowledge dump
+ */
+static int64_t fsp_opal_dump_ack(uint32_t dump_id)
+{
+ struct dump_record *record;
+ struct fsp_msg *msg;
+ int rc;
+ uint32_t cmd;
+ uint8_t dump_type = 0;
+
+ /* Get dump type */
+ lock(&dump_lock);
+ record = get_dump_rec_from_list(dump_id);
+ if (record)
+ dump_type = record->type;
+
+ /*
+	 * The next available dump in the pending list will be of a
+	 * different type, so we don't need to wait for the ack to complete.
+ *
+ * Note:
+ * This allows us to proceed even if we fail to ACK.
+ * In the worst case we may get notification for the
+ * same dump again, which is probably better than
+ * looping forever.
+ */
+ rc = remove_dump_id_from_list(dump_id);
+ if (rc != OPAL_SUCCESS) /* Invalid dump id */
+ goto out;
+
+ /* Adjust mod value */
+ cmd = FSP_CMD_ACK_DUMP | (dump_type & 0xff);
+ msg = fsp_mkmsg(cmd, 1, dump_id);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+			"DUMP: Message allocation failed!\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, dump_ack_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+ "DUMP: Failed to queue dump ack message.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+/* Resend dump available notification */
+static int64_t fsp_opal_dump_resend_notification(void)
+{
+ lock(&dump_lock);
+
+ if (dump_state != DUMP_STATE_ABSENT)
+ update_dump_state(DUMP_STATE_NONE);
+
+ update_opal_dump_notify();
+
+ unlock(&dump_lock);
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Handle FSP R/R event.
+ */
+static bool fsp_dump_retrieve_rr(uint32_t cmd_sub_mod,
+ struct fsp_msg *msg __unused)
+{
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ lock(&dump_lock);
+ /* Reset dump state */
+ if (dump_state == DUMP_STATE_FETCHING)
+ update_dump_state(DUMP_STATE_ABORTING);
+ unlock(&dump_lock);
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ lock(&dump_lock);
+
+ /* Reset TCE mapping */
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+
+ /* Reset dump state */
+ update_dump_state(DUMP_STATE_NONE);
+
+ /*
+		 * For now we keep the R/R handler simple. In the worst
+		 * case we may end up resending the dump available
+		 * notification for the same dump ID twice to Linux.
+ */
+ update_opal_dump_notify();
+ unlock(&dump_lock);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Handle host kexec'ing scenarios
+ */
+static bool opal_kexec_dump_notify(void *data __unused)
+{
+ bool ready = true;
+
+ lock(&dump_lock);
+
+ /* Dump retrieve is in progress? */
+ if (dump_state == DUMP_STATE_FETCHING)
+ dump_state = DUMP_STATE_ABORTING;
+
+ /* Not yet safe to kexec */
+ if (dump_state == DUMP_STATE_ABORTING)
+ ready = false;
+
+ unlock(&dump_lock);
+
+ return ready;
+}
+
+/*
+ * FipS dump notification
+ */
+void fsp_fips_dump_notify(uint32_t dump_id, uint32_t dump_size)
+{
+ printf("DUMP: FipS dump available. ID = 0x%x [size: %d bytes]\n",
+ dump_id, dump_size);
+ add_dump_id_to_list(DUMP_TYPE_FSP, dump_id, dump_size);
+}
+
+/*
+ * System/Platform dump notification
+ */
+static bool fsp_sys_dump_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ /*
+	 * Though the spec says mod 00 is deprecated, we still
+	 * seem to get mod 00 notifications (at least on
+	 * P7 machines).
+ */
+ if (cmd_sub_mod != FSP_RSP_SYS_DUMP &&
+ cmd_sub_mod != FSP_RSP_SYS_DUMP_OLD)
+ return false;
+
+ printf("DUMP: Platform dump available. ID = 0x%x [size: %d bytes]\n",
+ msg->data.words[0], msg->data.words[1]);
+
+ add_dump_id_to_list(DUMP_TYPE_SYS,
+ msg->data.words[0], msg->data.words[1]);
+ return true;
+}
+
+/*
+ * If a platform dump is available at IPL time, we are
+ * notified via HDAT. Check the device tree for the dump's
+ * presence.
+ */
+static void check_ipl_sys_dump(void)
+{
+ struct dt_node *dump_node;
+ uint32_t dump_id, dump_size;
+
+ dump_node = dt_find_by_path(dt_root, "ipl-params/platform-dump");
+ if (!dump_node)
+ return;
+
+ if (!dt_find_property(dump_node, "dump-id"))
+ return;
+
+ dump_id = dt_prop_get_u32(dump_node, "dump-id");
+ dump_size = (uint32_t)dt_prop_get_u64(dump_node, "total-size");
+
+ printf("DUMP: Platform dump present during IPL.\n");
+ printf(" ID = 0x%x [size: %d bytes]\n", dump_id, dump_size);
+
+ add_dump_id_to_list(DUMP_TYPE_SYS, dump_id, dump_size);
+}
+
+/*
+ * Allocate and initialize dump list
+ */
+static int init_dump_free_list(void)
+{
+ struct dump_record *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct dump_record) * MAX_DUMP_RECORD);
+ if (!entry) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_DUMP_RECORD; i++) {
+ list_add_tail(&dump_free, &entry->link);
+ entry++;
+ }
+ return 0;
+}
+
+static struct fsp_client fsp_sys_dump_client = {
+ .message = fsp_sys_dump_notify,
+};
+
+static struct fsp_client fsp_dump_client_rr = {
+ .message = fsp_dump_retrieve_rr,
+};
+
+void fsp_dump_init(void)
+{
+ if (!fsp_present()) {
+ update_dump_state(DUMP_STATE_ABSENT);
+ return;
+ }
+
+ /* Initialize list */
+ if (init_dump_free_list() != 0) {
+ update_dump_state(DUMP_STATE_ABSENT);
+ return;
+ }
+
+ /* Register for Class CE */
+ fsp_register_client(&fsp_sys_dump_client, FSP_MCLASS_SERVICE);
+ /* Register for Class AA (FSP R/R) */
+ fsp_register_client(&fsp_dump_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Register for sync on host reboot call */
+ opal_add_host_sync_notifier(opal_kexec_dump_notify, NULL);
+
+ /* OPAL interface */
+ opal_register(OPAL_DUMP_INIT, fsp_opal_dump_init, 1);
+ opal_register(OPAL_DUMP_INFO, fsp_opal_dump_info, 2);
+ opal_register(OPAL_DUMP_INFO2, fsp_opal_dump_info2, 3);
+ opal_register(OPAL_DUMP_READ, fsp_opal_dump_read, 2);
+ opal_register(OPAL_DUMP_ACK, fsp_opal_dump_ack, 1);
+ opal_register(OPAL_DUMP_RESEND, fsp_opal_dump_resend_notification, 0);
+
+ /* Check for platform dump presence during IPL time */
+ check_ipl_sys_dump();
+}
diff --git a/hw/fsp/fsp-elog-read.c b/hw/fsp/fsp-elog-read.c
new file mode 100644
index 00000000..f4a689ff
--- /dev/null
+++ b/hw/fsp/fsp-elog-read.c
@@ -0,0 +1,520 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This code enables retrieving error logs from the FSP to sapphire
+ * in sequence: the FSP sends the next log only once sapphire has
+ * responded to the notification for the previous one. On completion
+ * of reading a log from the FSP, OPAL_EVENT_ERROR_LOG_AVAIL is
+ * signaled. This remains raised until a call to opal_elog_read()
+ * returns OPAL_SUCCESS, at which point the operation is complete and
+ * the event is cleared.
+ * This is the READ action from the FSP.
+ */
+
+/*
+ * Design of READ error log:
+ * When we receive a new error log entry notification from the FSP,
+ * we queue it onto the "pending" list.
+ * If the "pending" list is not empty, we start fetching the log from
+ * the FSP.
+ *
+ * When Linux reads a log entry, we dequeue it from the "pending" list
+ * and enqueue it onto a "processed" list. At this point, if the
+ * "pending" list is not empty, we continue to fetch the next log.
+ *
+ * When Linux calls opal_resend_pending_logs(), we move all records
+ * from the processed list back to the head of the pending list, and
+ * reject the currently fetched log if it no longer matches the new
+ * head. If the pending list is then non-empty, we initiate an error
+ * log fetch.
+ *
+ * When Linux acks an error log, we remove it from the processed list.
+ */
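+
+/*
+ * A minimal sketch of the host-side sequence implied above (editor's
+ * illustration; process_log() is a hypothetical consumer, error
+ * paths are elided):
+ *
+ *	uint64_t id, size, type;
+ *
+ *	if (opal_elog_info(&id, &size, &type) == OPAL_SUCCESS) {
+ *		void *buf = malloc(size);
+ *
+ *		if (opal_elog_read(buf, size, id) == OPAL_SUCCESS) {
+ *			process_log(buf, size);
+ *			opal_elog_ack(id);	// frees the record
+ *		}
+ *	}
+ */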
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <cpu.h>
+#include <lock.h>
+#include <errno.h>
+#include <psi.h>
+#include <fsp-elog.h>
+
+/*
+ * Maximum number of entries that are pre-allocated
+ * to keep track of pending elogs to be fetched.
+ */
+#define ELOG_READ_MAX_RECORD 128
+
+/*
+ * The following variables indicate the state of the head log
+ * entry being fetched from the FSP; they are not overwritten
+ * until the next log is retrieved from the FSP.
+ */
+enum elog_head_state {
+	ELOG_STATE_FETCHING, /* In the process of reading the log from FSP */
+	ELOG_STATE_FETCHED, /* Reading the log from FSP completed */
+	ELOG_STATE_NONE, /* Ready to fetch the next log */
+	ELOG_STATE_REJECTED, /* Resend all pending logs to Linux */
+};
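+
+/*
+ * Typical state flow (editor's summary of the code below):
+ * NONE -> FETCHING when a fetch is queued; FETCHING -> FETCHED on
+ * success, raising OPAL_EVENT_ERROR_LOG_AVAIL; FETCHED -> NONE once
+ * Linux reads the log. REJECTED marks an in-flight fetch stale so
+ * the head is fetched again after an ack or resend.
+ */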
+
+/* Structure holding a log ID and size, linked into the pending/processed lists */
+struct fsp_log_entry {
+ uint32_t log_id;
+ size_t log_size;
+ struct list_node link;
+};
+
+static LIST_HEAD(elog_read_pending);
+static LIST_HEAD(elog_read_processed);
+static LIST_HEAD(elog_read_free);
+
+/*
+ * This lock protects the processed and pending lists, and is also
+ * held while updating the state of each log.
+ */
+static struct lock elog_read_lock = LOCK_UNLOCKED;
+
+/* log buffer to copy FSP log for READ */
+#define ELOG_READ_BUFFER_SIZE 0x00040000
+static void *elog_read_buffer = NULL;
+static uint32_t elog_head_id; /* FSP entry ID */
+static size_t elog_head_size; /* actual FSP log size */
+static uint32_t elog_read_retries; /* bad response status count */
+
+/* Initialize the state of the log */
+static enum elog_head_state elog_head_state = ELOG_STATE_NONE;
+
+/* Need forward declaration because of circular dependency */
+static void fsp_elog_queue_fetch(void);
+
+/*
+ * Check the response to the mbox acknowledgment
+ * command sent to the FSP.
+ */
+static void fsp_elog_ack_complete(struct fsp_msg *msg)
+{
+ uint8_t val;
+
+ if (!msg->resp)
+ return;
+ val = (msg->resp->word1 >> 8) & 0xff;
+ if (val != 0)
+ prerror("ELOG: Acknowledgment error\n");
+ fsp_freemsg(msg);
+}
+
+/* Send an Error Log PHYP acknowledgment to the FSP with the entry ID */
+static int64_t fsp_send_elog_ack(uint32_t log_id)
+{
+ struct fsp_msg *ack_msg;
+
+ ack_msg = fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK, 1, log_id);
+ if (!ack_msg) {
+ prerror("ELOG: Failed to allocate ack message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(ack_msg, fsp_elog_ack_complete)) {
+ fsp_freemsg(ack_msg);
+ ack_msg = NULL;
+ prerror("ELOG: Error queueing elog ack complete\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+/* Retrieve the error log from the FSP, using TCEs for the data transfer */
+static void fsp_elog_check_and_fetch_head(void)
+{
+ lock(&elog_read_lock);
+
+ if (elog_head_state != ELOG_STATE_NONE ||
+ list_empty(&elog_read_pending)) {
+ unlock(&elog_read_lock);
+ return;
+ }
+
+ elog_read_retries = 0;
+
+ /* Start fetching first entry from the pending list */
+ fsp_elog_queue_fetch();
+ unlock(&elog_read_lock);
+}
+
+/* This function must be called with the lock held */
+static void fsp_elog_set_head_state(enum elog_head_state state)
+{
+ enum elog_head_state old_state = elog_head_state;
+
+ elog_head_state = state;
+
+ if (state == ELOG_STATE_FETCHED && old_state != ELOG_STATE_FETCHED)
+ opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL,
+ OPAL_EVENT_ERROR_LOG_AVAIL);
+ if (state != ELOG_STATE_FETCHED && old_state == ELOG_STATE_FETCHED)
+ opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, 0);
+}
+
+/*
+ * Once we have tried the maximum number of times to fetch a log
+ * from the FSP, this function deletes the log from the pending
+ * list and updates the state so the next log can be fetched.
+ *
+ * This function must be called with the lock held.
+ */
+static void fsp_elog_fetch_failure(uint8_t fsp_status)
+{
+ struct fsp_log_entry *log_data;
+
+ /* read top list and delete the node */
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+ list_del(&log_data->link);
+ list_add(&elog_read_free, &log_data->link);
+ prerror("ELOG: received invalid data: %x FSP status: 0x%x\n",
+ log_data->log_id, fsp_status);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+}
+
+/* Read response value from FSP for fetch sp data mbox command */
+static void fsp_elog_read_complete(struct fsp_msg *read_msg)
+{
+ uint8_t val;
+
+ lock(&elog_read_lock);
+ val = (read_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(read_msg);
+
+ switch (val) {
+ case FSP_STATUS_SUCCESS:
+ fsp_elog_set_head_state(ELOG_STATE_FETCHED);
+ break;
+
+ case FSP_STATUS_DMA_ERROR:
+ if (elog_read_retries++ < MAX_RETRIES) {
+ /*
+			 * On an error response from the FSP we retry
+			 * the fetch sp data mbox command up to three
+			 * times; if the response is still not valid,
+			 * we give up on this log and record the failure.
+ */
+ fsp_elog_queue_fetch();
+ break;
+ }
+ fsp_elog_fetch_failure(val);
+ break;
+
+ default:
+ fsp_elog_fetch_failure(val);
+ }
+ if (elog_head_state == ELOG_STATE_REJECTED)
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ unlock(&elog_read_lock);
+
+ /* Check if a new log needs fetching */
+ fsp_elog_check_and_fetch_head();
+}
+
+/* read error log from FSP through mbox commands */
+static void fsp_elog_queue_fetch(void)
+{
+ int rc;
+ uint8_t flags = 0;
+ struct fsp_log_entry *entry;
+
+ entry = list_top(&elog_read_pending, struct fsp_log_entry, link);
+ fsp_elog_set_head_state(ELOG_STATE_FETCHING);
+ elog_head_id = entry->log_id;
+ elog_head_size = entry->log_size;
+
+ rc = fsp_fetch_data_queue(flags, FSP_DATASET_ERRLOG, elog_head_id,
+ 0, (void *)PSI_DMA_ERRLOG_READ_BUF,
+ &elog_head_size, fsp_elog_read_complete);
+ if (rc) {
+ prerror("ELOG: failed to queue read message: %d\n", rc);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ }
+}
+
+/* OPAL interface for powernv to read the log size and log ID from sapphire */
+static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
+ uint64_t *opal_elog_size, uint64_t *elog_type)
+{
+ struct fsp_log_entry *log_data;
+
+ /* copy type of the error log */
+ *elog_type = ELOG_TYPE_PEL;
+
+ lock(&elog_read_lock);
+ if (elog_head_state != ELOG_STATE_FETCHED) {
+ unlock(&elog_read_lock);
+ return OPAL_WRONG_STATE;
+ }
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+	*opal_elog_id = log_data->log_id;
+ *opal_elog_size = log_data->log_size;
+ unlock(&elog_read_lock);
+ return OPAL_SUCCESS;
+}
+
+/* OPAL interface for powernv to read a log from sapphire */
+static int64_t fsp_opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
+			uint64_t opal_elog_id)
+{
+ struct fsp_log_entry *log_data;
+
+ /*
+	 * Read the top entry from the list; the top record is always
+	 * the one that has been fetched from the FSP.
+ */
+ lock(&elog_read_lock);
+ if (elog_head_state != ELOG_STATE_FETCHED) {
+ unlock(&elog_read_lock);
+ return OPAL_WRONG_STATE;
+ }
+
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+
+	/* Check the log ID and log size match, then read the log from the buffer */
+	if ((opal_elog_id != log_data->log_id) ||
+	    (opal_elog_size != log_data->log_size)) {
+ unlock(&elog_read_lock);
+ return OPAL_PARAMETER;
+ }
+
+ memcpy((void *)buffer, elog_read_buffer, opal_elog_size);
+
+ /*
+	 * Once Linux has read the log, move the record from the
+	 * pending list to the processed list and change the state
+	 * so the next record can be fetched.
+ */
+ list_del(&log_data->link);
+ list_add(&elog_read_processed, &log_data->link);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ unlock(&elog_read_lock);
+
+ /* read error log from FSP */
+ fsp_elog_check_and_fetch_head();
+
+ return OPAL_SUCCESS;
+}
+
+/* Reject the current head log so that it gets fetched or notified again */
+static void elog_reject_head(void)
+{
+ if (elog_head_state == ELOG_STATE_FETCHING)
+ fsp_elog_set_head_state(ELOG_STATE_REJECTED);
+ if (elog_head_state == ELOG_STATE_FETCHED)
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+}
+
+/* OPAL interface for powernv to ack the FSP with a log ID */
+static int64_t fsp_opal_elog_ack(uint64_t ack_id)
+{
+ int rc = 0;
+ struct fsp_log_entry *record, *next_record;
+
+ /* Send acknowledgement to FSP */
+ rc = fsp_send_elog_ack(ack_id);
+ if (rc != OPAL_SUCCESS) {
+ prerror("ELOG: failed to send acknowledgement: %d\n", rc);
+ return rc;
+ }
+ lock(&elog_read_lock);
+ if (ack_id == elog_head_id)
+ elog_reject_head();
+ list_for_each_safe(&elog_read_pending, record, next_record, link) {
+ if (record->log_id != ack_id)
+ continue;
+ list_del(&record->link);
+ list_add(&elog_read_free, &record->link);
+ }
+ list_for_each_safe(&elog_read_processed, record, next_record, link) {
+ if (record->log_id != ack_id)
+ continue;
+ list_del(&record->link);
+ list_add(&elog_read_free, &record->link);
+ }
+ unlock(&elog_read_lock);
+
+ return rc;
+}
+
+/*
+ * Once Linux kexec's, it asks us to resend all logs that
+ * it has not yet acknowledged.
+ */
+static void fsp_opal_resend_pending_logs(void)
+{
+ struct fsp_log_entry *entry;
+
+ lock(&elog_read_lock);
+
+ /*
+	 * Move all records from the processed list back to the
+	 * head of the pending list.
+ */
+ while (!list_empty(&elog_read_processed)) {
+ entry = list_pop(&elog_read_processed,
+ struct fsp_log_entry, link);
+ list_add(&elog_read_pending, &entry->link);
+ }
+
+ /*
+ * If the current fetched or fetching log doesn't match our
+ * new pending list head, then reject it
+ */
+ if (!list_empty(&elog_read_pending)) {
+ entry = list_top(&elog_read_pending,
+ struct fsp_log_entry, link);
+ if (entry->log_id != elog_head_id)
+ elog_reject_head();
+ }
+
+ unlock(&elog_read_lock);
+
+ /* Read error log from FSP if needed */
+ fsp_elog_check_and_fetch_head();
+}
+
+/* fsp elog notify function */
+static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ int rc = 0;
+ struct fsp_log_entry *record;
+ uint32_t log_id;
+ uint32_t log_size;
+
+ if (cmd_sub_mod != FSP_CMD_ERRLOG_NOTIFICATION)
+ return false;
+
+ log_id = msg->data.words[0];
+ log_size = msg->data.words[1];
+
+ printf("ELOG: Notified of log 0x%08x (size: %d)\n",
+ log_id, log_size);
+
+	/* Hold the lock until we have taken a node off elog_read_free */
+ lock(&elog_read_lock);
+ if (!list_empty(&elog_read_free)) {
+ /* Create a new entry in the pending list */
+ record = list_pop(&elog_read_free, struct fsp_log_entry, link);
+ record->log_id = log_id;
+ record->log_size = log_size;
+ list_add_tail(&elog_read_pending, &record->link);
+ unlock(&elog_read_lock);
+
+ /* Send response back to FSP for a new elog notify message */
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog notification"
+ " response: %d\n", rc);
+
+ /* read error log from FSP */
+ fsp_elog_check_and_fetch_head();
+
+ } else {
+ printf("ELOG: Log entry 0x%08x discarded\n", log_id);
+
+		/* elog_read_free was empty; drop the lock before responding */
+ unlock(&elog_read_lock);
+
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog notification"
+ " response: %d\n", rc);
+ /*
+		 * The list is full with the maximum number of records,
+		 * so we send a "discarded by PHYP" (condition full) ack
+		 * to the FSP.
+ *
+ * At some point in the future, we'll get notified again.
+ * This is largely up to FSP as to when they tell us about
+ * the log again.
+ */
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK | 0x02,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog ack"
+ " response: %d\n", rc);
+ }
+
+ return true;
+}
+
+static struct fsp_client fsp_get_elog_notify = {
+ .message = fsp_elog_msg,
+};
+
+/* Pre-allocate memory for reading error log from FSP */
+static int init_elog_read_free_list(uint32_t num_entries)
+{
+ struct fsp_log_entry *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct fsp_log_entry) * num_entries);
+ if (!entry)
+ goto out_err;
+
+ for (i = 0; i < num_entries; ++i) {
+ list_add_tail(&elog_read_free, &entry->link);
+ entry++;
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/* fsp elog read init function */
+void fsp_elog_read_init(void)
+{
+ int val = 0;
+
+ if (!fsp_present())
+ return;
+
+ elog_read_buffer = memalign(TCE_PSIZE, ELOG_READ_BUFFER_SIZE);
+ if (!elog_read_buffer) {
+ prerror("FSP: could not allocate FSP ELOG_READ_BUFFER!\n");
+ return;
+ }
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_ERRLOG_READ_BUF, elog_read_buffer,
+ PSI_DMA_ERRLOG_READ_BUF_SZ);
+
+	/* Pre-allocate memory for 128 records */
+ val = init_elog_read_free_list(ELOG_READ_MAX_RECORD);
+ if (val != 0)
+ return;
+
+	/* Register for Error Log class D2 */
+ fsp_register_client(&fsp_get_elog_notify, FSP_MCLASS_ERR_LOG);
+
+	/* Register OPAL interfaces */
+ opal_register(OPAL_ELOG_READ, fsp_opal_elog_read, 3);
+ opal_register(OPAL_ELOG_ACK, fsp_opal_elog_ack, 1);
+ opal_register(OPAL_ELOG_RESEND, fsp_opal_resend_pending_logs, 0);
+ opal_register(OPAL_ELOG_SIZE, fsp_opal_elog_info, 3);
+}
diff --git a/hw/fsp/fsp-elog-write.c b/hw/fsp/fsp-elog-write.c
new file mode 100644
index 00000000..ee79c4d9
--- /dev/null
+++ b/hw/fsp/fsp-elog-write.c
@@ -0,0 +1,643 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This code enables the generation and pushing of error logs
+ * from powernv and sapphire to the FSP.
+ * Critical events from sapphire that need to be reported
+ * are pushed to the FSP after converting the error log to
+ * the Platform Error Log (PEL) format.
+ * This is termed the WRITE action to the FSP.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <cpu.h>
+#include <lock.h>
+#include <errno.h>
+#include <fsp-elog.h>
+
+/*
+ * Maximum number of buffers that are pre-allocated
+ * to hold elogs reported by sapphire and powernv.
+ */
+#define ELOG_WRITE_MAX_RECORD 64
+
+static LIST_HEAD(elog_write_pending);
+static LIST_HEAD(elog_write_free);
+
+static struct lock elog_write_lock = LOCK_UNLOCKED;
+static struct lock elog_panic_write_lock = LOCK_UNLOCKED;
+
+/* Platform Log ID as per the spec */
+static uint32_t sapphire_elog_id = 0xB0000000;
+static uint32_t powernv_elog_id = 0xB1000000;
+
+/* Log buffer holding the PEL-formatted log for WRITE to the FSP */
+#define ELOG_WRITE_BUFFER_SIZE 0x00050000
+static void *elog_write_buffer = NULL;
+
+#define ELOG_PANIC_WRITE_BUFFER_SIZE 0x0010000
+static void *elog_panic_write_buffer = NULL;
+
+struct opal_errorlog *panic_write_buffer;
+static int panic_write_buffer_valid;
+static uint32_t elog_write_retries;
+
+/* Need forward declaration because of circular dependency */
+static int create_opal_event(struct opal_errorlog *elog_data, char *pel_buffer);
+static int opal_send_elog_to_fsp(void);
+
+void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
+ const char *fmt, ...)
+{
+ struct opal_errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+	/* Log the error to the sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ /* Append any number of call out dumps */
+ if (e_info->call_out)
+ e_info->call_out(buf, data, size);
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
+
+
+void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
+{
+ struct opal_errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+	/* Log the error to the sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
+
+static struct opal_errorlog *get_write_buffer(int opal_event_severity)
+{
+ struct opal_errorlog *buf;
+
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_free)) {
+ unlock(&elog_write_lock);
+ if (opal_event_severity == OPAL_ERROR_PANIC) {
+ lock(&elog_panic_write_lock);
+ if (panic_write_buffer_valid == 0) {
+ buf = (struct opal_errorlog *)
+ panic_write_buffer;
+ panic_write_buffer_valid = 1; /* In Use */
+ unlock(&elog_panic_write_lock);
+ } else {
+ unlock(&elog_panic_write_lock);
+ prerror("ELOG: Write buffer full. Retry later\n");
+ return NULL;
+ }
+ } else {
+ prerror("ELOG: Write buffer list is full. Retry later\n");
+ return NULL;
+ }
+ } else {
+ buf = list_pop(&elog_write_free, struct opal_errorlog, link);
+ unlock(&elog_write_lock);
+ }
+
+ memset(buf, 0, sizeof(struct opal_errorlog));
+ return buf;
+}
+
+/* Reporting of error via struct opal_errorlog */
+struct opal_errorlog *opal_elog_create(struct opal_err_info *e_info)
+{
+ struct opal_errorlog *buf;
+
+ buf = get_write_buffer(e_info->sev);
+ if (buf) {
+ buf->error_event_type = e_info->err_type;
+ buf->component_id = e_info->cmp_id;
+ buf->subsystem_id = e_info->subsystem;
+ buf->event_severity = e_info->sev;
+ buf->event_subtype = e_info->event_subtype;
+ buf->reason_code = e_info->reason_code;
+ buf->elog_origin = ORG_SAPPHIRE;
+ }
+
+ return buf;
+}
+
+static void remove_elog_head_entry(void)
+{
+ struct opal_errorlog *entry;
+
+ lock(&elog_write_lock);
+ entry = list_pop(&elog_write_pending, struct opal_errorlog, link);
+ list_add_tail(&elog_write_free, &entry->link);
+ elog_write_retries = 0;
+ unlock(&elog_write_lock);
+}
+
+static void opal_fsp_write_complete(struct fsp_msg *read_msg)
+{
+ uint8_t val;
+
+ val = (read_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(read_msg);
+
+ switch (val) {
+ case FSP_STATUS_SUCCESS:
+ remove_elog_head_entry();
+ break;
+
+ default:
+ if (elog_write_retries++ >= MAX_RETRIES) {
+ remove_elog_head_entry();
+ prerror("ELOG: Error in writing to FSP!\n");
+ }
+ break;
+ }
+
+ if (opal_send_elog_to_fsp() != OPAL_SUCCESS)
+		prerror("ELOG: Error sending elog to FSP!\n");
+}
+
+/* write PEL format hex dump of the log to FSP */
+static int64_t fsp_opal_elog_write(size_t opal_elog_size)
+{
+ struct fsp_msg *elog_msg;
+
+ elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size,
+ 0, PSI_DMA_ERRLOG_WRITE_BUF);
+ if (!elog_msg) {
+ prerror("ELOG: Failed to create message for WRITE to FSP\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(elog_msg, opal_fsp_write_complete)) {
+ fsp_freemsg(elog_msg);
+ elog_msg = NULL;
+ prerror("FSP: Error queueing elog update\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+static int opal_send_elog_to_fsp(void)
+{
+ struct opal_errorlog *head;
+ int rc = OPAL_SUCCESS;
+ int pel_offset = 0;
+
+	/*
+	 * Convert the entry to PEL format and push it down to the
+	 * FSP. We then wait for the ack from the FSP.
+	 */
+ lock(&elog_write_lock);
+ if (!list_empty(&elog_write_pending)) {
+ head = list_top(&elog_write_pending,
+ struct opal_errorlog, link);
+ pel_offset = create_opal_event(head, (char *)elog_write_buffer);
+ rc = fsp_opal_elog_write(pel_offset);
+ unlock(&elog_write_lock);
+ return rc;
+ }
+ unlock(&elog_write_lock);
+ return rc;
+}
+
+static int opal_push_logs_sync_to_fsp(struct opal_errorlog *buf)
+{
+ struct fsp_msg *elog_msg;
+ int opal_elog_size = 0;
+ int rc = OPAL_SUCCESS;
+
+ lock(&elog_panic_write_lock);
+ opal_elog_size = create_opal_event(buf,
+ (char *)elog_panic_write_buffer);
+
+ elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size,
+ 0, PSI_DMA_ELOG_PANIC_WRITE_BUF);
+ if (!elog_msg) {
+ prerror("ELOG: Failed to create message for WRITE to FSP\n");
+ unlock(&elog_panic_write_lock);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ if (fsp_sync_msg(elog_msg, false)) {
+ fsp_freemsg(elog_msg);
+ rc = OPAL_INTERNAL_ERROR;
+ } else {
+ rc = (elog_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(elog_msg);
+ }
+
+ if ((buf == panic_write_buffer) && (panic_write_buffer_valid == 1)) {
+ panic_write_buffer_valid = 0;
+ unlock(&elog_panic_write_lock);
+ } else {
+		/* The buffer came from the elog_write free list; put it back */
+ unlock(&elog_panic_write_lock);
+ lock(&elog_write_lock);
+ list_add_tail(&elog_write_free, &buf->link);
+ unlock(&elog_write_lock);
+ }
+ return rc;
+}
+
+int elog_fsp_commit(struct opal_errorlog *buf)
+{
+ int rc = OPAL_SUCCESS;
+
+ if (buf->event_severity == OPAL_ERROR_PANIC) {
+ rc = opal_push_logs_sync_to_fsp(buf);
+ return rc;
+ }
+
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_pending)) {
+ list_add_tail(&elog_write_pending, &buf->link);
+ unlock(&elog_write_lock);
+ rc = opal_send_elog_to_fsp();
+ return rc;
+ }
+ list_add_tail(&elog_write_pending, &buf->link);
+ unlock(&elog_write_lock);
+ return rc;
+}
+
+/*
+ * This function is called from powernv to push logs
+ * to the FSP.
+ */
+static int opal_commit_log_to_fsp(struct opal_errorlog *buf)
+{
+ struct opal_errorlog *opal_buf;
+ int rc = OPAL_SUCCESS;
+
+	/*
+	 * Copy the buffer into sapphire memory, queue it to be
+	 * pushed to the FSP, and return.
+	 */
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_free)) {
+ unlock(&elog_write_lock);
+ prerror("ELOG: Error! Write buffer list is full. Retry later\n");
+ return -1;
+ }
+ opal_buf = list_pop(&elog_write_free, struct opal_errorlog, link);
+ unlock(&elog_write_lock);
+ memcpy(opal_buf, buf, sizeof(struct opal_errorlog));
+ opal_buf->elog_origin = ORG_POWERNV;
+ rc = elog_fsp_commit(opal_buf);
+ return rc;
+}
+
+int opal_elog_update_user_dump(struct opal_errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size)
+{
+ char *buffer;
+ struct opal_user_data_section *tmp;
+
+ if (!buf) {
+ prerror("ELOG: Cannot update user data. Buffer is invalid\n");
+ return -1;
+ }
+
+ buffer = (char *)buf->user_data_dump + buf->user_section_size;
+ if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) {
+ prerror("ELOG: Size of dump data overruns buffer\n");
+ return -1;
+ }
+
+ tmp = (struct opal_user_data_section *)buffer;
+ tmp->tag = tag;
+ tmp->size = size + sizeof(struct opal_user_data_section) - 1;
+ memcpy(tmp->data_dump, data, size);
+
+ buf->user_section_size += tmp->size;
+ buf->user_section_count++;
+ return 0;
+}
+
+/* Create MTMS section for sapphire log */
+static void create_mtms_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_mtms_section *mtms = (struct opal_mtms_section *)
+ (pel_buffer + *pel_offset);
+
+ mtms->v6header.id = ELOG_SID_MACHINE_TYPE;
+ mtms->v6header.length = MTMS_SECTION_SIZE;
+ mtms->v6header.version = OPAL_EXT_HRD_VER;
+ mtms->v6header.subtype = 0;
+ mtms->v6header.component_id = elog_data->component_id;
+
+ memset(mtms->model, 0x00, sizeof(mtms->model));
+ memcpy(mtms->model, dt_prop_get(dt_root, "model"), OPAL_SYS_MODEL_LEN);
+ memset(mtms->serial_no, 0x00, sizeof(mtms->serial_no));
+
+ memcpy(mtms->serial_no, dt_prop_get(dt_root, "system-id"),
+ OPAL_SYS_SERIAL_LEN);
+ *pel_offset += MTMS_SECTION_SIZE;
+}
+
+/* Create extended header section */
+static void create_extended_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ const char *opalmodel = NULL;
+ uint64_t extd_time;
+
+ struct opal_extended_header_section *extdhdr =
+ (struct opal_extended_header_section *)
+ (pel_buffer + *pel_offset);
+
+ extdhdr->v6header.id = ELOG_SID_EXTENDED_HEADER;
+ extdhdr->v6header.length = EXTENDED_HEADER_SECTION_SIZE;
+ extdhdr->v6header.version = OPAL_EXT_HRD_VER;
+ extdhdr->v6header.subtype = 0;
+ extdhdr->v6header.component_id = elog_data->component_id;
+
+ memset(extdhdr->model, 0x00, sizeof(extdhdr->model));
+ opalmodel = dt_prop_get(dt_root, "model");
+ memcpy(extdhdr->model, opalmodel, OPAL_SYS_MODEL_LEN);
+
+ memset(extdhdr->serial_no, 0x00, sizeof(extdhdr->serial_no));
+ memcpy(extdhdr->serial_no, dt_prop_get(dt_root, "system-id"),
+ OPAL_SYS_SERIAL_LEN);
+
+ memset(extdhdr->opal_release_version, 0x00,
+ sizeof(extdhdr->opal_release_version));
+ memset(extdhdr->opal_subsys_version, 0x00,
+ sizeof(extdhdr->opal_subsys_version));
+
+ fsp_rtc_get_cached_tod(&extdhdr->extended_header_date, &extd_time);
+ extdhdr->extended_header_time = extd_time >> 32;
+ extdhdr->opal_symid_len = 0;
+ memset(extdhdr->opalsymid, 0x00, sizeof(extdhdr->opalsymid));
+
+ *pel_offset += EXTENDED_HEADER_SECTION_SIZE;
+}
+
+/* Set SRC type */
+static void settype(struct opal_src_section *src, uint8_t src_type)
+{
+ char type[4];
+ sprintf(type, "%02X", src_type);
+ memcpy(src->srcstring, type, 2);
+}
+
+/* set SRC subsystem type */
+static void setsubsys(struct opal_src_section *src, uint8_t src_subsys)
+{
+ char subsys[4];
+ sprintf(subsys, "%02X", src_subsys);
+ memcpy(src->srcstring+2, subsys, 2);
+}
+
+/* Set reason code of SRC */
+static void setrefcode(struct opal_src_section *src, uint16_t src_refcode)
+{
+ char refcode[8];
+ sprintf(refcode, "%04X", src_refcode);
+ memcpy(src->srcstring+4, refcode, 4);
+}
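+
+/*
+ * Example (editor's note): a type byte of 0xBB, a subsystem of
+ * 0x08 and a reason code of 0x1234 would yield the printable SRC
+ * string "BB081234" in src->srcstring.
+ */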
+
+/* Create SRC section of OPAL log */
+static void create_src_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_src_section *src = (struct opal_src_section *)
+ (pel_buffer + *pel_offset);
+
+ src->v6header.id = ELOG_SID_PRIMARY_SRC;
+ src->v6header.length = SRC_SECTION_SIZE;
+ src->v6header.version = OPAL_ELOG_VERSION;
+ src->v6header.subtype = OPAL_ELOG_SST;
+ src->v6header.component_id = elog_data->component_id;
+
+ src->version = OPAL_SRC_SEC_VER;
+ src->flags = 0;
+ src->wordcount = OPAL_SRC_MAX_WORD_COUNT;
+ src->srclength = SRC_LENGTH;
+ settype(src, OPAL_SRC_TYPE_ERROR);
+ setsubsys(src, OPAL_FAILING_SUBSYSTEM);
+ setrefcode(src, elog_data->reason_code);
+	memset(src->hexwords, 0, (8 * 4));
+ src->hexwords[0] = OPAL_SRC_FORMAT;
+ src->hexwords[4] = elog_data->additional_info[0];
+ src->hexwords[5] = elog_data->additional_info[1];
+ src->hexwords[6] = elog_data->additional_info[2];
+ src->hexwords[7] = elog_data->additional_info[3];
+ *pel_offset += SRC_SECTION_SIZE;
+}
+
+/* Create user header section */
+static void create_user_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_user_header_section *usrhdr =
+ (struct opal_user_header_section *)
+ (pel_buffer + *pel_offset);
+
+ usrhdr->v6header.id = ELOG_SID_USER_HEADER;
+ usrhdr->v6header.length = USER_HEADER_SECTION_SIZE;
+ usrhdr->v6header.version = OPAL_ELOG_VERSION;
+ usrhdr->v6header.subtype = OPAL_ELOG_SST;
+ usrhdr->v6header.component_id = elog_data->component_id;
+
+ usrhdr->subsystem_id = elog_data->subsystem_id;
+ usrhdr->event_scope = 0;
+ usrhdr->event_severity = elog_data->event_severity;
+ usrhdr->event_type = elog_data->event_subtype;
+
+ if (elog_data->elog_origin == ORG_SAPPHIRE)
+ usrhdr->action_flags = ERRL_ACTION_REPORT;
+ else
+ usrhdr->action_flags = ERRL_ACTION_NONE;
+
+ *pel_offset += USER_HEADER_SECTION_SIZE;
+}
+
+/* Create private header section */
+static void create_private_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ uint64_t ctime;
+ struct opal_private_header_section *privhdr =
+ (struct opal_private_header_section *)
+ pel_buffer;
+
+ privhdr->v6header.id = ELOG_SID_PRIVATE_HEADER;
+ privhdr->v6header.length = PRIVATE_HEADER_SECTION_SIZE;
+ privhdr->v6header.version = OPAL_ELOG_VERSION;
+ privhdr->v6header.subtype = OPAL_ELOG_SST;
+ privhdr->v6header.component_id = elog_data->component_id;
+
+ fsp_rtc_get_cached_tod(&privhdr->create_date, &ctime);
+ privhdr->create_time = ctime >> 32;
+ privhdr->section_count = 5;
+
+ privhdr->creator_subid_hi = 0x00;
+ privhdr->creator_subid_lo = 0x00;
+
+ if (elog_data->elog_origin == ORG_SAPPHIRE) {
+ privhdr->plid = ++sapphire_elog_id;
+ privhdr->creator_id = OPAL_CID_SAPPHIRE;
+ } else {
+ privhdr->plid = ++powernv_elog_id;
+ privhdr->creator_id = OPAL_CID_POWERNV;
+ }
+ privhdr->log_entry_id = 0x00; /* entry id is updated by FSP */
+
+ *pel_offset += PRIVATE_HEADER_SECTION_SIZE;
+}
+
+static void create_user_defined_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ char *dump = (char *)pel_buffer + *pel_offset;
+ char *opal_buf = (char *)elog_data->user_data_dump;
+ struct opal_user_section *usrhdr;
+ struct opal_user_data_section *opal_usr_data;
+ struct opal_private_header_section *privhdr =
+ (struct opal_private_header_section *)pel_buffer;
+ int i;
+
+ for (i = 0; i < elog_data->user_section_count; i++) {
+
+ usrhdr = (struct opal_user_section *)dump;
+ opal_usr_data = (struct opal_user_data_section *)opal_buf;
+
+ usrhdr->v6header.id = ELOG_SID_USER_DEFINED;
+ usrhdr->v6header.version = OPAL_ELOG_VERSION;
+ usrhdr->v6header.length = sizeof(struct opal_v6_header) +
+ opal_usr_data->size;
+ usrhdr->v6header.subtype = OPAL_ELOG_SST;
+ usrhdr->v6header.component_id = elog_data->component_id;
+
+ memcpy(usrhdr->dump, opal_buf, opal_usr_data->size);
+ *pel_offset += usrhdr->v6header.length;
+ dump += usrhdr->v6header.length;
+ opal_buf += opal_usr_data->size;
+ privhdr->section_count++;
+ }
+}
+
+/* Create all required sections of the PEL log and write them to the TCE buffer */
+static int create_opal_event(struct opal_errorlog *elog_data, char *pel_buffer)
+{
+ int pel_offset = 0;
+
+ memset(pel_buffer, 0, PSI_DMA_ERRLOG_WRITE_BUF_SZ);
+
+ create_private_header_section(elog_data, pel_buffer, &pel_offset);
+ create_user_header_section(elog_data, pel_buffer, &pel_offset);
+ create_src_section(elog_data, pel_buffer, &pel_offset);
+ create_extended_header_section(elog_data, pel_buffer, &pel_offset);
+ create_mtms_section(elog_data, pel_buffer, &pel_offset);
+ if (elog_data->user_section_count)
+ create_user_defined_section(elog_data, pel_buffer, &pel_offset);
+
+ return pel_offset;
+}
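+
+/*
+ * Resulting PEL layout in the write buffer (editor's summary of
+ * the call sequence above):
+ *
+ *	+---------------------------+ offset 0
+ *	| Private header            |
+ *	| User header               |
+ *	| Primary SRC               |
+ *	| Extended header           |
+ *	| MTMS                      |
+ *	| User-defined section(s)   |  (only if user data attached)
+ *	+---------------------------+ pel_offset (returned)
+ */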
+
+/* Pre-allocate memory for writing error log to FSP */
+static int init_elog_write_free_list(uint32_t num_entries)
+{
+ struct opal_errorlog *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct opal_errorlog) * num_entries);
+ if (!entry)
+ goto out_err;
+
+ for (i = 0; i < num_entries; ++i) {
+ list_add_tail(&elog_write_free, &entry->link);
+ entry++;
+ }
+
+ /* Pre-allocate one single buffer for PANIC path */
+ panic_write_buffer = zalloc(sizeof(struct opal_errorlog));
+ if (!panic_write_buffer)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/* fsp elog init function */
+void fsp_elog_write_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ elog_panic_write_buffer = memalign(TCE_PSIZE,
+ ELOG_PANIC_WRITE_BUFFER_SIZE);
+ if (!elog_panic_write_buffer) {
+ prerror("FSP: could not allocate ELOG_PANIC_WRITE_BUFFER!\n");
+ return;
+ }
+
+ elog_write_buffer = memalign(TCE_PSIZE, ELOG_WRITE_BUFFER_SIZE);
+ if (!elog_write_buffer) {
+ prerror("FSP: could not allocate ELOG_WRITE_BUFFER!\n");
+ return;
+ }
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_ELOG_PANIC_WRITE_BUF, elog_panic_write_buffer,
+ PSI_DMA_ELOG_PANIC_WRITE_BUF_SZ);
+
+ fsp_tce_map(PSI_DMA_ERRLOG_WRITE_BUF, elog_write_buffer,
+ PSI_DMA_ERRLOG_WRITE_BUF_SZ);
+
+	/* Pre-allocate memory for ELOG_WRITE_MAX_RECORD (64) records */
+ if (init_elog_write_free_list(ELOG_WRITE_MAX_RECORD)) {
+ prerror("ELOG: Cannot allocate WRITE buffers to log errors!\n");
+ return;
+ }
+
+	/* Register OPAL interface */
+ opal_register(OPAL_ELOG_SEND, opal_commit_log_to_fsp, 1);
+}
diff --git a/hw/fsp/fsp-leds.c b/hw/fsp/fsp-leds.c
new file mode 100644
index 00000000..69b05830
--- /dev/null
+++ b/hw/fsp/fsp-leds.c
@@ -0,0 +1,1080 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * LED location code and indicator handling
+ */
+#include <skiboot.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <console.h>
+#include <timebase.h>
+#include <device.h>
+#include <fsp-leds.h>
+#include <stdio.h>
+#include <spcn.h>
+#include <timebase.h>
+#include <hdata/spira.h>
+#include <hdata/hdata.h>
+#include <fsp-elog.h>
+
+/* Debug prefix */
+#define PREFIX "FSPLED: "
+
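+/*
+ * Helpers to marshal fixed-width fields to and from the TCE-mapped
+ * LED buffers; both macros advance the buffer pointer past the field
+ * they touch.
+ */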
+#define buf_write(p, type, val) do { *(type *)(p) = val;\
+ p += sizeof(type); } while(0)
+#define buf_read(p, type, addr) do { *addr = *(type *)(p);\
+ p += sizeof(type); } while(0)
+
+//#define DBG(fmt...) do { printf(PREFIX fmt); } while(0)
+#define DBG(fmt...) do { } while(0)
+
+/* Maximum number of times a failed SPCN command is replayed */
+#define SPCN_REPLAY_THRESHOLD 2
+
+/* Sapphire LED support */
+static bool led_support;
+
+/*
+ * PSI mapped buffer for LED data
+ *
+ * Mapped once and never unmapped. Used for fetching all
+ * available LED information and creating the list. Also
+ * used for setting individual LED state.
+ *
+ */
+static void *led_buffer;
+
+/* Maintain list of all LEDs
+ *
+ * The contents here will be used to cater requests from FSP
+ * async commands and HV initiated OPAL calls.
+ */
+static struct list_head cec_ledq; /* CEC LED list */
+static struct list_head encl_ledq; /* Enclosure LED list */
+
+/* LED lock */
+static struct lock led_lock = LOCK_UNLOCKED;
+
+/* Last SPCN command */
+static u32 last_spcn_cmd;
+static int replay = 0;
+
+
+static void fsp_leds_query_spcn(void);
+static void fsp_read_leds_data_complete(struct fsp_msg *msg);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_SPCN, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_BUFF, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_LC, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_STATE, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_SUPPORT, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA, NULL);
+
+/* Find descendant LED record with CEC location code in CEC list */
+static struct fsp_led_data *fsp_find_cec_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find encl LED record with ENCL location code in ENCL list */
+static struct fsp_led_data *fsp_find_encl_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find encl LED record with CEC location code in CEC list */
+static struct fsp_led_data *fsp_find_encl_cec_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strstr(led->loc_code, "-"))
+ continue;
+ if (!strstr(loc_code, led->loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find encl LED record with CEC location code in ENCL list */
+static struct fsp_led_data *fsp_find_encl_encl_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ if (!strstr(loc_code, led->loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Compute the ENCL LED status in CEC list */
+static void compute_encl_status_cec(struct fsp_led_data *encl_led)
+{
+ struct fsp_led_data *led, *next;
+
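+	/*
+	 * Clear the rolled-up bits first, then re-derive them from all
+	 * descendant LEDs under this enclosure.
+	 */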
+ encl_led->status &= ~SPCN_LED_IDENTIFY_MASK;
+ encl_led->status &= ~SPCN_LED_FAULT_MASK;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (!strstr(led->loc_code, encl_led->loc_code))
+ continue;
+
+		/* Don't count the enclosure LED itself */
+ if (!strcmp(led->loc_code, encl_led->loc_code))
+ continue;
+
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ encl_led->status |= SPCN_LED_IDENTIFY_MASK;
+
+ if (led->status & SPCN_LED_FAULT_MASK)
+ encl_led->status |= SPCN_LED_FAULT_MASK;
+ }
+}
+
+/* Check whether this is an enclosure LED */
+static bool is_enclosure_led(char *loc_code)
+{
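+	/*
+	 * Enclosure location codes carry no '-' separator and must be
+	 * present in both the CEC and ENCL lists.
+	 */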
+ if (strstr(loc_code, "-"))
+ return false;
+ if (!fsp_find_cec_led(loc_code) || !fsp_find_encl_led(loc_code))
+ return false;
+ return true;
+}
+
+/*
+ * Update both local LED lists to reflect the LED state changes made
+ * by the most recent SPCN command. Subsequent LED requests will be
+ * served from these updated lists.
+ */
+static void update_led_list(char *loc_code, u32 led_state)
+{
+ struct fsp_led_data *led = NULL, *encl_led = NULL, *encl_cec_led = NULL;
+ bool is_encl_led = is_enclosure_led(loc_code);
+
+ if (is_encl_led)
+ goto enclosure;
+
+ /* Descendant LED in CEC list */
+ led = fsp_find_cec_led(loc_code);
+ if (!led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+			"LED: Could not find descendant LED in CEC LC=%s\n",
+ loc_code);
+ return;
+ }
+ led->status = led_state;
+
+enclosure:
+ /* Enclosure LED in CEC list */
+ encl_cec_led = fsp_find_encl_cec_led(loc_code);
+ if (!encl_cec_led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find enclosure LED in CEC LC=%s\n",
+ loc_code);
+ return;
+ }
+
+ /* Enclosure LED in ENCL list */
+ encl_led = fsp_find_encl_encl_led(loc_code);
+ if (!encl_led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find enclosure LED in ENCL LC=%s\n",
+ loc_code);
+ return;
+ }
+
+	/* Compute the descendants' rolled-up status */
+ compute_encl_status_cec(encl_cec_led);
+
+	/* Check whether exclusive bits are set */
+ if (encl_cec_led->excl_bit & FSP_LED_EXCL_FAULT)
+ encl_cec_led->status |= SPCN_LED_FAULT_MASK;
+
+ if (encl_cec_led->excl_bit & FSP_LED_EXCL_IDENTIFY)
+ encl_cec_led->status |= SPCN_LED_IDENTIFY_MASK;
+
+ /* Copy over */
+ encl_led->status = encl_cec_led->status;
+ encl_led->excl_bit = encl_cec_led->excl_bit;
+}
+
+static void fsp_spcn_set_led_completion(struct fsp_msg *msg)
+{
+ bool fail;
+ u16 ckpt_status;
+ char loc_code[LOC_CODE_SIZE + 1];
+ struct fsp_msg *resp = msg->resp;
+ u32 cmd = FSP_RSP_SET_LED_STATE;
+ u8 status = resp->word1 & 0xff00;
+
+ /*
+ * LED state update request came as part of FSP async message
+	 * FSP_CMD_SET_LED_STATE, hence we need to send a response message.
+ */
+ fail = (status == FSP_STATUS_INVALID_DATA) ||
+ (status == FSP_STATUS_DMA_ERROR) ||
+ (status == FSP_STATUS_SPCN_ERROR);
+
+ /* SPCN command failed: Identify the command and roll back changes */
+ if (fail) {
+ log_simple_error(&e_info(OPAL_RC_LED_SPCN),
+ "LED: Last SPCN command failed, status=%02x\n",
+ status);
+ cmd |= FSP_STATUS_GENERIC_ERROR;
+
+ /* Identify the failed command */
+ memset(loc_code, 0, sizeof(loc_code));
+ strncpy(loc_code,
+ ((struct fsp_led_data *)(msg->user_data))->loc_code,
+ LOC_CODE_SIZE);
+ ckpt_status = ((struct fsp_led_data *)(msg->user_data))
+ ->ckpt_status;
+
+ /* Rollback the changes */
+ update_led_list(loc_code, ckpt_status);
+ }
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+}
+
+/*
+ * Set the state of the LED pointed by the location code
+ *
+ * LED command: FAULT state or IDENTIFY state
+ * LED state : OFF (reset) or ON (set)
+ *
+ * SPCN TCE mapped buffer entries for setting LED state
+ *
+ * struct spcn_led_data {
+ * u8 lc_len;
+ * u16 state;
+ * char lc_code[LOC_CODE_SIZE];
+ * };
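+ *
+ * For illustration (hypothetical location code "U7879.001.DQD0239-P1",
+ * 20 bytes, so lc_len = 0x14): the code below serializes lc_len first,
+ * then the ASCII location code, then the 16-bit state; note that this
+ * on-wire order differs from the struct layout shown above.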
+ */
+static int fsp_msg_set_led_state(char *loc_code, bool command, bool state)
+{
+ struct spcn_led_data sled;
+ struct fsp_msg *msg = NULL;
+ struct fsp_led_data *led = NULL;
+ void *buf = led_buffer;
+ u16 data_len = 0;
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ sled.lc_len = strlen(loc_code);
+ strncpy(sled.lc_code, loc_code, sled.lc_len);
+
+ /* Location code length + Location code + LED control */
+ data_len = LOC_CODE_LEN + sled.lc_len + LED_CONTROL_LEN;
+ cmd_hdr = SPCN_MOD_SET_LED_CTL_LOC_CODE << 24 | SPCN_CMD_SET << 16 |
+ data_len;
+
+ /* Fetch the current state of LED */
+ led = fsp_find_cec_led(loc_code);
+
+ /* LED not present */
+ if (led == NULL) {
+ u32 cmd = 0;
+ int rc = -1;
+
+ cmd = FSP_RSP_SET_LED_STATE | FSP_STATUS_INVALID_LC;
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ return rc;
+ }
+
+ /*
+ * Checkpoint the status here, will use it if the SPCN
+ * command eventually fails.
+ */
+ led->ckpt_status = led->status;
+ sled.state = led->status;
+
+	/* Update the exclusive LED bits */
+ if (is_enclosure_led(loc_code)) {
+ if (command == LED_COMMAND_FAULT) {
+ if (state == LED_STATE_ON)
+ led->excl_bit |= FSP_LED_EXCL_FAULT;
+ if (state == LED_STATE_OFF)
+ led->excl_bit &= ~FSP_LED_EXCL_FAULT;
+ }
+
+ if (command == LED_COMMAND_IDENTIFY) {
+ if (state == LED_STATE_ON)
+ led->excl_bit |= FSP_LED_EXCL_IDENTIFY;
+ if (state == LED_STATE_OFF)
+ led->excl_bit &= ~FSP_LED_EXCL_IDENTIFY;
+ }
+ }
+
+	/* LED FAULT command */
+ if (command == LED_COMMAND_FAULT) {
+ if (state == LED_STATE_ON)
+ sled.state |= SPCN_LED_FAULT_MASK;
+ if (state == LED_STATE_OFF)
+ sled.state &= ~SPCN_LED_FAULT_MASK;
+ }
+
+ /* LED IDENTIFY command */
+	if (command == LED_COMMAND_IDENTIFY) {
+ if (state == LED_STATE_ON)
+ sled.state |= SPCN_LED_IDENTIFY_MASK;
+ if (state == LED_STATE_OFF)
+ sled.state &= ~SPCN_LED_IDENTIFY_MASK;
+ }
+
+ /* Write into SPCN TCE buffer */
+ buf_write(buf, u8, sled.lc_len); /* Location code length */
+ strncpy(buf, sled.lc_code, sled.lc_len); /* Location code */
+ buf += sled.lc_len;
+ buf_write(buf, u16, sled.state); /* LED state */
+
+ msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0, PSI_DMA_LED_BUF);
+ /*
+ * Update the local lists based on the attempted SPCN command to
+ * set/reset an individual led (CEC or ENCL).
+ */
+ lock(&led_lock);
+ update_led_list(loc_code, sled.state);
+ msg->user_data = led;
+ unlock(&led_lock);
+
+ rc = fsp_queue_msg(msg, fsp_spcn_set_led_completion);
+ return rc;
+}
+
+/*
+ * Write single location code information into the TCE outbound buffer
+ *
+ * Data layout
+ *
+ * 2 bytes - Length of location code structure
+ * 4 bytes - CCIN in ASCII
+ * 1 byte - Resource status flag
+ * 1 byte - Indicator state
+ * 1 byte - Raw loc code length
+ * 1 byte - Loc code field size
+ * Field size byte - Null terminated ASCII string padded to 4 byte boundary
+ *
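+ * Worked example (hypothetical): for raw location code "U78A9-P1"
+ * (raw_len = 8), the record still occupies sizeof(struct
+ * fsp_loc_code_data) bytes, with loc_code zero-padded out to the full
+ * LOC_CODE_SIZE field.
+ *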
+ */
+static u32 fsp_push_data_to_tce(struct fsp_led_data *led, u8 *out_data,
+ u32 total_size)
+{
+ struct fsp_loc_code_data lcode;
+
+ /* CCIN value is irrelevant */
+ lcode.ccin = 0x0;
+
+ lcode.status = FSP_IND_NOT_IMPLMNTD;
+
+ if (led->parms & SPCN_LED_IDENTIFY_MASK)
+ lcode.status = FSP_IND_IMPLMNTD;
+
+ /* LED indicator status */
+ lcode.ind_state = FSP_IND_INACTIVE;
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ lcode.ind_state |= FSP_IND_IDENTIFY_ACTV;
+ if (led->status & SPCN_LED_FAULT_MASK)
+ lcode.ind_state |= FSP_IND_FAULT_ACTV;
+
+ /* Location code */
+ memset(lcode.loc_code, 0, LOC_CODE_SIZE);
+ lcode.raw_len = strlen(led->loc_code);
+ strncpy(lcode.loc_code, led->loc_code, lcode.raw_len);
+ lcode.fld_sz = sizeof(lcode.loc_code);
+
+ /* Rest of the structure */
+ lcode.size = sizeof(lcode);
+ lcode.status &= 0x0f;
+
+ /*
+	 * Check for outbound buffer overflow. If there are still more
+	 * LEDs to be sent to the FSP but no room left, don't send them;
+	 * just ignore them.
+ */
+ if ((total_size + lcode.size) > PSI_DMA_LOC_COD_BUF_SZ)
+ return 0;
+
+ /* Copy over to the buffer */
+ memcpy(out_data, &lcode, sizeof(lcode));
+
+ return lcode.size;
+}
+
+/*
+ * Send out LED information structure pointed by "loc_code"
+ * to FSP through the PSI DMA mapping. Buffer layout structure
+ * must be followed.
+ */
+static void fsp_ret_loc_code_list(u16 req_type, char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ u8 *data; /* Start of TCE mapped buffer */
+ u8 *out_data; /* Start of location code data */
+ u32 bytes_sent = 0, total_size = 0;
+ u16 header_size = 0, flags = 0;
+
+ /* Init the addresses */
+ data = (u8 *) PSI_DMA_LOC_COD_BUF;
+ out_data = NULL;
+
+	/* Mapped here; unmapped later via the FSP_CMD_RET_LOC_BUFFER command */
+ fsp_tce_map(PSI_DMA_LOC_COD_BUF, (void*)data, PSI_DMA_LOC_COD_BUF_SZ);
+ out_data = data + 8;
+
+ /* CEC LED list */
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ /*
+		 * When the request type is the system-wide LED list,
+		 * i.e. GET_LC_CMPLT_SYS, send the entire contents
+		 * of the CEC list, including all descendants
+		 * and all of their enclosures.
+ */
+
+ if (req_type == GET_LC_ENCLOSURES)
+ break;
+
+ if (req_type == GET_LC_ENCL_DESCENDANTS) {
+ if (strstr(led->loc_code, loc_code) == NULL)
+ continue;
+ }
+
+ if (req_type == GET_LC_SINGLE_LOC_CODE) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ }
+
+ /* Push the data into TCE buffer */
+ bytes_sent = 0;
+ bytes_sent = fsp_push_data_to_tce(led, out_data, total_size);
+
+ /* Advance the TCE pointer */
+ out_data += bytes_sent;
+ total_size += bytes_sent;
+ }
+
+ /* Enclosure LED list */
+ if (req_type == GET_LC_ENCLOSURES) {
+ list_for_each_safe(&encl_ledq, led, next, link) {
+
+ /* Push the data into TCE buffer */
+ bytes_sent = 0;
+ bytes_sent = fsp_push_data_to_tce(led,
+ out_data, total_size);
+
+ /* Advance the TCE pointer */
+ out_data += bytes_sent;
+ total_size += bytes_sent;
+ }
+ }
+
+	/* Count from 'data' instead of 'out_data' */
+ total_size += 8;
+ memcpy(data, &total_size, sizeof(total_size));
+
+ header_size = OUTBUF_HEADER_SIZE;
+ memcpy(data + sizeof(total_size), &header_size, sizeof(header_size));
+
+ if (req_type == GET_LC_ENCL_DESCENDANTS)
+ flags = 0x8000;
+
+ memcpy(data + sizeof(total_size) + sizeof(header_size), &flags,
+ sizeof(flags));
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_LIST,
+ 3, 0, PSI_DMA_LOC_COD_BUF, total_size),
+ fsp_freemsg);
+}
+
+/*
+ * FSP async command: FSP_CMD_GET_LED_LIST
+ *
+ * (1) FSP sends the list of location codes through inbound buffer
+ * (2) HV sends the status of those location codes through outbound buffer
+ *
+ * Inbound buffer data layout (loc code request structure)
+ *
+ * 2 bytes - Length of entire structure
+ * 2 bytes - Request type
+ * 1 byte - Raw length of location code
+ * 1 byte - Location code field size
+ * `Field size` bytes - NULL terminated ASCII location code string
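+ *
+ * Example (hypothetical): a request for the single location code
+ * "U78A9-P1" would carry req_type = GET_LC_SINGLE_LOC_CODE, a raw
+ * length of 8, and the NULL terminated string padded to the field
+ * size.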
+ */
+void fsp_get_led_list(struct fsp_msg *msg)
+{
+ struct fsp_loc_code_req req;
+ u32 tce_token = msg->data.words[1];
+ void *buf;
+
+ /* Parse inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_LIST |
+ FSP_STATUS_INVALID_DATA,
+ 0), fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ printf(PREFIX "Request for loc code list type 0x%04x LC=%s\n",
+ req.req_type, req.loc_code);
+
+ fsp_ret_loc_code_list(req.req_type, req.loc_code);
+}
+
+/*
+ * FSP async command: FSP_CMD_RET_LOC_BUFFER
+ *
+ * With this command the FSP returns ownership of the outbound buffer
+ * that Sapphire used to pass the indicator list last time. This tells
+ * Sapphire that the FSP has consumed all the data in the outbound
+ * buffer, so Sapphire can reuse it for the next request.
+ */
+void fsp_free_led_list_buf(struct fsp_msg *msg)
+{
+ u32 tce_token = msg->data.words[1];
+ u32 cmd = FSP_RSP_RET_LED_BUFFER;
+
+ /* Token does not point to outbound buffer */
+ if (tce_token != PSI_DMA_LOC_COD_BUF) {
+ log_simple_error(&e_info(OPAL_RC_LED_BUFF),
+ "LED: Invalid tce token from FSP\n");
+ cmd |= FSP_STATUS_GENERIC_ERROR;
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ return;
+ }
+
+ /* Unmap the location code DMA buffer */
+ fsp_tce_unmap(PSI_DMA_LOC_COD_BUF, PSI_DMA_LOC_COD_BUF_SZ);
+
+	/* Respond to the FSP */
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+}
+
+static void fsp_ret_led_state(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+ u8 ind_state = 0;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strcmp(loc_code, led->loc_code))
+ continue;
+
+ /* Found the location code */
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ ind_state |= FSP_IND_IDENTIFY_ACTV;
+ if (led->status & SPCN_LED_FAULT_MASK)
+ ind_state |= FSP_IND_FAULT_ACTV;
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE, 1, ind_state),
+ fsp_freemsg);
+ return;
+ }
+
+ /* Location code not found */
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find the location code LC=%s\n", loc_code);
+
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE |
+ FSP_STATUS_INVALID_LC, 1, 0xff), fsp_freemsg);
+}
+
+/*
+ * FSP async command: FSP_CMD_GET_LED_STATE
+ *
+ * With this command the FSP queries the state of any given LED
+ */
+void fsp_get_led_state(struct fsp_msg *msg)
+{
+ struct fsp_get_ind_state_req req;
+ u32 tce_token = msg->data.words[1];
+ void *buf;
+
+ /* Parse the inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE |
+ FSP_STATUS_INVALID_DATA, 0),
+ fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ DBG("%s: tce=0x%08x buf=%p rq.sz=%d rq.lc_len=%d rq.fld_sz=%d"
+ " LC: %02x %02x %02x %02x....\n", __func__,
+ tce_token, buf, req.size, req.lc_len, req.fld_sz,
+ req.loc_code[0], req.loc_code[1],
+ req.loc_code[2], req.loc_code[3]);
+
+ /* Bound check */
+ if (req.lc_len >= LOC_CODE_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Loc code too large in %s: %d bytes\n",
+ __func__, req.lc_len);
+ req.lc_len = LOC_CODE_SIZE - 1;
+ }
+ /* Ensure NULL termination */
+ req.loc_code[req.lc_len] = 0;
+
+ /* Do the deed */
+ fsp_ret_led_state(req.loc_code);
+}
+
+/*
+ * FSP async command: FSP_CMD_SET_LED_STATE
+ *
+ * With this command the FSP sets/resets the state of any given LED
+ */
+void fsp_set_led_state(struct fsp_msg *msg)
+{
+ struct fsp_set_ind_state_req req;
+ struct fsp_led_data *led, *next;
+ u32 tce_token = msg->data.words[1];
+ bool command, state;
+ void *buf;
+
+ /* Parse the inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_SET_LED_STATE |
+ FSP_STATUS_INVALID_DATA,
+ 0), fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ DBG("%s: tce=0x%08x buf=%p rq.sz=%d rq.typ=0x%04x rq.lc_len=%d"
+ " rq.fld_sz=%d LC: %02x %02x %02x %02x....\n", __func__,
+ tce_token, buf, req.size, req.lc_len, req.fld_sz,
+ req.req_type,
+ req.loc_code[0], req.loc_code[1],
+ req.loc_code[2], req.loc_code[3]);
+
+ /* Bound check */
+ if (req.lc_len >= LOC_CODE_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Loc code too large in %s: %d bytes\n",
+ __func__, req.lc_len);
+ req.lc_len = LOC_CODE_SIZE - 1;
+ }
+ /* Ensure NULL termination */
+ req.loc_code[req.lc_len] = 0;
+
+ /* Decode command */
+ command = (req.ind_state & LOGICAL_IND_STATE_MASK) ?
+ LED_COMMAND_FAULT : LED_COMMAND_IDENTIFY;
+ state = (req.ind_state & ACTIVE_LED_STATE_MASK) ?
+ LED_STATE_ON : LED_STATE_OFF;
+
+ /* Handle requests */
+ switch(req.req_type) {
+ case SET_IND_ENCLOSURE:
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ /* Only descendants of the same enclosure */
+ if (!strstr(led->loc_code, req.loc_code))
+ continue;
+
+ /* Skip the enclosure */
+ if (!strcmp(led->loc_code, req.loc_code))
+ continue;
+
+ if (fsp_msg_set_led_state(led->loc_code,
+ command, state))
+ log_simple_error(&e_info(OPAL_RC_LED_STATE),
+ "LED: Set led state failed at LC=%s\n",
+ led->loc_code);
+ }
+ break;
+ case SET_IND_SINGLE_LOC_CODE:
+		/* Set LED state for a single descendant LED */
+ if (fsp_msg_set_led_state(req.loc_code, command, state))
+ log_simple_error(&e_info(OPAL_RC_LED_STATE),
+ "LED: Set led state failed at LC=%s\n",
+ req.loc_code);
+ break;
+ default:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_SET_LED_STATE |
+ FSP_STATUS_NOT_SUPPORTED, 0),
+ fsp_freemsg);
+ }
+}
+
+/* Handle received indicator message from FSP */
+static bool fsp_indicator_message(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ /* LED support not available yet */
+ if (!led_support) {
+ log_simple_error(&e_info(OPAL_RC_LED_SUPPORT),
+ PREFIX "Indicator message while LED support not"
+ " available yet\n");
+ return false;
+ }
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_GET_LED_LIST:
+ printf(PREFIX
+ "FSP_CMD_GET_LED_LIST command received\n");
+ fsp_get_led_list(msg);
+ return true;
+ case FSP_CMD_RET_LED_BUFFER:
+ printf(PREFIX
+ "FSP_CMD_RET_LED_BUFFER command received\n");
+ fsp_free_led_list_buf(msg);
+ return true;
+ case FSP_CMD_GET_LED_STATE:
+ printf(PREFIX
+ "FSP_CMD_GET_LED_STATE command received\n");
+ fsp_get_led_state(msg);
+ return true;
+ case FSP_CMD_SET_LED_STATE:
+ printf(PREFIX
+ "FSP_CMD_SET_LED_STATE command received\n");
+ fsp_set_led_state(msg);
+ return true;
+ default:
+ printf(PREFIX
+ "Invalid FSP async sub command %06x\n",
+ cmd_sub_mod);
+ return false;
+ }
+}
+
+/* Indicator class client */
+static struct fsp_client fsp_indicator_client = {
+ .message = fsp_indicator_message,
+};
+
+/*
+ * Process the received LED data from SPCN
+ *
+ * Each LED's state data is added to the CEC list. If the location
+ * code is an enclosure type, it is added to the enclosure list as well.
+ *
+ */
+static void fsp_process_leds_data(u16 len)
+{
+ struct fsp_led_data *led_data = NULL;
+ void *buf = NULL;
+
+ /*
+ * Process the entire captured data from the last command
+ *
+	 * The TCE-mapped 'led_buffer' contains fsp_led_data structures
+	 * one after the other, up to the total length 'len'.
+ *
+ */
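+	/*
+	 * Each record decodes as: u16 rid, u8 lc_len, lc_len bytes of
+	 * location code, u16 parms, u16 status; a hypothetical 8-byte
+	 * location code therefore consumes 15 bytes of 'len'.
+	 */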
+ buf = led_buffer;
+ while (len) {
+ /* Prepare */
+ led_data = zalloc(sizeof(struct fsp_led_data));
+ assert(led_data);
+
+ /* Resource ID */
+ buf_read(buf, u16, &led_data->rid);
+ len -= sizeof(led_data->rid);
+
+ /* Location code length */
+ buf_read(buf, u8, &led_data->lc_len);
+ len -= sizeof(led_data->lc_len);
+
+ if (led_data->lc_len == 0) {
+ free(led_data);
+ break;
+ }
+
+ /* Location code */
+		strncpy(led_data->loc_code, buf, led_data->lc_len);
+		/* strncpy() does not NUL-terminate; do so explicitly */
+		led_data->loc_code[led_data->lc_len] = '\0';
+
+ buf += led_data->lc_len;
+ len -= led_data->lc_len;
+
+ /* Parameters */
+ buf_read(buf, u16, &led_data->parms);
+ len -= sizeof(led_data->parms);
+
+ /* Status */
+ buf_read(buf, u16, &led_data->status);
+ len -= sizeof(led_data->status);
+
+ /*
+		 * This is an enclosure LED's location code; it needs to
+		 * go into the enclosure LED list as well.
+ */
+ if (!strstr(led_data->loc_code, "-")) {
+ struct fsp_led_data *encl_led_data = NULL;
+ encl_led_data = zalloc(sizeof(struct fsp_led_data));
+ assert(encl_led_data);
+
+ /* copy over the original */
+ encl_led_data->rid = led_data->rid;
+ encl_led_data->lc_len = led_data->lc_len;
+ strncpy(encl_led_data->loc_code, led_data->loc_code,
+ led_data->lc_len);
+ encl_led_data->loc_code[led_data->lc_len] = '\0';
+ encl_led_data->parms = led_data->parms;
+ encl_led_data->status = led_data->status;
+
+ /* Add to the list of enclosure LEDs */
+ list_add_tail(&encl_ledq, &encl_led_data->link);
+ }
+
+ /* Push this onto the list */
+ list_add_tail(&cec_ledq, &led_data->link);
+ }
+}
+
+/* Replay the SPCN command */
+static void replay_spcn_cmd(u32 last_spcn_cmd)
+{
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ /* Reached threshold */
+ if (replay == SPCN_REPLAY_THRESHOLD) {
+ replay = 0;
+ return;
+ }
+
+ replay++;
+ if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_FIRST) {
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 |
+ SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE,
+ cmd_hdr, 0,
+ PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "Replay SPCN_MOD_PRS_LED_DATA_FIRST"
+ " command could not be queued\n");
+ }
+
+ if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_SUB) {
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 | SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr,
+ 0, PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "Replay SPCN_MOD_PRS_LED_DATA_SUB"
+ " command could not be queued\n");
+ }
+}
+
+/*
+ * FSP message response handler for following SPCN LED commands
+ * which are used to fetch all of the LED data from SPCN
+ *
+ * 1. SPCN_MOD_PRS_LED_DATA_FIRST --> First 1KB of LED data
+ * 2. SPCN_MOD_PRS_LED_DATA_SUB --> Subsequent 1KB of LED data
+ *
+ * Once the SPCN_RSP_STATUS_SUCCESS response code has been received
+ * indicating the last batch of 1KB LED data is here, the list addition
+ * process is now complete and we enable LED support for FSP async commands
+ * and for OPAL interface.
+ */
+static void fsp_read_leds_data_complete(struct fsp_msg *msg)
+{
+ struct fsp_led_data *led, *next;
+ struct fsp_msg *resp = msg->resp;
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ u32 msg_status = resp->word1 & 0xff00;
+ u32 led_status = (resp->data.words[1] >> 24) & 0xff;
+ u16 data_len = (u16)(resp->data.words[1] & 0xffff);
+
+ if (msg_status != FSP_STATUS_SUCCESS) {
+ log_simple_error(&e_info(OPAL_RC_LED_SUPPORT),
+ "LED: FSP returned error %x LED not supported\n",
+ msg_status);
+ /* LED support not available */
+ led_support = false;
+ return;
+ }
+
+ /* SPCN command status */
+ switch (led_status) {
+ /* Last 1KB of LED data */
+ case SPCN_RSP_STATUS_SUCCESS:
+ printf(PREFIX
+ "SPCN_RSP_STATUS_SUCCESS: %d bytes received\n",
+ data_len);
+
+ /* Copy data to the local list */
+ fsp_process_leds_data(data_len);
+ led_support = true;
+
+ /* LEDs captured on the system */
+ printf(PREFIX "CEC LEDs captured on the system:\n");
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ printf(PREFIX "rid: %x\t", led->rid);
+ printf("len: %x ", led->lc_len);
+ printf("lcode: %-30s\t", led->loc_code);
+ printf("parms: %04x\t", led->parms);
+ printf("status: %04x\n", led->status);
+ }
+
+ printf(PREFIX "ENCL LEDs captured on the system:\n");
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ printf(PREFIX "rid: %x\t", led->rid);
+ printf("len: %x ", led->lc_len);
+ printf("lcode: %-30s\t", led->loc_code);
+ printf("parms: %04x\t", led->parms);
+ printf("status: %04x\n", led->status);
+ }
+
+ break;
+
+	/* More 1KB chunks of LED data remain */
+ case SPCN_RSP_STATUS_COND_SUCCESS:
+ printf(PREFIX
+		       "SPCN_RSP_STATUS_COND_SUCCESS: %d bytes"
+		       " received\n", data_len);
+
+ /* Copy data to the local list */
+ fsp_process_leds_data(data_len);
+
+ /* Fetch the remaining data from SPCN */
+ last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_SUB;
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 |
+ SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE,
+ cmd_hdr,
+ 0, PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "SPCN_MOD_PRS_LED_DATA_SUB command"
+ " could not be queued\n");
+ break;
+
+	/* Other expected error codes */
+ case SPCN_RSP_STATUS_INVALID_RACK:
+ case SPCN_RSP_STATUS_INVALID_SLAVE:
+ case SPCN_RSP_STATUS_INVALID_MOD:
+ case SPCN_RSP_STATUS_STATE_PROHIBIT:
+ case SPCN_RSP_STATUS_UNKNOWN:
+ /* Replay the previous SPCN command */
+ replay_spcn_cmd(last_spcn_cmd);
+ }
+ fsp_freemsg(msg);
+}
+
+/*
+ * Init the LED state
+ *
+ * This is called during the host boot process. This is where we figure
+ * out all the LEDs present on the system and their state, create
+ * structures out of that information, and populate two master lists:
+ * one for all the LEDs on the CEC and one for all the LEDs on the
+ * enclosure. The LED information contained in the lists will serve both
+ * FSP initiated async commands and POWERNV initiated OPAL calls. We need
+ * to make sure that this initialization is complete before allowing any
+ * LED requests. This must also be called to re-fetch data from SPCN
+ * after any LED state has been updated.
+ */
+static void fsp_leds_query_spcn(void)
+{
+ struct fsp_led_data *led = NULL;
+ int rc = 0;
+
+ u32 cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 | SPCN_CMD_PRS << 16;
+
+	/* Disable LED support until the last batch of LED data arrives */
+ led_support = false;
+ last_spcn_cmd = 0;
+
+ /* Empty the lists */
+ while (!list_empty(&cec_ledq)) {
+ led = list_pop(&cec_ledq, struct fsp_led_data, link);
+ free(led);
+ }
+
+ while (!list_empty(&encl_ledq)) {
+ led = list_pop(&encl_ledq, struct fsp_led_data, link);
+ free(led);
+ }
+
+ /* Allocate buffer with alignment requirements */
+ if (led_buffer == NULL) {
+ led_buffer = memalign(TCE_PSIZE, PSI_DMA_LED_BUF_SZ);
+ if (!led_buffer)
+ return;
+ }
+
+ /* TCE mapping - will not unmap */
+ fsp_tce_map(PSI_DMA_LED_BUF, led_buffer, PSI_DMA_LED_BUF_SZ);
+
+ /* Request the first 1KB of LED data */
+ last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_FIRST;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0,
+ PSI_DMA_LED_BUF), fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "SPCN_MOD_PRS_LED_DATA_FIRST command could"
+ " not be queued\n");
+}
+
+/* Init the LED subsystem at boot time */
+void fsp_led_init(void)
+{
+ led_buffer = NULL;
+
+ /* Init the master lists */
+ list_head_init(&cec_ledq);
+ list_head_init(&encl_ledq);
+
+ fsp_leds_query_spcn();
+ printf(PREFIX "Init completed\n");
+
+ /* Handle FSP initiated async LED commands */
+ fsp_register_client(&fsp_indicator_client, FSP_MCLASS_INDICATOR);
+ printf(PREFIX "FSP async command client registered\n");
+}
diff --git a/hw/fsp/fsp-mdst-table.c b/hw/fsp/fsp-mdst-table.c
new file mode 100644
index 00000000..5b299482
--- /dev/null
+++ b/hw/fsp/fsp-mdst-table.c
@@ -0,0 +1,252 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * Sapphire dump design:
+ * - During initialization we set up the Memory Dump Source Table (MDST),
+ *   which contains address/size pairs.
+ * - We send MDST table update notification to FSP via MBOX command.
+ * - During Sapphire checkstop:
+ * - FSP retrieves HWDUMP.
+ * - FSP retrieves CEC memory based on MDST table.
+ * - Once Sapphire reboots, the FSP sends a new-dump-available notification via HDAT
+ */
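+
+/*
+ * A minimal sketch of what one MDST entry ends up holding (see
+ * __mdst_table_add_entry() below; the values here are illustrative):
+ *
+ *	mdst_table[i].addr = PSI_DMA_HYP_DUMP + cur_dump_size;
+ *	mdst_table[i].type = DUMP_SECTION_CONSOLE;
+ *	mdst_table[i].size = INMEM_CON_LEN;
+ */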
+
+#include <fsp.h>
+#include <psi.h>
+#include <opal.h>
+#include <lock.h>
+#include <skiboot.h>
+#include <fsp-elog.h>
+#include <fsp-mdst-table.h>
+
+/*
+ * Sapphire dump size
+ * This is the maximum memory that FSP can retrieve during checkstop.
+ *
+ * Note:
+ * Presently we are hardcoding this parameter. Eventually we need a
+ * new system parameter so that we can get the max size dynamically.
+ */
+#define MAX_SAPPHIRE_DUMP_SIZE 0x1000000
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_UPDATE, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+
+static struct dump_mdst_table *mdst_table;
+
+static int cur_mdst_entry;
+static int max_mdst_entry;
+static int cur_dump_size;
+/*
+ * Presently both sizes are the same, but if someday the FSP gives more
+ * space than our TCE mapping, then we need this validation.
+ *
+ * Also, once the FSP implements a MAX_SAPPHIRE_DUMP_SIZE system param,
+ * we can move this validation to a separate function.
+ */
+static int max_dump_size = MIN(MAX_SAPPHIRE_DUMP_SIZE, PSI_DMA_HYP_DUMP_SIZE);
+
+/* Protect MDST table entries */
+static struct lock mdst_lock = LOCK_UNLOCKED;
+
+/* Not supported on P7 */
+static inline bool fsp_mdst_supported(void)
+{
+ return proc_gen >= proc_gen_p8;
+}
+
+static void update_mdst_table_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ if (status)
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+ "MDST: MDST table update failed: 0x%x\n",
+ status);
+ else
+ printf("MDST: Table updated.\n");
+
+ fsp_freemsg(msg);
+}
+
+/* Send MDST table to FSP */
+static int64_t fsp_update_mdst_table(void)
+{
+ struct fsp_msg *msg;
+ int rc = OPAL_SUCCESS;
+
+ if (cur_mdst_entry <= 0) {
+ printf("MDST: Table is empty\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ lock(&mdst_lock);
+ msg = fsp_mkmsg(FSP_CMD_HYP_MDST_TABLE, 4, 0,
+ PSI_DMA_MDST_TABLE,
+ sizeof(*mdst_table) * cur_mdst_entry,
+ sizeof(*mdst_table));
+ unlock(&mdst_lock);
+
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+			"MDST: Message allocation failed!\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, update_mdst_table_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+ "MDST: Failed to queue MDST table message.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+ return rc;
+}
+
+/* Add entry to MDST table */
+static int __mdst_table_add_entry(void *addr, uint32_t type, uint32_t size)
+{
+ int rc = OPAL_INTERNAL_ERROR;
+
+ lock(&mdst_lock);
+
+ if (!mdst_table)
+ goto out;
+
+ if (cur_mdst_entry >= max_mdst_entry) {
+ printf("MDST: Table is full.\n");
+ goto out;
+ }
+
+ /* Make sure we don't cross dump size limit */
+ if (cur_dump_size + size > max_dump_size) {
+ printf("MDST: %d is crossing max dump size (%d) limit.\n",
+ cur_dump_size + size, max_dump_size);
+ goto out;
+ }
+
+ /* TCE mapping */
+ fsp_tce_map(PSI_DMA_HYP_DUMP + cur_dump_size, addr, ALIGN_UP(size, TCE_PSIZE));
+
+ /* Add entry to MDST table */
+ mdst_table[cur_mdst_entry].addr = PSI_DMA_HYP_DUMP + cur_dump_size;
+ mdst_table[cur_mdst_entry].type = type;
+ mdst_table[cur_mdst_entry].size = size;
+
+ /* Update MDST count and dump size */
+ cur_mdst_entry++;
+ cur_dump_size += ALIGN_UP(size, TCE_PSIZE);
+
+ printf("MDST: Addr = 0x%llx [size : %d bytes] added to MDST table.\n",
+ (uint64_t)addr, size);
+
+ rc = OPAL_SUCCESS;
+
+out:
+ unlock(&mdst_lock);
+ return rc;
+}
+
+static int mdst_table_add_entries(void)
+{
+ int rc;
+
+ /* Add console buffer */
+ rc = __mdst_table_add_entry((void *)INMEM_CON_START,
+ DUMP_SECTION_CONSOLE, INMEM_CON_LEN);
+ if (rc)
+ return rc;
+
+ /* Add HBRT buffer */
+ rc = __mdst_table_add_entry((void *)HBRT_CON_START,
+ DUMP_SECTION_HBRT_LOG, HBRT_CON_LEN);
+
+ return rc;
+}
+
+/* TCE mapping */
+static inline void mdst_table_tce_map(void)
+{
+ fsp_tce_map(PSI_DMA_MDST_TABLE, mdst_table, PSI_DMA_MDST_TABLE_SIZE);
+}
+
+/* Initialize MDST table */
+static int mdst_table_init(void)
+{
+ max_mdst_entry = PSI_DMA_MDST_TABLE_SIZE / sizeof(*mdst_table);
+ printf("MDST: Max entries in MDST table : %d\n", max_mdst_entry);
+
+ mdst_table = memalign(TCE_PSIZE, PSI_DMA_MDST_TABLE_SIZE);
+ if (!mdst_table) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_INIT),
+ "MDST: Failed to allocate memory for MDST table.\n");
+ return -ENOMEM;
+ }
+
+ memset(mdst_table, 0, PSI_DMA_MDST_TABLE_SIZE);
+ mdst_table_tce_map();
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Handle FSP R/R event.
+ */
+static bool fsp_mdst_update_rr(uint32_t cmd_sub_mod,
+ struct fsp_msg *msg __unused)
+{
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ return true;
+ case FSP_RELOAD_COMPLETE: /* Send MDST to FSP */
+ fsp_update_mdst_table();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_mdst_client_rr = {
+ .message = fsp_mdst_update_rr,
+};
+
+/* Initialize MDST table and send notification to FSP */
+void fsp_mdst_table_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ if (!fsp_mdst_supported())
+ return;
+
+ /* Initiate MDST */
+ if (mdst_table_init() != OPAL_SUCCESS)
+ return;
+
+ /*
+ * Ignore return code from mdst_table_add_entries so that
+	 * we can at least capture a partial dump.
+ */
+ mdst_table_add_entries();
+ fsp_update_mdst_table();
+
+ /* Register for Class AA (FSP R/R) */
+ fsp_register_client(&fsp_mdst_client_rr, FSP_MCLASS_RR_EVENT);
+}
diff --git a/hw/fsp/fsp-mem-err.c b/hw/fsp/fsp-mem-err.c
new file mode 100644
index 00000000..8ebaaee5
--- /dev/null
+++ b/hw/fsp/fsp-mem-err.c
@@ -0,0 +1,415 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <opal.h>
+#include <opal-msg.h>
+#include <lock.h>
+#include <fsp.h>
+#include <fsp-elog.h>
+
+/* debug message prefix */
+#define PREFIX "FSPMEMERR: "
+
+/* FSP sends real address of 4K memory page. */
+#define MEM_ERR_PAGE_SIZE_4K (1UL << 12)
+
+/* Maximum number of error events to hold until Linux consumes them. */
+#define MERR_MAX_RECORD 1024
+
+/* FSP response status */
+#define FSP_RESP_STATUS_GENERIC_FAILURE 0xfe
+
+struct fsp_mem_err_node {
+ struct list_node list;
+ struct OpalMemoryErrorData data;
+};
+
+static LIST_HEAD(merr_free_list);
+static LIST_HEAD(mem_error_list);
+/*
+ * This lock protects concurrent updates to the merr_free_list and
+ * mem_error_list lists.
+ */
+static struct lock mem_err_lock = LOCK_UNLOCKED;
+
+void mem_err_info_dump(struct opal_errorlog *buf, void *data, uint16_t size);
+
+DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_RES, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, mem_err_info_dump);
+
+DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_DEALLOC, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, mem_err_info_dump);
+
+void mem_err_info_dump(struct opal_errorlog *buf, void *data, uint16_t size)
+{
+ opal_elog_update_user_dump(buf, data, 0x44455350, size);
+}
+
+static bool send_response_to_fsp(u32 cmd_sub_mod)
+{
+ struct fsp_msg *rsp;
+ int rc = -ENOMEM;
+
+ rsp = fsp_mkmsg(cmd_sub_mod, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+ if (rc) {
+ /* XXX Generate error logs */
+ prerror(PREFIX "Error %d queueing FSP memory error"
+ " reply\n", rc);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Queue up the memory error message for delivery.
+ *
+ * queue_event_for_delivery gets called from two places:
+ * 1) from queue_mem_err_node when a new FSP memory error is available, and
+ * 2) from the completion callback indicating that Linux has consumed a
+ *    message.
+ *
+ * TODO:
+ * There is a chance that we may not get a free slot to queue our event
+ * for delivery to Linux during either of the above invocations. In that
+ * case we end up holding events until the next FSP memory error comes in.
+ * We need to address this case either here OR fix up messaging infrastructure
+ * to make sure at least one slot will always be available per message type.
+ *
+ * XXX: BenH: I changed the msg infrastructure to attempt an allocation
+ * in that case, at least until we clarify a bit better how
+ * we want to handle things.
+ */
+static void queue_event_for_delivery(void *data __unused)
+{
+ struct fsp_mem_err_node *entry;
+ uint64_t *merr_data;
+ int rc;
+
+ lock(&mem_err_lock);
+ entry = list_pop(&mem_error_list, struct fsp_mem_err_node, list);
+ unlock(&mem_err_lock);
+
+ if (!entry)
+ return;
+
+ /*
+ * struct OpalMemoryErrorData is of (4 * 64 bits) size and well packed
+ * structure. Hence use uint64_t pointer to pass entire structure
+ * using 4 params in generic message format.
+ */
+ merr_data = (uint64_t *)&entry->data;
+
+ /* queue up for delivery */
+ rc = opal_queue_msg(OPAL_MSG_MEM_ERR, NULL,
+ queue_event_for_delivery,
+ merr_data[0], merr_data[1],
+ merr_data[2], merr_data[3]);
+ lock(&mem_err_lock);
+ if (rc) {
+ /*
+ * Failed to queue up the event for delivery. No free slot
+ * available. There is a chance that we are trying to queue
+		 * up multiple events at the same time. We may already have
+		 * at least one event queued up; in that case we will be
+		 * called again through the completion callback and we should
+		 * be able to grab an empty slot then.
+ *
+ * For now, put this node back on mem_error_list.
+ */
+ list_add(&mem_error_list, &entry->list);
+ } else
+ list_add(&merr_free_list, &entry->list);
+ unlock(&mem_err_lock);
+}
+
+static int queue_mem_err_node(struct OpalMemoryErrorData *merr_evt)
+{
+ struct fsp_mem_err_node *entry;
+
+ lock(&mem_err_lock);
+ entry = list_pop(&merr_free_list, struct fsp_mem_err_node, list);
+ if (!entry) {
+ printf(PREFIX "Failed to queue up memory error event.\n");
+ unlock(&mem_err_lock);
+ return -ENOMEM;
+ }
+
+ entry->data = *merr_evt;
+ list_add(&mem_error_list, &entry->list);
+ unlock(&mem_err_lock);
+
+ /* Queue up the event for delivery to OS. */
+ queue_event_for_delivery(NULL);
+ return 0;
+}
+
+/* Check if a memory resilience event for the same address already exists. */
+static bool is_resilience_event_exist(u64 paddr)
+{
+ struct fsp_mem_err_node *entry;
+ struct OpalMemoryErrorData *merr_evt;
+ int found = 0;
+
+ lock(&mem_err_lock);
+ list_for_each(&mem_error_list, entry, list) {
+ merr_evt = &entry->data;
+ if ((merr_evt->type == OPAL_MEM_ERR_TYPE_RESILIENCE) &&
+ (merr_evt->u.resilience.physical_address_start
+ == paddr)) {
+ found = 1;
+ break;
+ }
+ }
+ unlock(&mem_err_lock);
+ return !!found;
+}
+
+/*
+ * Handle a Memory Resilience error message.
+ * Section 28.2 of Hypervisor to FSP Mailbox Interface Specification.
+ *
+ * The flow for a Memory Resilience Event is:
+ * 1. The PRD component in the FSP gets a recoverable attention from hardware
+ *    when there is a correctable/uncorrectable memory error and a page must
+ *    be freed.
+ * 2. PRD sends a Memory Resilience command to the hypervisor with the real
+ *    address of the 4K memory page in which the error occurred.
+ * 3. The hypervisor acknowledges with a status immediately. Immediate
+ *    acknowledgment doesn't require the freeing of the page to be completed.
+ */
+static bool handle_memory_resilience(u32 cmd_sub_mod, u64 paddr)
+{
+ int rc = 0;
+ u8 err = 0;
+ struct OpalMemoryErrorData mem_err_evt;
+
+ memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData));
+ /* Check arguments */
+ if (paddr == 0) {
+ prerror(PREFIX "memory resilience: Invalid real address.\n");
+ err = FSP_RESP_STATUS_GENERIC_FAILURE;
+ }
+
+ /* If we had an error, send response to fsp and return */
+ if (err)
+ return send_response_to_fsp(FSP_RSP_MEM_RES | err);
+
+ /* Check if event already exist for same address. */
+ if (is_resilience_event_exist(paddr))
+ goto send_response;
+
+ /* Populate an event. */
+ mem_err_evt.version = OpalMemErr_V1;
+ mem_err_evt.type = OPAL_MEM_ERR_TYPE_RESILIENCE;
+
+ switch (cmd_sub_mod) {
+ case FSP_CMD_MEM_RES_CE:
+ /*
+		 * Should we keep a counter for corrected errors in
+		 * sapphire OR let Linux (PowerNV) handle it?
+		 *
+		 * For now, send corrected errors to Linux and let
+		 * Linux handle corrected error thresholding.
+ */
+ mem_err_evt.flags |= OPAL_MEM_CORRECTED_ERROR;
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_CE;
+ break;
+ case FSP_CMD_MEM_RES_UE:
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_UE;
+ break;
+ case FSP_CMD_MEM_RES_UE_SCRB:
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_UE_SCRUB;
+ break;
+ }
+ mem_err_evt.u.resilience.physical_address_start = paddr;
+ mem_err_evt.u.resilience.physical_address_end =
+ paddr + MEM_ERR_PAGE_SIZE_4K;
+
+ /* Queue up the event and inform OS about it. */
+ rc = queue_mem_err_node(&mem_err_evt);
+
+send_response:
+ /* Queue up an OK response to the resilience message itself */
+ if (!rc)
+ return send_response_to_fsp(FSP_RSP_MEM_RES);
+ else {
+ log_error(&e_info(OPAL_RC_MEM_ERR_RES),
+ &mem_err_evt, sizeof(struct OpalMemoryErrorData),
+ "OPAL_MEM_ERR: Cannot queue up memory "
+ "resilience error event to the OS");
+ return false;
+ }
+}
+
+/* update existing event entry if match is found. */
+static bool update_memory_deallocation_event(u64 paddr_start, u64 paddr_end)
+{
+ struct fsp_mem_err_node *entry;
+ struct OpalMemoryErrorData *merr_evt;
+ int found = 0;
+
+ lock(&mem_err_lock);
+ list_for_each(&mem_error_list, entry, list) {
+ merr_evt = &entry->data;
+ if ((merr_evt->type == OPAL_MEM_ERR_TYPE_DYN_DALLOC) &&
+ (merr_evt->u.dyn_dealloc.physical_address_start
+ == paddr_start)) {
+ found = 1;
+ if (merr_evt->u.dyn_dealloc.physical_address_end
+ < paddr_end)
+ merr_evt->u.dyn_dealloc.physical_address_end
+ = paddr_end;
+ break;
+ }
+ }
+ unlock(&mem_err_lock);
+ return !!found;
+}
+
+/*
+ * Handle dynamic memory deallocation message.
+ *
+ * When a condition occurs in which we need to do a large scale memory
+ * deallocation, PRD will send the starting and ending addresses of an
+ * area of memory to the hypervisor. The hypervisor then needs to
+ * deallocate all pages between and including those addresses.
+ *
+ */
+static bool handle_memory_deallocation(u64 paddr_start, u64 paddr_end)
+{
+ int rc = 0;
+ u8 err = 0;
+ struct OpalMemoryErrorData mem_err_evt;
+
+ memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData));
+ /* Check arguments */
+ if ((paddr_start == 0) || (paddr_end == 0)) {
+ prerror(PREFIX "memory deallocation: Invalid "
+ "starting/ending real address.\n");
+ err = FSP_RESP_STATUS_GENERIC_FAILURE;
+ }
+
+ /* If we had an error, send response to fsp and return */
+ if (err)
+ return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC | err);
+
+ /*
+	 * The FSP can send dynamic memory deallocation multiple times for
+	 * the same address/address ranges. Hence check and update if we
+	 * already have the same event queued.
+ */
+ if (update_memory_deallocation_event(paddr_start, paddr_end))
+ goto send_response;
+
+	/* Populate a new event. */
+ mem_err_evt.version = OpalMemErr_V1;
+ mem_err_evt.type = OPAL_MEM_ERR_TYPE_DYN_DALLOC;
+ mem_err_evt.u.dyn_dealloc.dyn_err_type =
+ OPAL_MEM_DYNAMIC_DEALLOC;
+ mem_err_evt.u.dyn_dealloc.physical_address_start = paddr_start;
+ mem_err_evt.u.dyn_dealloc.physical_address_end = paddr_end;
+
+ /* Queue up the event and inform OS about it. */
+ rc = queue_mem_err_node(&mem_err_evt);
+
+send_response:
+ /* Queue up an OK response to the memory deallocation message itself */
+ if (!rc)
+ return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC);
+ else {
+ log_error(&e_info(OPAL_RC_MEM_ERR_DEALLOC),
+ &mem_err_evt, sizeof(struct OpalMemoryErrorData),
+ "OPAL_MEM_ERR: Cannot queue up memory "
+ "deallocation error event to the OS");
+ return false;
+ }
+}
+
+/* Receive a memory error message and handle it. */
+static bool fsp_mem_err_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u64 paddr_start, paddr_end;
+
+	printf(PREFIX "Received 0x%08x command\n", cmd_sub_mod);
+ switch (cmd_sub_mod) {
+ case FSP_CMD_MEM_RES_CE:
+ case FSP_CMD_MEM_RES_UE:
+ case FSP_CMD_MEM_RES_UE_SCRB:
+ /*
+		 * We get the memory resilience command from the FSP for
+		 * correctable/uncorrectable/scrub UE errors with the real
+		 * address of the 4K memory page in which the error occurred.
+ */
+ paddr_start = *((u64 *)&msg->data.words[0]);
+ printf(PREFIX "Got memory resilience error message for "
+		       "paddr=0x%016llx\n", paddr_start);
+ return handle_memory_resilience(cmd_sub_mod, paddr_start);
+ case FSP_CMD_MEM_DYN_DEALLOC:
+ paddr_start = *((u64 *)&msg->data.words[0]);
+ paddr_end = *((u64 *)&msg->data.words[2]);
+ printf(PREFIX "Got dynamic memory deallocation message: "
+		       "paddr_start=0x%016llx, paddr_end=0x%016llx\n",
+ paddr_start, paddr_end);
+ return handle_memory_deallocation(paddr_start, paddr_end);
+ }
+ return false;
+}
+
+/*
+ * Pre-allocate memory to hold up to MERR_MAX_RECORD memory error events
+ * until Linux consumes them.
+ */
+static int init_merr_free_list(uint32_t num_entries)
+{
+ struct fsp_mem_err_node *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct fsp_mem_err_node) * num_entries);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; ++i, entry++)
+ list_add_tail(&merr_free_list, &entry->list);
+
+ return 0;
+}
+
+static struct fsp_client fsp_mem_err_client = {
+ .message = fsp_mem_err_msg,
+};
+
+void fsp_memory_err_init(void)
+{
+ int rc;
+
+	printf(PREFIX "Initializing FSP memory error handling.\n");
+ /* If we have an FSP, register for notifications */
+ if (!fsp_present())
+ return;
+
+	/* Pre-allocate memory for MERR_MAX_RECORD records */
+ rc = init_merr_free_list(MERR_MAX_RECORD);
+ if (rc < 0)
+ return;
+
+ fsp_register_client(&fsp_mem_err_client, FSP_MCLASS_MEMORY_ERR);
+}
diff --git a/hw/fsp/fsp-nvram.c b/hw/fsp/fsp-nvram.c
new file mode 100644
index 00000000..b432c376
--- /dev/null
+++ b/hw/fsp/fsp-nvram.c
@@ -0,0 +1,414 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <opal.h>
+#include <lock.h>
+#include <device.h>
+#include <fsp-elog.h>
+
+//#define DBG(fmt...) printf("NVRAM: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+/*
+ * The FSP NVRAM API operates in "blocks" of 4K. It is entirely exposed
+ * to the OS via the OPAL APIs.
+ *
+ * In order to avoid dealing with complicated read/modify/write state
+ * machines (and added issues related to FSP failover in the middle)
+ * we keep a memory copy of the entire nvram which we load at boot
+ * time. We save only modified blocks.
+ *
+ * To limit the amount of memory used by the nvram image, we limit
+ * how much nvram we support to NVRAM_SIZE. Additionally, this limit
+ * of 1M is the maximum that the CHRP/PAPR nvram partition format
+ * supports for a partition entry.
+ *
+ * (Q: should we save the whole thing in case of FSP failover?)
+ *
+ * The nvram is expected to comply with the CHRP/PAPR defined format,
+ * and specifically contain a System partition (ID 0x70) named "common"
+ * with configuration variables for the bootloader and a FW private
+ * partition for future use by skiboot.
+ *
+ * If the partition layout appears broken or lacks one of the above
+ * partitions, we reformat the entire nvram at boot time.
+ *
+ * We do not exploit the ability of the FSP to store a checksum. This
+ * is documented as possibly going away. The CHRP format for nvram
+ * that Linux uses already has its own (though weak) checksum mechanism.
+ *
+ */
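+
+/*
+ * Dirty-range tracking sketch (offsets are illustrative): two writes
+ * dirtying the 4K blocks at 0x1000 and 0x3000 simply widen the
+ * [dirty_start, dirty_end] window, so fsp_nvram_send_write() emits a
+ * single triplet covering blocks 1..3 rather than one write message
+ * per update.
+ */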
+
+#define NVRAM_BLKSIZE 0x1000
+
+struct nvram_triplet {
+ uint64_t dma_addr;
+ uint32_t blk_offset;
+ uint32_t blk_count;
+} __packed;
+
+#define NVRAM_FLAG_CLEAR_WPEND 0x80000000
+
+enum nvram_state {
+ NVRAM_STATE_CLOSED,
+ NVRAM_STATE_OPENING,
+ NVRAM_STATE_BROKEN,
+ NVRAM_STATE_OPEN,
+ NVRAM_STATE_ABSENT,
+};
+
+static void *fsp_nvram_image;
+static uint32_t fsp_nvram_size;
+static struct lock fsp_nvram_lock = LOCK_UNLOCKED;
+static struct fsp_msg *fsp_nvram_msg;
+static uint32_t fsp_nvram_dirty_start;
+static uint32_t fsp_nvram_dirty_end;
+static bool fsp_nvram_was_read;
+static struct nvram_triplet fsp_nvram_triplet __align(0x1000);
+static enum nvram_state fsp_nvram_state = NVRAM_STATE_CLOSED;
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_OPEN, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_SIZE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_READ, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static void fsp_nvram_send_write(void);
+
+static void fsp_nvram_wr_complete(struct fsp_msg *msg)
+{
+ struct fsp_msg *resp = msg->resp;
+ uint8_t rc;
+
+ lock(&fsp_nvram_lock);
+ fsp_nvram_msg = NULL;
+
+ /* Check for various errors. If an error occurred,
+ * we generally assume the nvram is completely dirty
+ * but we won't trigger a new write until we get
+ * either a new attempt at writing, or an FSP reset
+ * reload (TODO)
+ */
+ if (!resp || resp->state != fsp_msg_response)
+ goto fail_dirty;
+ rc = (msg->word1 >> 8) & 0xff;
+ switch(rc) {
+ case 0:
+ case 0x44:
+ /* Sync to secondary required... XXX */
+ case 0x45:
+ break;
+ case 0xef:
+ /* Sync to secondary failed, let's ignore that for now,
+ * maybe when (if) we handle redundant FSPs ...
+ */
+ prerror("FSP: NVRAM sync to secondary failed\n");
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
+ "FSP: NVRAM write return error 0x%02x\n", rc);
+ goto fail_dirty;
+ }
+ fsp_freemsg(msg);
+ if (fsp_nvram_dirty_start <= fsp_nvram_dirty_end)
+ fsp_nvram_send_write();
+ unlock(&fsp_nvram_lock);
+ return;
+ fail_dirty:
+ fsp_nvram_dirty_start = 0;
+ fsp_nvram_dirty_end = fsp_nvram_size - 1;
+ fsp_freemsg(msg);
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_write(void)
+{
+ uint32_t start = fsp_nvram_dirty_start;
+ uint32_t end = fsp_nvram_dirty_end;
+ uint32_t count;
+
+ if (start > end || fsp_nvram_state != NVRAM_STATE_OPEN)
+ return;
+ count = (end - start) / NVRAM_BLKSIZE + 1;
+ fsp_nvram_triplet.dma_addr = PSI_DMA_NVRAM_BODY + start;
+ fsp_nvram_triplet.blk_offset = start / NVRAM_BLKSIZE;
+ fsp_nvram_triplet.blk_count = count;
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_WRITE_VNVRAM, 6,
+ 0, PSI_DMA_NVRAM_TRIPL, 1,
+ NVRAM_FLAG_CLEAR_WPEND, 0, 0);
+ if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_wr_complete)) {
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
+ "FSP: Error queueing nvram update\n");
+ return;
+ }
+ fsp_nvram_dirty_start = fsp_nvram_size;
+ fsp_nvram_dirty_end = 0;
+}
+
+static void fsp_nvram_rd_complete(struct fsp_msg *msg)
+{
+ int64_t rc;
+
+ lock(&fsp_nvram_lock);
+
+	/* Read complete, check status. What to do if the read fails?
+ *
+ * Well, there could be various reasons such as an FSP reboot
+ * at the wrong time, but there is really not much we can do
+ * so for now I'll just mark the nvram as closed, and we'll
+ * attempt a re-open and re-read whenever the OS tries to
+ * access it
+ */
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_nvram_msg = NULL;
+ fsp_freemsg(msg);
+ if (rc) {
+ prerror("FSP: NVRAM read failed, will try again later\n");
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ } else {
+ /* nvram was read once, no need to do it ever again */
+ fsp_nvram_was_read = true;
+ fsp_nvram_state = NVRAM_STATE_OPEN;
+
+ /* XXX Here we should look for nvram settings that concern
+ * us such as guest kernel arguments etc...
+ */
+ }
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_read(void)
+{
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_READ_VNVRAM, 4,
+ 0, PSI_DMA_NVRAM_BODY, 0,
+ fsp_nvram_size / NVRAM_BLKSIZE);
+ if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_rd_complete)) {
+ /* If the nvram read fails to queue, we mark ourselves
+ * closed. Shouldn't have happened anyway. Not much else
+ * we can do.
+ */
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ log_simple_error(&e_info(OPAL_RC_NVRAM_READ),
+ "FSP: Error queueing nvram read\n");
+ return;
+ }
+}
+
+static void fsp_nvram_open_complete(struct fsp_msg *msg)
+{
+ int8_t rc;
+
+ lock(&fsp_nvram_lock);
+
+ /* Open complete, check status */
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_nvram_msg = NULL;
+ fsp_freemsg(msg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_OPEN),
+ "FSP: NVRAM open failed, FSP error 0x%02x\n", rc);
+ goto failed;
+ }
+ if (fsp_nvram_was_read)
+ fsp_nvram_state = NVRAM_STATE_OPEN;
+ else
+ fsp_nvram_send_read();
+ unlock(&fsp_nvram_lock);
+ return;
+ failed:
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_open(void)
+{
+ printf("FSP NVRAM: Opening nvram...\n");
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_OPEN_VNVRAM, 1, fsp_nvram_size);
+ assert(fsp_nvram_msg);
+ fsp_nvram_state = NVRAM_STATE_OPENING;
+ if (!fsp_queue_msg(fsp_nvram_msg, fsp_nvram_open_complete))
+ return;
+
+ prerror("FSP NVRAM: Failed to queue nvram open message\n");
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+}
+
+static bool fsp_nvram_get_size(uint32_t *out_size)
+{
+ struct fsp_msg *msg;
+ int rc, size;
+
+ msg = fsp_mkmsg(FSP_CMD_GET_VNVRAM_SIZE, 0);
+ rc = fsp_sync_msg(msg, false);
+ size = msg->resp ? msg->resp->data.words[0] : 0;
+ fsp_freemsg(msg);
+ if (rc || size == 0) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_SIZE),
+ "FSP: Error %d nvram size reported is %d\n", rc, size);
+ fsp_nvram_state = NVRAM_STATE_BROKEN;
+ return false;
+ }
+ printf("FSP: NVRAM file size from FSP is %d bytes\n", size);
+ *out_size = size;
+ return true;
+}
+
+static bool fsp_nvram_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ printf("FSP: Closing NVRAM on account of FSP Reset\n");
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ return true;
+ case FSP_RELOAD_COMPLETE:
+		printf("FSP: Reopening NVRAM after FSP reload complete\n");
+ lock(&fsp_nvram_lock);
+ fsp_nvram_send_open();
+ unlock(&fsp_nvram_lock);
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_nvram_client_rr = {
+ .message = fsp_nvram_msg_rr,
+};
+
+int fsp_nvram_info(uint32_t *total_size)
+{
+ if (!fsp_present()) {
+ fsp_nvram_state = NVRAM_STATE_ABSENT;
+ return OPAL_HARDWARE;
+ }
+
+ if (!fsp_nvram_get_size(total_size))
+ return OPAL_HARDWARE;
+ return OPAL_SUCCESS;
+}
+
+int fsp_nvram_start_read(void *dst, uint32_t src, uint32_t len)
+{
+ /* We are currently limited to fully aligned transfers */
+ assert((((uint64_t)dst) & 0xfff) == 0);
+ assert(dst);
+
+ /* Currently don't support src!=0 */
+ assert(src == 0);
+
+ if (!fsp_present())
+ return -ENODEV;
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x0007);
+
+ lock(&fsp_nvram_lock);
+
+ /* Store image info */
+ fsp_nvram_image = dst;
+ fsp_nvram_size = len;
+
+ /* Mark nvram as not dirty */
+ fsp_nvram_dirty_start = len;
+ fsp_nvram_dirty_end = 0;
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_NVRAM_TRIPL, &fsp_nvram_triplet,
+ PSI_DMA_NVRAM_TRIPL_SZ);
+ fsp_tce_map(PSI_DMA_NVRAM_BODY, dst, PSI_DMA_NVRAM_BODY_SZ);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_nvram_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Open and load the nvram from the FSP */
+ fsp_nvram_send_open();
+
+ unlock(&fsp_nvram_lock);
+
+ return 0;
+}
+
+int fsp_nvram_write(uint32_t offset, void *src, uint32_t size)
+{
+ uint64_t end = offset + size - 1;
+
+ /* We only support writing from the original image */
+ if (src != fsp_nvram_image + offset)
+ return OPAL_HARDWARE;
+
+ offset &= ~(NVRAM_BLKSIZE - 1);
+ end &= ~(NVRAM_BLKSIZE - 1);
+
+ lock(&fsp_nvram_lock);
+ /* If the nvram is closed, try re-opening */
+ if (fsp_nvram_state == NVRAM_STATE_CLOSED)
+ fsp_nvram_send_open();
+ if (fsp_nvram_dirty_start > offset)
+ fsp_nvram_dirty_start = offset;
+ if (fsp_nvram_dirty_end < end)
+ fsp_nvram_dirty_end = end;
+ if (!fsp_nvram_msg && fsp_nvram_state == NVRAM_STATE_OPEN)
+ fsp_nvram_send_write();
+ unlock(&fsp_nvram_lock);
+
+ return 0;
+}
+
+/* This is called right before starting the payload (Linux) to
+ * ensure the initial open & read of nvram has happened before
+ * we transfer control to the guest OS. This is necessary as
+ * Linux will not handle an OPAL_BUSY return properly and will
+ * treat it as an error
+ */
+void fsp_nvram_wait_open(void)
+{
+ if (!fsp_present())
+ return;
+
+ while(fsp_nvram_state == NVRAM_STATE_OPENING)
+ fsp_poll();
+
+ if (!fsp_nvram_was_read) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_INIT),
+ "FSP: NVRAM not read, skipping init\n");
+ nvram_read_complete(false);
+ return;
+ }
+
+ nvram_read_complete(true);
+}
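
For reference, here is a minimal standalone sketch (not part of this patch; BLKSIZE and mark_dirty() are invented names) of the dirty-window arithmetic used by fsp_nvram_write() and fsp_nvram_send_write() above: each write is rounded to block boundaries and merged into a single [dirty_start, dirty_end] window, which is then flushed with one triplet.

	#include <stdint.h>
	#include <stdio.h>

	#define BLKSIZE 0x1000u	/* assumed block size; NVRAM_BLKSIZE may differ */

	static uint32_t dirty_start = UINT32_MAX;	/* "clean" marker */
	static uint32_t dirty_end;

	static void mark_dirty(uint32_t offset, uint32_t size)
	{
		uint32_t end = offset + size - 1;

		offset &= ~(BLKSIZE - 1);	/* block containing the first byte */
		end &= ~(BLKSIZE - 1);		/* block containing the last byte */
		if (offset < dirty_start)
			dirty_start = offset;
		if (end > dirty_end)
			dirty_end = end;
	}

	int main(void)
	{
		mark_dirty(0x1234, 8);		/* dirties block 1 */
		mark_dirty(0x5000, 0x10);	/* dirties block 5 */
		/* A single write now covers blocks 1..5 inclusive: */
		printf("blk_offset=%u blk_count=%u\n",
		       (unsigned)(dirty_start / BLKSIZE),
		       (unsigned)((dirty_end - dirty_start) / BLKSIZE + 1));
		return 0;
	}
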
diff --git a/hw/fsp/fsp-op-panel.c b/hw/fsp/fsp-op-panel.c
new file mode 100644
index 00000000..e2df34ea
--- /dev/null
+++ b/hw/fsp/fsp-op-panel.c
@@ -0,0 +1,249 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <opal.h>
+#include <device.h>
+#include <processor.h>
+#include <opal-msg.h>
+#include <fsp-elog.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_PANEL_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_OP_PANEL,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static struct fsp_msg op_msg_resp;
+static struct fsp_msg op_msg = {
+ .resp = &op_msg_resp,
+};
+static struct fsp_msg *op_req;
+static uint64_t op_async_token;
+static struct lock op_lock = LOCK_UNLOCKED;
+
+void op_display(enum op_severity sev, enum op_module mod, uint16_t code)
+{
+ uint32_t w0 = sev << 16 | mod;
+ uint32_t w1;
+ bool clean_lock;
+
+ if (!fsp_present())
+ return;
+
+ w1 = tohex((code >> 12) & 0xf) << 24;
+ w1 |= tohex((code >> 8) & 0xf) << 16;
+ w1 |= tohex((code >> 4) & 0xf) << 8;
+ w1 |= tohex((code ) & 0xf);
+
+ /*
+ * We use lock_recursive to detect recursion. We avoid sending
+ * the message if that happens as this could be a case of a
+ * locking error in the FSP driver for example
+ */
+ clean_lock = lock_recursive(&op_lock);
+ if (!clean_lock)
+ return;
+
+ /* We don't use mkmsg, we use a preallocated msg to avoid
+ * going down the malloc path etc... since this can be called
+ * in case of fatal errors
+ */
+ fsp_fillmsg(&op_msg, FSP_CMD_DISP_SRC_DIRECT, 3, 1, w0, w1);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
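
For reference, a standalone sketch (not part of this patch) of the SRC word built by op_display() above: each nibble of the 16-bit code becomes an ASCII hex digit, so code 0x0007 is sent as the string "0007". The local tohex() is a stand-in for skiboot's helper of the same name.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tohex(uint8_t n)	/* stand-in for skiboot's helper */
	{
		static const char hex[] = "0123456789ABCDEF";

		return hex[n & 0xf];
	}

	int main(void)
	{
		uint16_t code = 0x0007;
		uint32_t w1 = tohex((code >> 12) & 0xf) << 24 |
			      tohex((code >>  8) & 0xf) << 16 |
			      tohex((code >>  4) & 0xf) <<  8 |
			      tohex(code & 0xf);

		/* prints: w1=0x30303037 ("0007") */
		printf("w1=0x%08x (\"%c%c%c%c\")\n", w1, (int)(w1 >> 24),
		       (int)((w1 >> 16) & 0xff), (int)((w1 >> 8) & 0xff),
		       (int)(w1 & 0xff));
		return 0;
	}
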
+
+void op_panel_disable_src_echo(void)
+{
+ if (!fsp_present())
+ return;
+
+ lock(&op_lock);
+ fsp_fillmsg(&op_msg, FSP_CMD_DIS_SRC_ECHO, 0);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
+
+void op_panel_clear_src(void)
+{
+ if (!fsp_present())
+ return;
+
+ lock(&op_lock);
+ fsp_fillmsg(&op_msg, FSP_CMD_CLEAR_SRC, 0);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
+
+/* opal_write_oppanel - Write to the physical op panel.
+ *
+ * Pass in an array of oppanel_line_t structs defining the ASCII characters
+ * to display on each line of the oppanel. If there are two lines on the
+ * physical panel, and you only want to write to the first line, you only
+ * need to pass in one line. If you only want to write to the second line,
+ * you need to pass in both lines, and set the line_len of the first line
+ * to zero.
+ *
+ * This command is asynchronous. If OPAL_SUCCESS is returned, then the
+ * operation was initiated successfully. Subsequent calls will return
+ * OPAL_BUSY until the current operation is complete.
+ */
+struct op_src {
+ uint8_t version;
+#define OP_SRC_VERSION 2
+ uint8_t flags;
+ uint8_t reserved;
+ uint8_t hex_word_cnt;
+ uint16_t reserved2;
+ uint16_t total_size;
+ uint32_t word2; /* SRC format in low byte */
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+#define OP_SRC_ASCII_LEN 32
+ uint8_t ascii[OP_SRC_ASCII_LEN]; /* Word 11 */
+} __packed __align(4);
+
+/* Page align for the sake of TCE mapping */
+static struct op_src op_src __align(0x1000);
+
+static void __op_panel_write_complete(struct fsp_msg *msg)
+{
+ fsp_tce_unmap(PSI_DMA_OP_PANEL_MISC, 0x1000);
+ lwsync();
+ op_req = NULL;
+ fsp_freemsg(msg);
+}
+
+static void op_panel_write_complete(struct fsp_msg *msg)
+{
+ uint8_t rc = (msg->resp->word1 >> 8) & 0xff;
+
+ if (rc)
+ prerror("OPPANEL: Error 0x%02x in display command\n", rc);
+
+ __op_panel_write_complete(msg);
+
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, 1, op_async_token);
+}
+
+static int64_t __opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines,
+ uint64_t async_token)
+{
+ int64_t rc = OPAL_ASYNC_COMPLETION;
+ int len;
+ int i;
+
+ if (num_lines < 1 || num_lines > 2)
+ return OPAL_PARAMETER;
+
+ lock(&op_lock);
+
+ /* Only one in flight */
+ if (op_req) {
+ rc = OPAL_BUSY_EVENT;
+ goto bail;
+ }
+
+ op_req = fsp_allocmsg(true);
+ if (!op_req) {
+ rc = OPAL_NO_MEM;
+ goto bail;
+ }
+
+ op_async_token = async_token;
+
+ memset(&op_src, 0, sizeof(op_src));
+
+ op_src.version = OP_SRC_VERSION;
+ op_src.flags = 0;
+ op_src.reserved = 0;
+ op_src.hex_word_cnt = 1; /* header word only */
+ op_src.reserved2 = 0;
+ op_src.total_size = sizeof(op_src);
+ op_src.word2 = 0; /* should be unneeded */
+
+ len = lines[0].line_len > 16 ? 16 : lines[0].line_len;
+
+ memset(op_src.ascii + len, ' ', 16-len);
+ memcpy(op_src.ascii, lines[0].line, len);
+ if (num_lines > 1) {
+ len = lines[1].line_len > 16 ? 16 : lines[1].line_len;
+ memcpy(op_src.ascii + 16, lines[1].line, len);
+ memset(op_src.ascii + 16 + len, ' ', 16-len);
+ }
+
+ for (i = 0; i < sizeof(op_src.ascii); i++) {
+ /*
+ * So, there's this interesting thing if you send
+ * HTML/Javascript through the Operator Panel.
+ * You get to inject it into the ASM web ui!
+ * So we filter out anything suspect here,
+ * at least for the time being.
+ *
+ * Allowed characters:
+ * . / 0-9 : a-z A-Z SPACE
+ */
+ if (! ((op_src.ascii[i] >= '.' && op_src.ascii[i] <= ':') ||
+ (op_src.ascii[i] >= 'a' && op_src.ascii[i] <= 'z') ||
+ (op_src.ascii[i] >= 'A' && op_src.ascii[i] <= 'Z') ||
+ op_src.ascii[i] == ' ')) {
+ op_src.ascii[i] = '.';
+ }
+ }
+
+ fsp_tce_map(PSI_DMA_OP_PANEL_MISC, &op_src, 0x1000);
+
+ fsp_fillmsg(op_req, FSP_CMD_DISP_SRC_INDIR, 3, 0,
+ PSI_DMA_OP_PANEL_MISC, sizeof(struct op_src));
+ rc = fsp_queue_msg(op_req, op_panel_write_complete);
+ if (rc) {
+ __op_panel_write_complete(op_req);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+ bail:
+ unlock(&op_lock);
+	if (rc != OPAL_SUCCESS && rc != OPAL_ASYNC_COMPLETION &&
+	    rc != OPAL_BUSY_EVENT)
+		log_simple_error(&e_info(OPAL_RC_PANEL_WRITE),
+			"FSP: Error updating Op Panel: %lld\n", rc);
+ return rc;
+}
+
+static int64_t opal_write_oppanel_async(uint64_t async_token,
+ oppanel_line_t *lines,
+ uint64_t num_lines)
+{
+ return __opal_write_oppanel(lines, num_lines, async_token);
+}
+
+void fsp_oppanel_init(void)
+{
+ struct dt_node *oppanel;
+
+ if (!fsp_present())
+ return;
+
+ opal_register(OPAL_WRITE_OPPANEL_ASYNC, opal_write_oppanel_async, 3);
+
+ oppanel = dt_new(opal_node, "oppanel");
+ dt_add_property_cells(oppanel, "#length", 16);
+ dt_add_property_cells(oppanel, "#lines", 2);
+ dt_add_property_string(oppanel, "compatible", "ibm,opal-oppanel");
+}
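
For reference, a standalone sketch (not part of this patch; sanitize() is an invented name) of the character whitelist applied in __opal_write_oppanel() above. Note the '.'..':' range covers '.', '/', the digits 0-9 and ':' in ASCII.

	#include <stdio.h>

	static char sanitize(char c)
	{
		if ((c >= '.' && c <= ':') ||
		    (c >= 'a' && c <= 'z') ||
		    (c >= 'A' && c <= 'Z') ||
		    c == ' ')
			return c;
		return '.';	/* anything suspect becomes a dot */
	}

	int main(void)
	{
		const char *in = "<script>alert(1)</script>";
		char out[64];
		int i;

		for (i = 0; i < 63 && in[i]; i++)
			out[i] = sanitize(in[i]);
		out[i] = '\0';

		/* prints: .script.alert.1../script. */
		printf("%s\n", out);
		return 0;
	}
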
diff --git a/hw/fsp/fsp-rtc.c b/hw/fsp/fsp-rtc.c
new file mode 100644
index 00000000..887091ab
--- /dev/null
+++ b/hw/fsp/fsp-rtc.c
@@ -0,0 +1,572 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <timebase.h>
+#include <time.h>
+#include <fsp-elog.h>
+
+//#define DBG(fmt...) printf("RTC: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+/*
+ * Note on how those operate:
+ *
+ * Because the RTC calls can be pretty slow, these functions will shoot
+ * an asynchronous request to the FSP (if none is already pending)
+ *
+ * The requests will return OPAL_BUSY_EVENT as long as the event has
+ * not been completed.
+ *
+ * WARNING: An attempt at doing an RTC write while one is already pending
+ * will simply ignore the new arguments and continue returning
+ * OPAL_BUSY_EVENT. This is to be compatible with existing Linux code.
+ *
+ * Completion of the request will result in an event OPAL_EVENT_RTC
+ * being signaled, which will remain raised until a corresponding call
+ * to opal_rtc_read() or opal_rtc_write() finally returns OPAL_SUCCESS,
+ * at which point the operation is complete and the event cleared.
+ *
+ * If we end up taking longer than rtc_read_timeout_ms milliseconds waiting
+ * for the response from a read request, we simply return a cached value (plus
+ * an offset calculated from the timebase). When the read request finally
+ * returns, we update our cached value accordingly.
+ *
+ * There are two separate sets of state for reads and writes. If both are
+ * attempted at the same time, the event bit will remain set as long as either
+ * of the two has a pending event to signal.
+ */
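
A hypothetical caller sketch (not part of this patch) of the OPAL_BUSY_EVENT protocol described above; opal_rtc_read() and opal_poll_events() here are stand-ins for the OS's OPAL call wrappers, and only the loop shape is the point.

	#include <stdint.h>

	/* Values from the OPAL API (opal.h) */
	#define OPAL_SUCCESS		0
	#define OPAL_BUSY_EVENT		-12

	/* Stubs standing in for the real OPAL entry points */
	int64_t opal_rtc_read(uint32_t *ymd, uint64_t *hmsm);
	int64_t opal_poll_events(uint64_t *events);

	int64_t read_rtc_blocking(uint32_t *ymd, uint64_t *hmsm)
	{
		int64_t rc;

		do {
			rc = opal_rtc_read(ymd, hmsm);
			if (rc == OPAL_BUSY_EVENT)
				/* lets the pending FSP read complete and
				 * the OPAL_EVENT_RTC bit clear */
				opal_poll_events(NULL);
		} while (rc == OPAL_BUSY_EVENT);

		return rc;	/* OPAL_SUCCESS, or e.g. OPAL_HARDWARE */
	}
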
+
+enum {
+ RTC_TOD_VALID,
+ RTC_TOD_INVALID,
+ RTC_TOD_PERMANENT_ERROR,
+} rtc_tod_state = RTC_TOD_INVALID;
+
+static struct lock rtc_lock;
+static struct fsp_msg *rtc_read_msg;
+static struct fsp_msg *rtc_write_msg;
+/* TODO We'd probably want to export and use this variable declared in fsp.c,
+ * instead of each component individually maintaining the state.. may be for
+ * later optimization
+ */
+static bool fsp_in_reset = false;
+
+/* last synchronisation point */
+static struct {
+ struct tm tm;
+ unsigned long tb;
+ bool dirty;
+} rtc_tod_cache;
+
+/* Timebase value when we last initiated a RTC read request */
+static unsigned long read_req_tb;
+
+/* If a RTC read takes longer than this, we return a value generated
+ * from the cache + timebase */
+static const int rtc_read_timeout_ms = 1500;
+
+DEFINE_LOG_ENTRY(OPAL_RC_RTC_TOD, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_RTC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+static int days_in_month(int month, int year)
+{
+ static int month_days[] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
+ };
+
+ assert(1 <= month && month <= 12);
+
+ /* we may need to update this in the year 4000, pending a
+ * decision on whether or not it's a leap year */
+ if (month == 2) {
+ bool is_leap = !(year % 400) || ((year % 100) && !(year % 4));
+ return is_leap ? 29 : 28;
+ }
+
+ return month_days[month - 1];
+}
+
+static void tm_add(struct tm *in, struct tm *out, unsigned long secs)
+{
+ unsigned long year, month, mday, hour, minute, second, d;
+ static const unsigned long sec_in_400_years =
+		((303ul * 365) + (97 * 366)) * 24 * 60 * 60;
+
+ assert(in);
+ assert(out);
+
+ second = in->tm_sec;
+ minute = in->tm_min;
+ hour = in->tm_hour;
+ mday = in->tm_mday;
+ month = in->tm_mon;
+ year = in->tm_year;
+
+ second += secs;
+
+ /* There are the same number of seconds in any 400-year block; this
+ * limits the iterations in the loop below */
+ year += 400 * (second / sec_in_400_years);
+ second = second % sec_in_400_years;
+
+ if (second >= 60) {
+ minute += second / 60;
+ second = second % 60;
+ }
+
+ if (minute >= 60) {
+ hour += minute / 60;
+ minute = minute % 60;
+ }
+
+ if (hour >= 24) {
+ mday += hour / 24;
+ hour = hour % 24;
+ }
+
+ for (d = days_in_month(month, year); mday >= d;
+ d = days_in_month(month, year)) {
+ month++;
+ if (month > 12) {
+ month = 1;
+ year++;
+ }
+ mday -= d;
+ }
+
+ out->tm_year = year;
+ out->tm_mon = month;
+ out->tm_mday = mday;
+ out->tm_hour = hour;
+ out->tm_min = minute;
+ out->tm_sec = second;
+}
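
A quick standalone check (not part of this patch) of the sec_in_400_years constant in tm_add() above: 400 Gregorian years contain 97 leap years (100 - 4 + 1) and therefore 303 regular ones, 146097 days in all.

	#include <stdio.h>

	int main(void)
	{
		/* 400 Gregorian years = 97 leap + 303 regular years */
		unsigned long long days = 303ull * 365 + 97 * 366;

		/* prints: days=146097 secs=12622780800 */
		printf("days=%llu secs=%llu\n", days, days * 24 * 60 * 60);
		return 0;
	}
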
+
+/* MSB is byte 3, LSB is byte 0 */
+static unsigned int bcd_byte(uint32_t bcd, int byteno)
+{
+ bcd >>= byteno * 8;
+ return (bcd >> 4 & 0xf) * 10 + (bcd & 0xf);
+}
+
+static uint32_t int_to_bcd2(unsigned int x)
+{
+ return (((x / 10) << 4) & 0xf0) | (x % 10);
+}
+
+static uint32_t int_to_bcd4(unsigned int x)
+{
+ return int_to_bcd2(x / 100) << 8 | int_to_bcd2(x % 100);
+}
+
+static void rtc_to_tm(struct fsp_msg *msg, struct tm *tm)
+{
+ uint32_t x;
+
+ /* The FSP returns in BCD:
+ *
+ * | year | month | mday |
+ * +------------------------------------+
+ * | hour | minute | secs | reserved |
+ * +------------------------------------+
+ * | microseconds |
+ */
+ x = msg->data.words[0];
+ tm->tm_year = bcd_byte(x, 3) * 100 + bcd_byte(x, 2);
+ tm->tm_mon = bcd_byte(x, 1);
+ tm->tm_mday = bcd_byte(x, 0);
+
+ x = msg->data.words[1];
+ tm->tm_hour = bcd_byte(x, 3);
+ tm->tm_min = bcd_byte(x, 2);
+ tm->tm_sec = bcd_byte(x, 1);
+}
+
+static void tm_to_datetime(struct tm *tm, uint32_t *y_m_d, uint64_t *h_m_s_m)
+{
+ uint64_t h_m_s;
+ /*
+	 * The OPAL API is defined as returning a u64 of a similar
+ * format to the FSP message; the 32-bit date field is
+ * in the format:
+ *
+ * | year | year | month | day |
+ *
+ */
+ *y_m_d = int_to_bcd4(tm->tm_year) << 16 |
+ int_to_bcd2(tm->tm_mon) << 8 |
+ int_to_bcd2(tm->tm_mday);
+
+ /*
+ * ... and the 64-bit time field is in the format
+ *
+ * | hour | minutes | secs | millisec |
+ * | -------------------------------------
+ * | millisec | reserved |
+ *
+ * We simply ignore the microseconds/milliseconds for now
+ * as I don't quite understand why the OPAL API defines that
+ * it needs 6 digits for the milliseconds :-) I suspect the
+ * doc got that wrong and it's supposed to be micro but
+ * let's ignore it.
+ *
+ * Note that Linux doesn't use nor set the ms field anyway.
+ */
+ h_m_s = int_to_bcd2(tm->tm_hour) << 24 |
+ int_to_bcd2(tm->tm_min) << 16 |
+ int_to_bcd2(tm->tm_sec) << 8;
+
+ *h_m_s_m = h_m_s << 32;
+}
+
+static void fsp_rtc_process_read(struct fsp_msg *read_resp)
+{
+ int val = (read_resp->word1 >> 8) & 0xff;
+
+ switch (val) {
+ case 0xa9:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD in invalid state\n");
+ rtc_tod_state = RTC_TOD_INVALID;
+ break;
+
+ case 0xaf:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD in permanent error state\n");
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+ break;
+
+ case 0:
+ /* Save the read RTC value in our cache */
+ rtc_to_tm(read_resp, &rtc_tod_cache.tm);
+ rtc_tod_cache.tb = mftb();
+ rtc_tod_state = RTC_TOD_VALID;
+ break;
+
+ default:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD read failed: %d\n", val);
+ rtc_tod_state = RTC_TOD_INVALID;
+ }
+}
+
+static void opal_rtc_eval_events(void)
+{
+ bool pending = false;
+
+ if (rtc_read_msg && !fsp_msg_busy(rtc_read_msg))
+ pending = true;
+ if (rtc_write_msg && !fsp_msg_busy(rtc_write_msg))
+ pending = true;
+ opal_update_pending_evt(OPAL_EVENT_RTC, pending ? OPAL_EVENT_RTC : 0);
+}
+
+static void fsp_rtc_req_complete(struct fsp_msg *msg)
+{
+ lock(&rtc_lock);
+ DBG("RTC completion %p\n", msg);
+ if (msg == rtc_read_msg)
+ fsp_rtc_process_read(msg->resp);
+ opal_rtc_eval_events();
+ unlock(&rtc_lock);
+}
+
+static int64_t fsp_rtc_send_read_request(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_READ_TOD, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_RTC_READ),
+ "RTC: failed to allocate read message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ rc = fsp_queue_msg(msg, fsp_rtc_req_complete);
+ if (rc) {
+ fsp_freemsg(msg);
+ log_simple_error(&e_info(OPAL_RC_RTC_READ),
+ "RTC: failed to queue read message: %d\n", rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ read_req_tb = mftb();
+ rtc_read_msg = msg;
+
+ return OPAL_BUSY_EVENT;
+}
+
+static void encode_cached_tod(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+ unsigned long cache_age_sec;
+ struct tm tm;
+
+ cache_age_sec = tb_to_msecs(mftb() - rtc_tod_cache.tb) / 1000;
+
+ tm_add(&rtc_tod_cache.tm, &tm, cache_age_sec);
+
+ /* Format to OPAL API values */
+ tm_to_datetime(&tm, year_month_day, hour_minute_second_millisecond);
+}
+
+int fsp_rtc_get_cached_tod(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+ if (rtc_tod_state != RTC_TOD_VALID)
+ return -1;
+
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ return 0;
+}
+
+static int64_t fsp_opal_rtc_read(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+ struct fsp_msg *msg;
+ int64_t rc;
+
+ if (!year_month_day || !hour_minute_second_millisecond)
+ return OPAL_PARAMETER;
+
+ lock(&rtc_lock);
+ /* During R/R of FSP, read cached TOD */
+ if (fsp_in_reset) {
+ fsp_rtc_get_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ msg = rtc_read_msg;
+
+ if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
+ if (msg && !fsp_msg_busy(msg))
+ fsp_freemsg(msg);
+ rc = OPAL_HARDWARE;
+ goto out;
+ }
+
+ /* If we don't have a read pending already, fire off a request and
+ * return */
+ if (!msg) {
+ DBG("Sending new RTC read request\n");
+ rc = fsp_rtc_send_read_request();
+
+ /* If our pending read is done, clear events and return the time
+ * from the cache */
+ } else if (!fsp_msg_busy(msg)) {
+ DBG("RTC read complete, state %d\n", rtc_tod_state);
+
+ rtc_read_msg = NULL;
+ opal_rtc_eval_events();
+ fsp_freemsg(msg);
+
+ if (rtc_tod_state == RTC_TOD_VALID) {
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+ } else
+ rc = OPAL_INTERNAL_ERROR;
+
+ /* Timeout: return our cached value (updated from tb), but leave the
+ * read request pending so it will update the cache later */
+ } else if (mftb() > read_req_tb + msecs_to_tb(rtc_read_timeout_ms)) {
+ DBG("RTC read timed out\n");
+
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+
+ /* Otherwise, we're still waiting on the read to complete */
+ } else {
+ rc = OPAL_BUSY_EVENT;
+ }
+out:
+ unlock(&rtc_lock);
+ return rc;
+}
+
+static int64_t fsp_opal_rtc_write(uint32_t year_month_day,
+ uint64_t hour_minute_second_millisecond)
+{
+ struct fsp_msg *msg;
+ uint32_t w0, w1, w2;
+ int64_t rc;
+
+ lock(&rtc_lock);
+ if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
+ rc = OPAL_HARDWARE;
+ msg = NULL;
+ goto bail;
+ }
+
+ /* Do we have a request already ? */
+ msg = rtc_write_msg;
+ if (msg) {
+ /* If it's still in progress, return */
+ if (fsp_msg_busy(msg)) {
+ /* Don't free the message */
+ msg = NULL;
+ rc = OPAL_BUSY_EVENT;
+ goto bail;
+ }
+
+ DBG("Completed write request @%p, state=%d\n", msg, msg->state);
+ /* It's complete, clear events */
+ rtc_write_msg = NULL;
+ opal_rtc_eval_events();
+
+ /* Check error state */
+ if (msg->state != fsp_msg_done) {
+ DBG(" -> request not in done state -> error !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ goto bail;
+ }
+ rc = OPAL_SUCCESS;
+ goto bail;
+ }
+
+ DBG("Sending new write request...\n");
+
+ /* Create a request and send it. Just like for read, we ignore
+ * the "millisecond" field which is probably supposed to be
+ * microseconds and which Linux ignores as well anyway
+ */
+ w0 = year_month_day;
+ w1 = (hour_minute_second_millisecond >> 32) & 0xffffff00;
+ w2 = 0;
+
+ rtc_write_msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, w0, w1, w2);
+ if (!rtc_write_msg) {
+ DBG(" -> allocation failed !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ goto bail;
+ }
+ DBG(" -> req at %p\n", rtc_write_msg);
+
+ if (fsp_in_reset) {
+ rtc_to_tm(rtc_write_msg, &rtc_tod_cache.tm);
+ rtc_tod_cache.tb = mftb();
+ rtc_tod_cache.dirty = true;
+ fsp_freemsg(rtc_write_msg);
+ rtc_write_msg = NULL;
+ rc = OPAL_SUCCESS;
+ goto bail;
+ } else if (fsp_queue_msg(rtc_write_msg, fsp_rtc_req_complete)) {
+ DBG(" -> queueing failed !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ fsp_freemsg(rtc_write_msg);
+ rtc_write_msg = NULL;
+ goto bail;
+ }
+ rc = OPAL_BUSY_EVENT;
+ bail:
+ unlock(&rtc_lock);
+ if (msg)
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static void rtc_flush_cached_tod(void)
+{
+ struct fsp_msg *msg;
+ uint64_t h_m_s_m;
+ uint32_t y_m_d;
+
+ if (fsp_rtc_get_cached_tod(&y_m_d, &h_m_s_m))
+ return;
+ msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, y_m_d,
+ (h_m_s_m >> 32) & 0xffffff00, 0);
+ if (msg)
+ fsp_queue_msg(msg, fsp_freemsg);
+}
+
+static bool fsp_rtc_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+	bool rc = false;
+
+	assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ lock(&rtc_lock);
+ fsp_in_reset = true;
+ unlock(&rtc_lock);
+ rc = true;
+ break;
+ case FSP_RELOAD_COMPLETE:
+ lock(&rtc_lock);
+ fsp_in_reset = false;
+ if (rtc_tod_cache.dirty) {
+ rtc_flush_cached_tod();
+ rtc_tod_cache.dirty = false;
+ }
+ unlock(&rtc_lock);
+ rc = true;
+ break;
+ }
+
+ return rc;
+}
+
+static struct fsp_client fsp_rtc_client_rr = {
+ .message = fsp_rtc_msg_rr,
+};
+
+void fsp_rtc_init(void)
+{
+ struct fsp_msg msg, resp;
+ int rc;
+
+ if (!fsp_present()) {
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+ return;
+ }
+
+ opal_register(OPAL_RTC_READ, fsp_opal_rtc_read, 2);
+ opal_register(OPAL_RTC_WRITE, fsp_opal_rtc_write, 2);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_rtc_client_rr, FSP_MCLASS_RR_EVENT);
+
+ msg.resp = &resp;
+ fsp_fillmsg(&msg, FSP_CMD_READ_TOD, 0);
+
+ DBG("Getting initial RTC TOD\n");
+
+ lock(&rtc_lock);
+
+ rc = fsp_sync_msg(&msg, false);
+
+ if (rc >= 0)
+ fsp_rtc_process_read(&resp);
+ else
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+
+ unlock(&rtc_lock);
+}
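
For reference, a standalone sketch (not part of this patch) of the BCD packing performed by tm_to_datetime() above: 19 February 2014, 14:05:09 encodes as y_m_d 0x20140219 and h_m_s_m 0x1405090000000000. bcd2() and bcd4() mirror the int_to_bcd helpers.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t bcd2(unsigned int x)
	{
		return (((x / 10) << 4) & 0xf0) | (x % 10);
	}

	static uint32_t bcd4(unsigned int x)
	{
		return bcd2(x / 100) << 8 | bcd2(x % 100);
	}

	int main(void)
	{
		uint32_t y_m_d = bcd4(2014) << 16 | bcd2(2) << 8 | bcd2(19);
		uint64_t h_m_s_m = (uint64_t)(bcd2(14) << 24 | bcd2(5) << 16 |
					      bcd2(9) << 8) << 32;

		/* prints: y_m_d=0x20140219 h_m_s_m=0x1405090000000000 */
		printf("y_m_d=0x%08x h_m_s_m=0x%016llx\n", y_m_d,
		       (unsigned long long)h_m_s_m);
		return 0;
	}
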
diff --git a/hw/fsp/fsp-sensor.c b/hw/fsp/fsp-sensor.c
new file mode 100644
index 00000000..f4fc19d2
--- /dev/null
+++ b/hw/fsp/fsp-sensor.c
@@ -0,0 +1,788 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Design note:
+ * This code enables the 'powernv' platform to retrieve sensor-related data
+ * from the FSP using SPCN passthru mailbox commands.
+ *
+ * The OPAL read sensor API in Sapphire is implemented as an 'asynchronous'
+ * read call that returns after queuing the read request. A unique sensor-id
+ * is expected as an argument to the OPAL read call; these ids have already
+ * been exported to the device tree during fsp init. The Sapphire code
+ * decodes this id to determine the requested attribute and sensor.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <device.h>
+#include <spcn.h>
+#include <opal-msg.h>
+#include <fsp-elog.h>
+
+//#define DBG(fmt...) printf("SENSOR: " fmt)
+#define DBG(fmt...) do { } while (0)
+
+#define SENSOR_PREFIX "sensor: "
+#define INVALID_DATA ((uint32_t)-1)
+
+/* Entry size of PRS command modifiers */
+#define PRS_STATUS_ENTRY_SZ 0x08
+#define SENSOR_PARAM_ENTRY_SZ 0x10
+#define SENSOR_DATA_ENTRY_SZ 0x08
+#define PROC_JUNC_ENTRY_SZ 0x04
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR,
+ OPAL_MISC_SUBSYSTEM,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_READ, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR,
+ OPAL_MISC_SUBSYSTEM, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_ASYNC_COMPLETE, OPAL_PLATFORM_ERR_EVT,
+ OPAL_SENSOR, OPAL_MISC_SUBSYSTEM, OPAL_INFO,
+ OPAL_NA, NULL);
+
+/* FSP response status codes */
+enum {
+ SP_RSP_STATUS_VALID_DATA = 0x00,
+ SP_RSP_STATUS_INVALID_DATA = 0x22,
+ SP_RSP_STATUS_SPCN_ERR = 0xA8,
+ SP_RSP_STATUS_DMA_ERR = 0x24,
+};
+
+enum sensor_state {
+ SENSOR_VALID_DATA,
+ SENSOR_INVALID_DATA,
+ SENSOR_SPCN_ERROR,
+ SENSOR_DMA_ERROR,
+ SENSOR_PERMANENT_ERROR,
+ SENSOR_OPAL_ERROR,
+};
+
+enum spcn_attr {
+ /* mod 0x01, 0x02 */
+ SENSOR_PRESENT,
+ SENSOR_FAULTED,
+ SENSOR_AC_FAULTED,
+ SENSOR_ON,
+ SENSOR_ON_SUPPORTED,
+ /* mod 0x10, 0x11 */
+ SENSOR_THRS,
+ SENSOR_LOCATION,
+ /* mod 0x12, 0x13 */
+ SENSOR_DATA,
+ /* mod 0x1c */
+ SENSOR_POWER,
+
+ SENSOR_MAX,
+};
+
+/* Parsed sensor attributes, passed through OPAL */
+struct opal_sensor_data {
+ uint64_t async_token; /* Asynchronous token */
+ uint32_t *sensor_data; /* Kernel pointer to copy data */
+ enum spcn_attr spcn_attr; /* Modifier attribute */
+ uint16_t rid; /* Sensor RID */
+ uint8_t frc; /* Sensor resource class */
+	uint32_t mod_index; /* Modifier index */
+ uint32_t offset; /* Offset in sensor buffer */
+};
+
+struct spcn_mod_attr {
+ const char *name;
+ enum spcn_attr val;
+};
+
+struct spcn_mod {
+ uint8_t mod; /* Modifier code */
+ uint8_t entry_size; /* Size of each entry in response buffer */
+ uint16_t entry_count; /* Number of entries */
+ struct spcn_mod_attr *mod_attr;
+};
+
+static struct spcn_mod_attr prs_status_attrs[] = {
+ {"present", SENSOR_PRESENT},
+ {"faulted", SENSOR_FAULTED},
+ {"ac-faulted", SENSOR_AC_FAULTED},
+ {"on", SENSOR_ON},
+ {"on-supported", SENSOR_ON_SUPPORTED}
+};
+
+static struct spcn_mod_attr sensor_param_attrs[] = {
+ {"thrs", SENSOR_THRS},
+ {"loc", SENSOR_LOCATION}
+};
+
+static struct spcn_mod_attr sensor_data_attrs[] = {
+ {"data", SENSOR_DATA}
+};
+
+static struct spcn_mod_attr sensor_power_attrs[] = {
+ {"power", SENSOR_POWER}
+};
+
+static struct spcn_mod spcn_mod_data[] = {
+ {SPCN_MOD_PRS_STATUS_FIRST, PRS_STATUS_ENTRY_SZ, 0,
+ prs_status_attrs},
+ {SPCN_MOD_PRS_STATUS_SUBS, PRS_STATUS_ENTRY_SZ, 0,
+ prs_status_attrs},
+ {SPCN_MOD_SENSOR_PARAM_FIRST, SENSOR_PARAM_ENTRY_SZ, 0,
+ sensor_param_attrs},
+ {SPCN_MOD_SENSOR_PARAM_SUBS, SENSOR_PARAM_ENTRY_SZ, 0,
+ sensor_param_attrs},
+ {SPCN_MOD_SENSOR_DATA_FIRST, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_data_attrs},
+ {SPCN_MOD_SENSOR_DATA_SUBS, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_data_attrs},
+ /* TODO Support this modifier '0x14', if required */
+ /* {SPCN_MOD_PROC_JUNC_TEMP, PROC_JUNC_ENTRY_SZ, 0, NULL}, */
+ {SPCN_MOD_SENSOR_POWER, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_power_attrs},
+ {SPCN_MOD_LAST, 0xff, 0xffff, NULL}
+};
+
+/* Frame resource class (FRC) names */
+static const char *frc_names[] = {
+ /* 0x00 and 0x01 are reserved */
+ NULL,
+ NULL,
+ "power-controller",
+ "power-supply",
+ "regulator",
+ "cooling-fan",
+ "cooling-controller",
+ "battery-charger",
+ "battery-pack",
+ "amb-temp",
+ "temp",
+ "vrm",
+ "riser-card",
+ "io-backplane"
+};
+
+#define SENSOR_MAX_SIZE 0x00100000
+static void *sensor_buffer = NULL;
+static enum sensor_state sensor_state;
+static bool prev_msg_consumed = true;
+static struct lock sensor_lock;
+
+/* Function prototypes */
+static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr);
+static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr);
+
+
+/*
+ * Power Resource Status (PRS)
+ * Command: 0x42
+ *
+ * Modifier: 0x01
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | SRC | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x10
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | Sensor location |
+ * --------------------------------------------------------------------------
+ * --------------------------------------------------------------------------
+ * | 8 9 10 11 12 13 14 15 |
+ * --------------------------------------------------------------------------
+ * | Reserved | Reserved | Threshold | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x12
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | Sensor data | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x14
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 |
+ * --------------------------------------------------------------------------
+ * |Enclosure Tj Avg | Chip Tj Avg | Reserved | Reserved |
+ * --------------------------------------------------------------------------
+ */
+
+static void fsp_sensor_process_data(struct opal_sensor_data *attr)
+{
+ uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer;
+ uint32_t sensor_data = INVALID_DATA;
+ uint16_t sensor_mod_data[8];
+ int count, i;
+ uint8_t valid, nr_power;
+ uint32_t power;
+
+ for (count = 0; count < spcn_mod_data[attr->mod_index].entry_count;
+ count++) {
+ memcpy((void *)sensor_mod_data, sensor_buf_ptr,
+ spcn_mod_data[attr->mod_index].entry_size);
+ if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if required */
+
+ } else if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_SENSOR_POWER) {
+ valid = sensor_buf_ptr[0];
+ if (valid & 0x80) {
+ nr_power = valid & 0x0f;
+ sensor_data = 0;
+				for (i = 0; i < nr_power; i++) {
+ power = *(uint32_t *) &sensor_buf_ptr[2 + i * 5];
+ DBG("Power[%d]: %d mW\n", i, power);
+ sensor_data += power/1000;
+ }
+ } else {
+ DBG("Power Sensor data not valid\n");
+ }
+ } else if (sensor_mod_data[0] == attr->frc &&
+ sensor_mod_data[1] == attr->rid) {
+ switch (attr->spcn_attr) {
+ /* modifier 0x01, 0x02 */
+ case SENSOR_PRESENT:
+ DBG("Not exported to device tree\n");
+ break;
+ case SENSOR_FAULTED:
+ sensor_data = sensor_mod_data[3] & 0x02;
+ break;
+ case SENSOR_AC_FAULTED:
+ case SENSOR_ON:
+ case SENSOR_ON_SUPPORTED:
+ DBG("Not exported to device tree\n");
+ break;
+ /* modifier 0x10, 0x11 */
+ case SENSOR_THRS:
+ sensor_data = sensor_mod_data[6];
+ break;
+ case SENSOR_LOCATION:
+ DBG("Not exported to device tree\n");
+ break;
+ /* modifier 0x12, 0x13 */
+ case SENSOR_DATA:
+ sensor_data = sensor_mod_data[2];
+ break;
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ sensor_buf_ptr += spcn_mod_data[attr->mod_index].entry_size;
+ }
+
+ *(attr->sensor_data) = sensor_data;
+ if (sensor_data == INVALID_DATA)
+ queue_msg_for_delivery(OPAL_PARTIAL, attr);
+ else
+ queue_msg_for_delivery(OPAL_SUCCESS, attr);
+}
+
+static int fsp_sensor_process_read(struct fsp_msg *resp_msg)
+{
+ uint8_t mbx_rsp_status;
+ uint32_t size = 0;
+
+ mbx_rsp_status = (resp_msg->word1 >> 8) & 0xff;
+ switch (mbx_rsp_status) {
+ case SP_RSP_STATUS_VALID_DATA:
+ sensor_state = SENSOR_VALID_DATA;
+ size = resp_msg->data.words[1] & 0xffff;
+ break;
+ case SP_RSP_STATUS_INVALID_DATA:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Received invalid data\n", __func__);
+ sensor_state = SENSOR_INVALID_DATA;
+ break;
+ case SP_RSP_STATUS_SPCN_ERR:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failure due to SPCN error\n", __func__);
+ sensor_state = SENSOR_SPCN_ERROR;
+ break;
+ case SP_RSP_STATUS_DMA_ERR:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failure due to DMA error\n", __func__);
+ sensor_state = SENSOR_DMA_ERROR;
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR %s: Read failed, status:0x%02X\n",
+ __func__, mbx_rsp_status);
+ sensor_state = SENSOR_INVALID_DATA;
+ break;
+ }
+
+ return size;
+}
+
+static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr)
+{
+ DBG("%s: rc:%d, data:%d\n", __func__, rc, *(attr->sensor_data));
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ attr->async_token, rc);
+ spcn_mod_data[attr->mod_index].entry_count = 0;
+ free(attr);
+ prev_msg_consumed = true;
+}
+
+static void fsp_sensor_read_complete(struct fsp_msg *msg)
+{
+ struct opal_sensor_data *attr = msg->user_data;
+ enum spcn_rsp_status status;
+ int rc, size;
+
+ DBG("Sensor read completed\n");
+
+ status = (msg->resp->data.words[1] >> 24) & 0xff;
+ size = fsp_sensor_process_read(msg->resp);
+ fsp_freemsg(msg);
+
+ lock(&sensor_lock);
+ if (sensor_state == SENSOR_VALID_DATA) {
+ spcn_mod_data[attr->mod_index].entry_count += (size /
+ spcn_mod_data[attr->mod_index].entry_size);
+ attr->offset += size;
+ /* Fetch the subsequent entries of the same modifier type */
+ if (status == SPCN_RSP_STATUS_COND_SUCCESS) {
+ switch (spcn_mod_data[attr->mod_index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ attr->mod_index++;
+ spcn_mod_data[attr->mod_index].entry_count =
+ spcn_mod_data[attr->mod_index - 1].
+ entry_count;
+ spcn_mod_data[attr->mod_index - 1].entry_count = 0;
+ break;
+ default:
+ break;
+ }
+
+ rc = fsp_sensor_send_read_request(attr);
+ if (rc != OPAL_ASYNC_COMPLETION)
+ goto err;
+ } else { /* Notify 'powernv' of read completion */
+ fsp_sensor_process_data(attr);
+ }
+ } else {
+ rc = OPAL_INTERNAL_ERROR;
+ goto err;
+ }
+ unlock(&sensor_lock);
+ return;
+err:
+ *(attr->sensor_data) = INVALID_DATA;
+ queue_msg_for_delivery(rc, attr);
+ unlock(&sensor_lock);
+ log_simple_error(&e_info(OPAL_RC_SENSOR_ASYNC_COMPLETE),
+ "SENSOR: %s: Failed to queue the "
+ "read request to fsp\n", __func__);
+}
+
+static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr)
+{
+ int rc;
+ struct fsp_msg *msg;
+ uint32_t *sensor_buf_ptr;
+ uint32_t align;
+ uint32_t cmd_header;
+
+ DBG("Get the data for modifier [%d]\n", spcn_mod_data[attr->mod_index].mod);
+ if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if required */
+ align = attr->offset % sizeof(*sensor_buf_ptr);
+ if (align)
+ attr->offset += (sizeof(*sensor_buf_ptr) - align);
+
+ sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer +
+ attr->offset);
+
+ /* TODO Add 8 byte command data required for mod 0x14 */
+
+ attr->offset += 8;
+
+ cmd_header = spcn_mod_data[attr->mod_index].mod << 24 |
+ SPCN_CMD_PRS << 16 | 0x0008;
+ } else {
+ cmd_header = spcn_mod_data[attr->mod_index].mod << 24 |
+ SPCN_CMD_PRS << 16;
+ }
+
+ msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0,
+ PSI_DMA_SENSOR_BUF + attr->offset);
+
+ if (!msg) {
+ prerror(SENSOR_PREFIX "%s: Failed to allocate read message"
+ "\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ msg->user_data = attr;
+ rc = fsp_queue_msg(msg, fsp_sensor_read_complete);
+ if (rc) {
+ fsp_freemsg(msg);
+ msg = NULL;
+ prerror(SENSOR_PREFIX "%s: Failed to queue read message, "
+ "%d\n", __func__, rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
+
+static int64_t parse_sensor_id(uint32_t id, struct opal_sensor_data *attr)
+{
+ uint32_t mod, index;
+
+ attr->spcn_attr = id >> 24;
+ if (attr->spcn_attr >= SENSOR_MAX)
+ return OPAL_PARAMETER;
+
+ if (attr->spcn_attr <= SENSOR_ON_SUPPORTED)
+ mod = SPCN_MOD_PRS_STATUS_FIRST;
+ else if (attr->spcn_attr <= SENSOR_LOCATION)
+ mod = SPCN_MOD_SENSOR_PARAM_FIRST;
+ else if (attr->spcn_attr <= SENSOR_DATA)
+ mod = SPCN_MOD_SENSOR_DATA_FIRST;
+ else if (attr->spcn_attr <= SENSOR_POWER)
+ mod = SPCN_MOD_SENSOR_POWER;
+ else
+ return OPAL_PARAMETER;
+
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; index++) {
+ if (spcn_mod_data[index].mod == mod)
+ break;
+ }
+
+ attr->mod_index = index;
+ attr->frc = (id >> 16) & 0xff;
+ attr->rid = id & 0xffff;
+
+ return 0;
+}
+
+
+static int64_t fsp_opal_read_sensor(uint32_t sensor_hndl, int token,
+ uint32_t *sensor_data)
+{
+ struct opal_sensor_data *attr;
+ int64_t rc;
+
+ DBG("fsp_opal_read_sensor [%08x]\n", sensor_hndl);
+ if (sensor_state == SENSOR_PERMANENT_ERROR) {
+ rc = OPAL_HARDWARE;
+ goto out;
+ }
+
+ if (!sensor_hndl) {
+ rc = OPAL_PARAMETER;
+ goto out;
+ }
+
+ lock(&sensor_lock);
+ if (prev_msg_consumed) {
+ attr = zalloc(sizeof(*attr));
+ if (!attr) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_INIT),
+ "SENSOR: Failed to allocate memory\n");
+ rc = OPAL_NO_MEM;
+ goto out_lock;
+ }
+
+		/* Parse the sensor id and store it in the local structure */
+ rc = parse_sensor_id(sensor_hndl, attr);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failed to parse the sensor "
+ "handle[0x%08x]\n", __func__, sensor_hndl);
+ goto out_free;
+ }
+ /* Kernel buffer pointer to copy the data later when ready */
+ attr->sensor_data = sensor_data;
+ attr->async_token = token;
+
+ rc = fsp_sensor_send_read_request(attr);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failed to queue the read "
+ "request to fsp\n", __func__);
+ goto out_free;
+ }
+
+ prev_msg_consumed = false;
+ } else {
+ rc = OPAL_BUSY_EVENT;
+ }
+
+ unlock(&sensor_lock);
+ return rc;
+
+out_free:
+ free(attr);
+out_lock:
+ unlock(&sensor_lock);
+out:
+ return rc;
+}
+
+
+#define MAX_RIDS 64
+#define MAX_NAME 64
+
+static uint32_t get_index(uint32_t *prids, uint16_t rid)
+{
+ int index;
+
+	for (index = 0; index < MAX_RIDS && prids[index]; index++) {
+		if (prids[index] == rid)
+			return index;
+	}
+
+	/* Not found: remember this rid, taking care not to write past
+	 * the end of a full table.
+	 */
+	if (index < MAX_RIDS)
+		prids[index] = rid;
+ return index;
+}
+
+static void create_sensor_nodes(int index, uint16_t frc, uint16_t rid,
+ uint32_t *prids, struct dt_node *sensors)
+{
+ char name[MAX_NAME];
+ struct dt_node *fs_node;
+ uint32_t value;
+
+ switch (spcn_mod_data[index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_PRS_STATUS_SUBS:
+ switch (frc) {
+ case SENSOR_FRC_POWER_SUPPLY:
+ case SENSOR_FRC_COOLING_FAN:
+ snprintf(name, MAX_NAME, "%s#%d-%s", frc_names[frc],
+ /* Start enumeration from 1 */
+ get_index(prids, rid) + 1,
+ spcn_mod_data[index].mod_attr[1].name);
+ fs_node = dt_new(sensors, name);
+ snprintf(name, MAX_NAME, "ibm,opal-sensor-%s",
+ frc_names[frc]);
+ dt_add_property_string(fs_node, "compatible", name);
+ value = spcn_mod_data[index].mod_attr[1].val << 24 |
+ (frc & 0xff) << 16 | rid;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_SUBS:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ case SPCN_MOD_SENSOR_DATA_SUBS:
+ switch (frc) {
+ case SENSOR_FRC_POWER_SUPPLY:
+ case SENSOR_FRC_COOLING_FAN:
+ case SENSOR_FRC_AMB_TEMP:
+ snprintf(name, MAX_NAME, "%s#%d-%s", frc_names[frc],
+ /* Start enumeration from 1 */
+ get_index(prids, rid) + 1,
+ spcn_mod_data[index].mod_attr[0].name);
+ fs_node = dt_new(sensors, name);
+ snprintf(name, MAX_NAME, "ibm,opal-sensor-%s",
+ frc_names[frc]);
+ dt_add_property_string(fs_node, "compatible", name);
+ value = spcn_mod_data[index].mod_attr[0].val << 24 |
+ (frc & 0xff) << 16 | rid;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case SPCN_MOD_SENSOR_POWER:
+ fs_node = dt_new(sensors, "power#1-data");
+ dt_add_property_string(fs_node, "compatible", "ibm,opal-sensor-power");
+ value = spcn_mod_data[index].mod_attr[0].val << 24;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ }
+}
+
+static void add_sensor_ids(struct dt_node *sensors)
+{
+ uint32_t MAX_FRC_NAMES = sizeof(frc_names) / sizeof(*frc_names);
+ uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer;
+ uint32_t frc_rids[MAX_FRC_NAMES][MAX_RIDS];
+ uint16_t sensor_frc, power_rid;
+ uint16_t sensor_mod_data[8];
+ int index, count;
+
+ memset(frc_rids, 0, sizeof(frc_rids));
+
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; index++) {
+ if (spcn_mod_data[index].mod == SPCN_MOD_SENSOR_POWER) {
+			create_sensor_nodes(index, 0, 0, NULL, sensors);
+ continue;
+ }
+ for (count = 0; count < spcn_mod_data[index].entry_count;
+ count++) {
+ if (spcn_mod_data[index].mod ==
+ SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if
+ * required */
+ } else {
+ memcpy((void *)sensor_mod_data, sensor_buf_ptr,
+ spcn_mod_data[index].entry_size);
+ sensor_frc = sensor_mod_data[0];
+ power_rid = sensor_mod_data[1];
+
+ if (sensor_frc < MAX_FRC_NAMES &&
+ frc_names[sensor_frc])
+ create_sensor_nodes(index, sensor_frc,
+ power_rid,
+ frc_rids[sensor_frc],
+ sensors);
+ }
+
+ sensor_buf_ptr += spcn_mod_data[index].entry_size;
+ }
+ }
+}
+
+static void add_opal_sensor_node(void)
+{
+ int index;
+ struct dt_node *sensors;
+
+ if (!fsp_present())
+ return;
+
+ sensors = dt_new(opal_node, "sensors");
+
+ add_sensor_ids(sensors);
+
+ /* Reset the entry count of each modifier */
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST;
+ index++)
+ spcn_mod_data[index].entry_count = 0;
+}
+
+void fsp_init_sensor(void)
+{
+ uint32_t cmd_header, align, size, psi_dma_offset = 0;
+ enum spcn_rsp_status status;
+ uint32_t *sensor_buf_ptr;
+ struct fsp_msg msg, resp;
+ int index, rc;
+
+ if (!fsp_present()) {
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ return;
+ }
+
+ sensor_buffer = memalign(TCE_PSIZE, SENSOR_MAX_SIZE);
+ if (!sensor_buffer) {
+ prerror("FSP: could not allocate sensor_buffer!\n");
+ return;
+ }
+
+ /* Map TCE */
+ fsp_tce_map(PSI_DMA_SENSOR_BUF, sensor_buffer, PSI_DMA_SENSOR_BUF_SZ);
+
+ /* Register OPAL interface */
+ opal_register(OPAL_SENSOR_READ, fsp_opal_read_sensor, 3);
+
+ msg.resp = &resp;
+
+	/* Traverse all the modifiers to discover the sensors available
+ * in the system */
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST &&
+ sensor_state == SENSOR_VALID_DATA;) {
+ DBG("Get the data for modifier [%d]\n", spcn_mod_data[index].mod);
+ if (spcn_mod_data[index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier 0x14, if required */
+ align = psi_dma_offset % sizeof(*sensor_buf_ptr);
+ if (align)
+ psi_dma_offset += (sizeof(*sensor_buf_ptr) - align);
+
+ sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer
+ + psi_dma_offset);
+
+ /* TODO Add 8 byte command data required for mod 0x14 */
+ psi_dma_offset += 8;
+
+ cmd_header = spcn_mod_data[index].mod << 24 |
+ SPCN_CMD_PRS << 16 | 0x0008;
+ } else {
+ cmd_header = spcn_mod_data[index].mod << 24 |
+ SPCN_CMD_PRS << 16;
+ }
+
+ fsp_fillmsg(&msg, FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0,
+ PSI_DMA_SENSOR_BUF + psi_dma_offset);
+
+ rc = fsp_sync_msg(&msg, false);
+ if (rc >= 0) {
+ status = (resp.data.words[1] >> 24) & 0xff;
+ size = fsp_sensor_process_read(&resp);
+ psi_dma_offset += size;
+ spcn_mod_data[index].entry_count += (size /
+ spcn_mod_data[index].entry_size);
+ } else {
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ break;
+ }
+
+ switch (spcn_mod_data[index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ if (status == SPCN_RSP_STATUS_COND_SUCCESS)
+ index++;
+ else
+ index += 2;
+
+ break;
+ case SPCN_MOD_PRS_STATUS_SUBS:
+ case SPCN_MOD_SENSOR_PARAM_SUBS:
+ case SPCN_MOD_SENSOR_DATA_SUBS:
+ if (status != SPCN_RSP_STATUS_COND_SUCCESS)
+ index++;
+ break;
+ case SPCN_MOD_SENSOR_POWER:
+			index++;
+			break;
+ default:
+ break;
+ }
+ }
+
+ if (sensor_state != SENSOR_VALID_DATA)
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ else
+ add_opal_sensor_node();
+}
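
For reference, a standalone sketch (not part of this patch) of the sensor-id layout shared by create_sensor_nodes() and parse_sensor_id() above; the attribute, FRC and RID values used here are examples only.

	#include <stdint.h>
	#include <stdio.h>

	/* bits 31..24: SPCN attribute, bits 23..16: frame resource class,
	 * bits 15..0: resource id */
	static uint32_t encode_sensor_id(uint8_t attr, uint8_t frc, uint16_t rid)
	{
		return (uint32_t)attr << 24 | (uint32_t)frc << 16 | rid;
	}

	int main(void)
	{
		/* e.g. attr 7 (SENSOR_DATA), frc 9 (amb-temp), rid 0x23 */
		uint32_t id = encode_sensor_id(7, 9, 0x0023);

		/* prints: id=0x07090023 attr=7 frc=9 rid=0x0023 */
		printf("id=0x%08x attr=%u frc=%u rid=0x%04x\n", id,
		       (unsigned)(id >> 24), (unsigned)((id >> 16) & 0xff),
		       (unsigned)(id & 0xffff));
		return 0;
	}
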
diff --git a/hw/fsp/fsp-surveillance.c b/hw/fsp/fsp-surveillance.c
new file mode 100644
index 00000000..c1d19b64
--- /dev/null
+++ b/hw/fsp/fsp-surveillance.c
@@ -0,0 +1,209 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <processor.h>
+#include <timebase.h>
+#include <fsp-sysparam.h>
+#include <fsp-elog.h>
+
+static bool fsp_surv_state = false;
+static bool fsp_surv_ack_pending = false;
+static u64 surv_timer;
+static u64 surv_ack_timer;
+static u32 surv_state_param;
+static struct lock surv_lock = LOCK_UNLOCKED;
+
+#define FSP_SURV_ACK_TIMEOUT 120 /* surv ack timeout in seconds */
+
+DEFINE_LOG_ENTRY(OPAL_RC_SURVE_INIT, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE,
+ OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_MISCELLANEOUS_INFO_ONLY, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SURVE_STATUS, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE,
+ OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_MISCELLANEOUS_INFO_ONLY, NULL);
+
+static void fsp_surv_ack(struct fsp_msg *msg)
+{
+ uint8_t val;
+
+ if (!msg->resp)
+ return;
+
+ val = (msg->resp->word1 >> 8) & 0xff;
+ if (val == 0) {
+ /* reset the pending flag */
+ printf("SURV: Received heartbeat acknowledge from FSP\n");
+ lock(&surv_lock);
+ fsp_surv_ack_pending = false;
+ unlock(&surv_lock);
+ } else
+ prerror("SURV: Heartbeat Acknowledgment error from FSP\n");
+
+ fsp_freemsg(msg);
+}
+
+static void fsp_surv_check_timeout(void)
+{
+ u64 now = mftb();
+
+ /*
+ * We just checked fsp_surv_ack_pending to be true in fsp_surv_hbeat
+ * and we haven't dropped the surv_lock between then and now. So, we
+ * just go ahead and check timeouts.
+ */
+ if (tb_compare(now, surv_ack_timer) == TB_AAFTERB) {
+ /* XXX: We should be logging a PEL to the host, assuming
+ * the FSP is dead, pending a R/R.
+ */
+ prerror("SURV: [%16llx] Surv ACK timed out; initiating R/R\n",
+ now);
+
+ /* Reset the pending trigger too */
+ fsp_surv_ack_pending = false;
+ fsp_trigger_reset();
+ }
+
+ return;
+}
+
+/* Send surveillance heartbeat based on a timebase trigger */
+static void fsp_surv_hbeat(void)
+{
+ u64 now = mftb();
+
+ /* Check if an ack is pending... if so, don't send the ping just yet */
+ if (fsp_surv_ack_pending) {
+ fsp_surv_check_timeout();
+ return;
+ }
+
+ /* add timebase callbacks */
+ /*
+	 * XXX This packet needs to be pushed to the FSP in an interval
+	 * shorter than the 120s timeout advertised to the FSP.
+ *
+ * Verify if the command building format and call is fine.
+ */
+ if (surv_timer == 0 ||
+ (tb_compare(now, surv_timer) == TB_AAFTERB) ||
+ (tb_compare(now, surv_timer) == TB_AEQUALB)) {
+		printf("SURV: [%16llx] Sending the heartbeat command to FSP\n",
+ now);
+ fsp_queue_msg(fsp_mkmsg(FSP_CMD_SURV_HBEAT, 1, 120),
+ fsp_surv_ack);
+
+ fsp_surv_ack_pending = true;
+ surv_timer = now + secs_to_tb(60);
+ surv_ack_timer = now + secs_to_tb(FSP_SURV_ACK_TIMEOUT);
+ }
+}
+
+static void fsp_surv_poll(void *data __unused)
+{
+ if (!fsp_surv_state)
+ return;
+ lock(&surv_lock);
+ fsp_surv_hbeat();
+ unlock(&surv_lock);
+}
+
+static void fsp_surv_got_param(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_SURVE_STATUS),
+			"SURV: Error retrieving surveillance status: %d\n",
+ err_len);
+ return;
+ }
+
+ printf("SURV: Status from FSP: %d\n", surv_state_param);
+ if (!(surv_state_param & 0x01))
+ return;
+
+ lock(&surv_lock);
+ fsp_surv_state = true;
+
+ /* Also send one heartbeat now. The next one will not happen
+ * until we hit the OS.
+ */
+ fsp_surv_hbeat();
+ unlock(&surv_lock);
+}
+
+void fsp_surv_query(void)
+{
+ int rc;
+
+ printf("SURV: Querying FSP's surveillance status\n");
+
+ /* Reset surveillance settings */
+ lock(&surv_lock);
+ fsp_surv_state = false;
+ surv_timer = 0;
+ surv_ack_timer = 0;
+ unlock(&surv_lock);
+
+	/* Query FSP for surveillance state */
+ rc = fsp_get_sys_param(SYS_PARAM_SURV, &surv_state_param, 4,
+ fsp_surv_got_param, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SURVE_INIT),
+ "SURV: Error %d queueing param request\n", rc);
+ }
+}
+
+static bool fsp_surv_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ printf("SURV: Disabling surveillance\n");
+ fsp_surv_state = false;
+ fsp_surv_ack_pending = false;
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ fsp_surv_query();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_surv_client_rr = {
+ .message = fsp_surv_msg_rr,
+};
+
+/* This is called at boot time */
+void fsp_init_surveillance(void)
+{
+ /* Always register the poller, so we don't have to add/remove
+ * it on reset-reload or change of surveillance state. Also the
+ * poller list has no locking so we don't want to play with it
+ * at runtime.
+ */
+ opal_add_poller(fsp_surv_poll, NULL);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_surv_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Send query to FSP */
+ fsp_surv_query();
+}
+
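
For reference, a standalone sketch (not part of this patch) of the heartbeat pacing above: send a heartbeat every 60 seconds and give up on the FSP if no acknowledgement arrives within 120 seconds. The timebase is faked in plain seconds instead of mftb() ticks.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HBEAT_PERIOD	60	/* resend interval, seconds */
	#define ACK_TIMEOUT	120	/* declare the FSP dead after this */

	static uint64_t hb_timer, ack_timer;
	static bool ack_pending;

	static void surv_poll(uint64_t now, bool ack_arrived)
	{
		if (ack_pending && ack_arrived)
			ack_pending = false;
		if (ack_pending) {
			if (now > ack_timer)
				printf("[%3llu] ack timed out -> reset FSP\n",
				       (unsigned long long)now);
			return;
		}
		if (hb_timer == 0 || now >= hb_timer) {
			printf("[%3llu] heartbeat sent\n",
			       (unsigned long long)now);
			ack_pending = true;
			hb_timer = now + HBEAT_PERIOD;
			ack_timer = now + ACK_TIMEOUT;
		}
	}

	int main(void)
	{
		surv_poll(1, false);	/* first poll sends immediately */
		surv_poll(30, true);	/* ack arrives; next send due at 61 */
		surv_poll(61, false);	/* periodic resend */
		surv_poll(200, false);	/* no ack for >120s -> reset */
		return 0;
	}
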
diff --git a/hw/fsp/fsp-sysparam.c b/hw/fsp/fsp-sysparam.c
new file mode 100644
index 00000000..e9e5b164
--- /dev/null
+++ b/hw/fsp/fsp-sysparam.c
@@ -0,0 +1,454 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <opal.h>
+#include <device.h>
+#include <lock.h>
+#include <processor.h>
+#include <psi.h>
+#include <opal-msg.h>
+#include <fsp-sysparam.h>
+
+struct sysparam_comp_data {
+ uint32_t param_len;
+ uint64_t async_token;
+};
+
+struct sysparam_req {
+ sysparam_compl_t completion;
+ void *comp_data;
+ void *ubuf;
+ uint32_t ulen;
+ struct fsp_msg msg;
+ struct fsp_msg resp;
+ bool done;
+};
+
+static struct sysparam_attr {
+ const char *name;
+ uint32_t id;
+ uint32_t length;
+ uint8_t perm;
+} sysparam_attrs[] = {
+#define _R OPAL_SYSPARAM_READ
+#define _W OPAL_SYSPARAM_WRITE
+#define _RW OPAL_SYSPARAM_RW
+ {"surveillance", SYS_PARAM_SURV, 4, _RW},
+ {"hmc-management", SYS_PARAM_HMC_MANAGED, 4, _R},
+ {"cupd-policy", SYS_PARAM_FLASH_POLICY, 4, _RW},
+ {"plat-hmc-managed", SYS_PARAM_NEED_HMC, 4, _RW},
+ {"fw-license-policy", SYS_PARAM_FW_LICENSE, 4, _RW},
+ {"world-wide-port-num", SYS_PARAM_WWPN, 12, _W},
+ {"default-boot-device", SYS_PARAM_DEF_BOOT_DEV, 1, _RW},
+	{"next-boot-device", SYS_PARAM_NEXT_BOOT_DEV, 1, _RW}
+#undef _R
+#undef _W
+#undef _RW
+};
+
+static int fsp_sysparam_process(struct sysparam_req *r)
+{
+ u32 param_id, len;
+ int stlen = 0;
+ u8 fstat;
+ /* Snapshot completion before we set the "done" flag */
+ sysparam_compl_t comp = r->completion;
+ void *cdata = r->comp_data;
+
+ if (r->msg.state != fsp_msg_done) {
+ prerror("FSP: Request for sysparam 0x%x got FSP failure!\n",
+ r->msg.data.words[0]);
+ stlen = -1; /* XXX Find saner error codes */
+ goto complete;
+ }
+
+ param_id = r->resp.data.words[0];
+ len = r->resp.data.words[1] & 0xffff;
+
+ /* Check params validity */
+ if (param_id != r->msg.data.words[0]) {
+ prerror("FSP: Request for sysparam 0x%x got resp. for 0x%x!\n",
+ r->msg.data.words[0], param_id);
+ stlen = -2; /* XXX Sane error codes */
+ goto complete;
+ }
+ if (len > r->ulen) {
+ prerror("FSP: Request for sysparam 0x%x truncated!\n",
+ param_id);
+ len = r->ulen;
+ }
+
+ /* Decode the request status */
+ fstat = (r->msg.resp->word1 >> 8) & 0xff;
+ switch(fstat) {
+	case 0x00: /* XXX Is that even possible? */
+ case 0x11: /* Data in request */
+ memcpy(r->ubuf, &r->resp.data.words[2], len);
+		/* fall through */
+ case 0x12: /* Data in TCE */
+ stlen = len;
+ break;
+ default:
+ stlen = -fstat;
+ }
+ complete:
+ /* Call completion if any */
+ if (comp)
+ comp(r->msg.data.words[0], stlen, cdata);
+
+ free(r);
+
+ return stlen;
+}
+
+static void fsp_sysparam_get_complete(struct fsp_msg *msg)
+{
+ struct sysparam_req *r = container_of(msg, struct sysparam_req, msg);
+
+ /* If it's an asynchronous request, process it now */
+ if (r->completion) {
+ fsp_sysparam_process(r);
+ return;
+ }
+
+ /* Else just set the done flag */
+
+ /* Another CPU can be polling on the "done" flag without the
+	 * lock held, so let's order the updates to the structure
+ */
+ lwsync();
+ r->done = true;
+}
+
+int fsp_get_sys_param(uint32_t param_id, void *buffer, uint32_t length,
+ sysparam_compl_t async_complete, void *comp_data)
+{
+ struct sysparam_req *r;
+ uint64_t baddr, tce_token;
+ int rc;
+
+ if (!fsp_present())
+ return -ENODEV;
+ /*
+ * XXX FIXME: We currently always allocate the sysparam_req here
+ * however, we want to avoid runtime allocations as much as
+ * possible, so if this is going to be used a lot at runtime,
+ * we probably want to pre-allocate a pool of these
+ */
+	if (length > 4096)
+		return -EINVAL;
+	r = zalloc(sizeof(struct sysparam_req));
+	if (!r)
+		return -ENOMEM;
+ r->completion = async_complete;
+ r->comp_data = comp_data;
+ r->done = false;
+ r->ubuf = buffer;
+ r->ulen = length;
+ r->msg.resp = &r->resp;
+
+	/* Always map 1 page ... easier that way and none of this
+ * is performance critical
+ */
+ baddr = (uint64_t)buffer;
+ fsp_tce_map(PSI_DMA_GET_SYSPARAM, (void *)(baddr & ~0xffful), 0x1000);
+ tce_token = PSI_DMA_GET_SYSPARAM | (baddr & 0xfff);
+ fsp_fillmsg(&r->msg, FSP_CMD_QUERY_SPARM, 3,
+ param_id, length, tce_token);
+ rc = fsp_queue_msg(&r->msg, fsp_sysparam_get_complete);
+
+ /* Asynchronous operation or queueing failure, return */
+ if (rc || async_complete)
+ return rc;
+
+ /* Synchronous operation requested, spin and process */
+ while(!r->done)
+ fsp_poll();
+
+ /* Will free the request */
+ return fsp_sysparam_process(r);
+}
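
A hypothetical caller sketch of the two modes of fsp_get_sys_param() above (it assumes the in-tree fsp.h environment and so is not standalone; got_surv() and example() are invented names). The 4-byte surveillance parameter mirrors fsp_surv_query().

	static uint32_t surv;

	static void got_surv(uint32_t param_id, int err_len, void *data __unused)
	{
		if (err_len == sizeof(surv))
			printf("sysparam 0x%x = %d\n", param_id, surv);
	}

	static void example(void)
	{
		int len;

		/* Asynchronous: returns once queued, got_surv() runs later */
		fsp_get_sys_param(SYS_PARAM_SURV, &surv, sizeof(surv),
				  got_surv, NULL);

		/* Synchronous: a NULL completion spins in fsp_poll() until
		 * done; the return value is the received length, negative
		 * on error */
		len = fsp_get_sys_param(SYS_PARAM_SURV, &surv, sizeof(surv),
					NULL, NULL);
		if (len < 0)
			prerror("sysparam read failed: %d\n", len);
	}
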
+
+static void fsp_opal_getparam_complete(uint32_t param_id __unused, int err_len,
+ void *data)
+{
+ struct sysparam_comp_data *comp_data = data;
+ int rc = OPAL_SUCCESS;
+
+ if (comp_data->param_len != err_len)
+ rc = OPAL_INTERNAL_ERROR;
+
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ comp_data->async_token, rc);
+ free(comp_data);
+}
+
+static void fsp_opal_setparam_complete(struct fsp_msg *msg)
+{
+ struct sysparam_comp_data *comp_data = msg->user_data;
+ u8 fstat;
+ uint32_t param_id;
+ int rc = OPAL_SUCCESS;
+
+ if (msg->state != fsp_msg_done) {
+ prerror("FSP: Request for set sysparam 0x%x got FSP failure!\n",
+ msg->data.words[0]);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ param_id = msg->resp->data.words[0];
+ if (param_id != msg->data.words[0]) {
+		prerror("FSP: Request for set sysparam 0x%x got resp. for "
+			"0x%x!\n", msg->data.words[0], param_id);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ fstat = (msg->resp->word1 >> 8) & 0xff;
+ switch (fstat) {
+ case 0x00:
+ rc = OPAL_SUCCESS;
+ break;
+ case 0x22:
+ prerror("%s: Response status 0x%x, invalid data\n", __func__,
+ fstat);
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ case 0x24:
+ prerror("%s: Response status 0x%x, DMA error\n", __func__,
+ fstat);
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ default:
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ }
+
+out:
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ comp_data->async_token, rc);
+ free(comp_data);
+ fsp_freemsg(msg);
+}
+
+/* OPAL interface for PowerNV to read a system parameter from the FSP */
+static int64_t fsp_opal_get_param(uint64_t async_token, uint32_t param_id,
+ uint64_t buffer, uint64_t length)
+{
+ struct sysparam_comp_data *comp_data;
+ int count, rc, i;
+
+ if (!fsp_present())
+ return OPAL_HARDWARE;
+
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ if (sysparam_attrs[i].id == param_id)
+ break;
+ if (i == count)
+ return OPAL_PARAMETER;
+
+ if (length < sysparam_attrs[i].length)
+ return OPAL_PARAMETER;
+ if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_READ))
+ return OPAL_PERMISSION;
+
+ comp_data = zalloc(sizeof(struct sysparam_comp_data));
+ if (!comp_data)
+ return OPAL_NO_MEM;
+
+ comp_data->param_len = sysparam_attrs[i].length;
+ comp_data->async_token = async_token;
+ rc = fsp_get_sys_param(param_id, (void *)buffer,
+ sysparam_attrs[i].length, fsp_opal_getparam_complete,
+ comp_data);
+ if (rc) {
+ free(comp_data);
+ prerror("%s: Error %d queuing param request\n", __func__, rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
+
+/* OPAL interface for PowerNV to write a system parameter to the FSP */
+static int64_t fsp_opal_set_param(uint64_t async_token, uint32_t param_id,
+ uint64_t buffer, uint64_t length)
+{
+ struct sysparam_comp_data *comp_data;
+ struct fsp_msg *msg;
+ uint64_t tce_token;
+ int count, rc, i;
+
+ if (!fsp_present())
+ return OPAL_HARDWARE;
+
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ if (sysparam_attrs[i].id == param_id)
+ break;
+ if (i == count)
+ return OPAL_PARAMETER;
+
+ if (length < sysparam_attrs[i].length)
+ return OPAL_PARAMETER;
+ if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_WRITE))
+ return OPAL_PERMISSION;
+
+ fsp_tce_map(PSI_DMA_SET_SYSPARAM, (void *)(buffer & ~0xffful), 0x1000);
+ tce_token = PSI_DMA_SET_SYSPARAM | (buffer & 0xfff);
+
+ msg = fsp_mkmsg(FSP_CMD_SET_SPARM_2, 4, param_id, length,
+ tce_token >> 32, tce_token);
+ if (!msg) {
+ prerror("%s: Failed to allocate the message\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+	comp_data = zalloc(sizeof(struct sysparam_comp_data));
+	if (!comp_data) {
+		fsp_freemsg(msg);
+		return OPAL_NO_MEM;
+	}
+
+ comp_data->param_len = length;
+ comp_data->async_token = async_token;
+ msg->user_data = comp_data;
+
+ rc = fsp_queue_msg(msg, fsp_opal_setparam_complete);
+ if (rc) {
+ free(comp_data);
+ fsp_freemsg(msg);
+ prerror("%s: Failed to queue the message\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
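+
+/*
+ * Note: both OPAL calls above return OPAL_ASYNC_COMPLETION on success;
+ * the final status reaches the OS later as an OPAL_MSG_ASYNC_COMP
+ * message carrying the caller's async_token (see the two completion
+ * callbacks further up).
+ */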
+
+static bool fsp_sysparam_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ struct fsp_msg *rsp;
+ int rc = -ENOMEM;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_SP_SPARM_UPD_0:
+ case FSP_CMD_SP_SPARM_UPD_1:
+ printf("FSP: Got sysparam update, param ID 0x%x\n",
+ msg->data.words[0]);
+ rsp = fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008000, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+		if (rc) {
+			prerror("FSP: Error %d queuing sysparam reply\n", rc);
+			/* What to do here ? R/R ? */
+			if (rsp)
+				fsp_freemsg(rsp);
+		}
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_sysparam_client = {
+ .message = fsp_sysparam_msg,
+};
+
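+/*
+ * The node built below looks roughly like this (values illustrative,
+ * only the tail of the parameter table is visible above):
+ *
+ *	sysparams {
+ *		compatible = "ibm,opal-sysparams";
+ *		param-name = "plat-hmc-managed\0fw-license-policy\0...";
+ *		param-id = <...>;		(one u32 per parameter)
+ *		param-len = <... 4 4 12 1 1>;	(bytes, from sysparam_attrs)
+ *		param-perm = [...];		(one permission byte each)
+ *	};
+ */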
+static void add_opal_sysparam_node(void)
+{
+ struct dt_node *sysparams;
+ char *names, *s;
+ uint32_t *ids, *lens;
+ uint8_t *perms;
+ unsigned int i, count, size = 0;
+
+ if (!fsp_present())
+ return;
+
+ sysparams = dt_new(opal_node, "sysparams");
+ dt_add_property_string(sysparams, "compatible", "ibm,opal-sysparams");
+
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ size = size + strlen(sysparam_attrs[i].name) + 1;
+
+ names = zalloc(size);
+ if (!names) {
+ prerror("%s: Failed to allocate memory for parameter names\n",
+ __func__);
+ return;
+ }
+
+ ids = zalloc(count * sizeof(*ids));
+ if (!ids) {
+ prerror("%s: Failed to allocate memory for parameter ids\n",
+ __func__);
+ goto out_free_name;
+ }
+
+ lens = zalloc(count * sizeof(*lens));
+ if (!lens) {
+ prerror("%s: Failed to allocate memory for parameter length\n",
+ __func__);
+ goto out_free_id;
+ }
+
+ perms = zalloc(count * sizeof(*perms));
+ if (!perms) {
+		prerror("%s: Failed to allocate memory for parameter perms\n",
+ __func__);
+ goto out_free_len;
+ }
+
+ s = names;
+ for (i = 0; i < count; i++) {
+ strcpy(s, sysparam_attrs[i].name);
+ s = s + strlen(sysparam_attrs[i].name) + 1;
+
+ ids[i] = sysparam_attrs[i].id;
+ lens[i] = sysparam_attrs[i].length;
+ perms[i] = sysparam_attrs[i].perm;
+ }
+
+ dt_add_property(sysparams, "param-name", names, size);
+ dt_add_property(sysparams, "param-id", ids, count * sizeof(*ids));
+ dt_add_property(sysparams, "param-len", lens, count * sizeof(*lens));
+ dt_add_property(sysparams, "param-perm", perms, count * sizeof(*perms));
+
+ free(perms);
+
+out_free_len:
+ free(lens);
+out_free_id:
+ free(ids);
+out_free_name:
+ free(names);
+}
+
+void fsp_sysparam_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ /* Register change notifications */
+ fsp_register_client(&fsp_sysparam_client, FSP_MCLASS_SERVICE);
+
+ /* Register OPAL interfaces */
+ opal_register(OPAL_GET_PARAM, fsp_opal_get_param, 4);
+ opal_register(OPAL_SET_PARAM, fsp_opal_set_param, 4);
+
+ /* Add device-tree nodes */
+ add_opal_sysparam_node();
+}
diff --git a/hw/fsp/fsp.c b/hw/fsp/fsp.c
new file mode 100644
index 00000000..5dc298aa
--- /dev/null
+++ b/hw/fsp/fsp.c
@@ -0,0 +1,2147 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Service Processor handling code
+ *
+ * XXX This mixes PSI and FSP and currently only supports
+ * P7/P7+ PSI and FSP1
+ *
+ * If we are going to support P8 PSI and FSP2, we probably want
+ * to split the PSI support from the FSP support proper first.
+ */
+#include <stdarg.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <lock.h>
+#include <interrupts.h>
+#include <gx.h>
+#include <device.h>
+#include <trace.h>
+#include <timebase.h>
+#include <cpu.h>
+#include <fsp-elog.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_FSP_POLL_TIMEOUT, OPAL_PLATFORM_ERR_EVT, OPAL_FSP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_ERROR_PANIC, OPAL_NA, NULL);
+
+//#define DBG(fmt...) printf(fmt)
+#define DBG(fmt...) do { } while(0)
+#define FSP_TRACE_MSG
+#define FSP_TRACE_EVENT
+
+#define FSP_MAX_IOPATH 4
+
+enum fsp_path_state {
+ fsp_path_bad,
+ fsp_path_backup,
+ fsp_path_active,
+};
+
+struct fsp_iopath {
+ enum fsp_path_state state;
+ void *fsp_regs;
+ struct psi *psi;
+};
+
+enum fsp_mbx_state {
+ fsp_mbx_idle, /* Mailbox ready to send */
+ fsp_mbx_send, /* Mailbox sent, waiting for ack */
+ fsp_mbx_crit_op, /* Critical operation in progress */
+ fsp_mbx_prep_for_reset, /* Prepare for reset sent */
+ fsp_mbx_err, /* Mailbox in error state, waiting for r&r */
+ fsp_mbx_rr, /* Mailbox in r&r */
+};
+
+struct fsp {
+ struct fsp *link;
+ unsigned int index;
+ enum fsp_mbx_state state;
+ struct fsp_msg *pending;
+
+ unsigned int iopath_count;
+ int active_iopath; /* -1: no active IO path */
+ struct fsp_iopath iopath[FSP_MAX_IOPATH];
+};
+
+static struct fsp *first_fsp;
+static struct fsp *active_fsp;
+static u16 fsp_curseq = 0x8000;
+static u64 *fsp_tce_table;
+
+#define FSP_INBOUND_SIZE 0x00100000UL
+static void *fsp_inbound_buf = NULL;
+static u32 fsp_inbound_off;
+
+static struct lock fsp_lock = LOCK_UNLOCKED;
+
+static u64 fsp_cmdclass_resp_bitmask;
+static u64 timeout_timer;
+
+static u64 fsp_hir_timeout;
+
+#define FSP_CRITICAL_OP_TIMEOUT 128
+#define FSP_DRCR_CLEAR_TIMEOUT 128
+
+/*
+ * We keep track of the last logged values for some things, to print only
+ * on value changes, but also to relieve pressure on the tracer which
+ * doesn't do a very good job at detecting repeats when called from
+ * many different CPUs
+ */
+static u32 disr_last_print;
+static u32 drcr_last_print;
+static u32 hstate_last_print;
+
+void fsp_handle_resp(struct fsp_msg *msg);
+
+struct fsp_cmdclass {
+ int timeout;
+ bool busy;
+ struct list_head msgq;
+ struct list_head clientq;
+ struct list_head rr_queue; /* To queue up msgs during R/R */
+ u64 timesent;
+};
+
+static struct fsp_cmdclass fsp_cmdclass_rr;
+
+static struct fsp_cmdclass fsp_cmdclass[FSP_MCLASS_LAST - FSP_MCLASS_FIRST + 1]
+= {
+#define DEF_CLASS(_cl, _to) [_cl - FSP_MCLASS_FIRST] = { .timeout = _to }
+ DEF_CLASS(FSP_MCLASS_SERVICE, 16),
+ DEF_CLASS(FSP_MCLASS_PCTRL_MSG, 16),
+ DEF_CLASS(FSP_MCLASS_PCTRL_ABORTS, 16),
+ DEF_CLASS(FSP_MCLASS_ERR_LOG, 16),
+ DEF_CLASS(FSP_MCLASS_CODE_UPDATE, 40),
+ DEF_CLASS(FSP_MCLASS_FETCH_SPDATA, 16),
+ DEF_CLASS(FSP_MCLASS_FETCH_HVDATA, 16),
+ DEF_CLASS(FSP_MCLASS_NVRAM, 16),
+ DEF_CLASS(FSP_MCLASS_MBOX_SURV, 2),
+ DEF_CLASS(FSP_MCLASS_RTC, 16),
+ DEF_CLASS(FSP_MCLASS_SMART_CHIP, 20),
+ DEF_CLASS(FSP_MCLASS_INDICATOR, 180),
+ DEF_CLASS(FSP_MCLASS_HMC_INTFMSG, 16),
+ DEF_CLASS(FSP_MCLASS_HMC_VT, 16),
+ DEF_CLASS(FSP_MCLASS_HMC_BUFFERS, 16),
+ DEF_CLASS(FSP_MCLASS_SHARK, 16),
+ DEF_CLASS(FSP_MCLASS_MEMORY_ERR, 16),
+ DEF_CLASS(FSP_MCLASS_CUOD_EVENT, 16),
+ DEF_CLASS(FSP_MCLASS_HW_MAINT, 16),
+ DEF_CLASS(FSP_MCLASS_VIO, 16),
+ DEF_CLASS(FSP_MCLASS_SRC_MSG, 16),
+ DEF_CLASS(FSP_MCLASS_DATA_COPY, 16),
+ DEF_CLASS(FSP_MCLASS_TONE, 16),
+ DEF_CLASS(FSP_MCLASS_VIRTUAL_NVRAM, 16),
+ DEF_CLASS(FSP_MCLASS_TORRENT, 16),
+ DEF_CLASS(FSP_MCLASS_NODE_PDOWN, 16),
+ DEF_CLASS(FSP_MCLASS_DIAG, 16),
+ DEF_CLASS(FSP_MCLASS_PCIE_LINK_TOPO, 16),
+ DEF_CLASS(FSP_MCLASS_OCC, 16),
+};
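+
+/*
+ * Each class above owns one bit in fsp_cmdclass_resp_bitmask (see
+ * fsp_get_class_bit()). fsp_timeout_poll(), registered from fsp_opl(),
+ * scans that mask on a ~30 second granularity and triggers a reset
+ * when a response is overdue.
+ */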
+
+static void fsp_trace_msg(struct fsp_msg *msg, u8 dir __unused)
+{
+	union trace fsp __unused;
+
+	/* Sanity check first: the memcpy below must not overflow */
+	assert(msg->dlen <= sizeof(fsp.fsp_msg.data));
+#ifdef FSP_TRACE_MSG
+	size_t len = offsetof(struct trace_fsp_msg, data[msg->dlen]);
+
+	fsp.fsp_msg.dlen = msg->dlen;
+	fsp.fsp_msg.word0 = msg->word0;
+	fsp.fsp_msg.word1 = msg->word1;
+	fsp.fsp_msg.dir = dir;
+	memcpy(fsp.fsp_msg.data, msg->data.bytes, msg->dlen);
+	trace_add(&fsp, TRACE_FSP_MSG, len);
+#endif /* FSP_TRACE_MSG */
+}
+
+static struct fsp *fsp_get_active(void)
+{
+ /* XXX Handle transition between FSPs */
+ return active_fsp;
+}
+
+static u64 fsp_get_class_bit(u8 class)
+{
+ /* Alias classes CE and CF as the FSP has a single queue */
+ if (class == FSP_MCLASS_IPL)
+ class = FSP_MCLASS_SERVICE;
+
+ return 1ul << (class - FSP_MCLASS_FIRST);
+}
+
+static struct fsp_cmdclass *__fsp_get_cmdclass(u8 class)
+{
+ struct fsp_cmdclass *ret;
+
+ /* RR class is special */
+ if (class == FSP_MCLASS_RR_EVENT)
+ return &fsp_cmdclass_rr;
+
+ /* Bound check */
+ if (class < FSP_MCLASS_FIRST || class > FSP_MCLASS_LAST)
+ return NULL;
+
+ /* Alias classes CE and CF as the FSP has a single queue */
+ if (class == FSP_MCLASS_IPL)
+ class = FSP_MCLASS_SERVICE;
+
+ ret = &fsp_cmdclass[class - FSP_MCLASS_FIRST];
+
+ /* Unknown class */
+ if (ret->timeout == 0)
+ return NULL;
+
+ return ret;
+}
+
+static struct fsp_cmdclass *fsp_get_cmdclass(struct fsp_msg *msg)
+{
+ u8 c = msg->word0 & 0xff;
+
+ return __fsp_get_cmdclass(c);
+}
+
+static struct fsp_msg *__fsp_allocmsg(void)
+{
+ return zalloc(sizeof(struct fsp_msg));
+}
+
+struct fsp_msg *fsp_allocmsg(bool alloc_response)
+{
+ struct fsp_msg *msg;
+
+ msg = __fsp_allocmsg();
+ if (!msg)
+ return NULL;
+ if (alloc_response)
+ msg->resp = __fsp_allocmsg();
+ return msg;
+}
+
+void __fsp_freemsg(struct fsp_msg *msg)
+{
+ free(msg);
+}
+
+void fsp_freemsg(struct fsp_msg *msg)
+{
+ if (msg->resp)
+ __fsp_freemsg(msg->resp);
+ __fsp_freemsg(msg);
+}
+
+void fsp_cancelmsg(struct fsp_msg *msg)
+{
+ bool need_unlock = false;
+ struct fsp_cmdclass* cmdclass = fsp_get_cmdclass(msg);
+ struct fsp *fsp = fsp_get_active();
+
+	if (!fsp || fsp->state != fsp_mbx_rr) {
+		prerror("FSP: Message cancel allowed only when "
+			"FSP is in reset\n");
+		return;
+	}
+
+ if (!cmdclass)
+ return;
+
+ /* Recursive locking */
+ need_unlock = lock_recursive(&fsp_lock);
+
+ list_del(&msg->link);
+ msg->state = fsp_msg_cancelled;
+
+ if (need_unlock)
+ unlock(&fsp_lock);
+}
+
+static void fsp_wreg(struct fsp *fsp, u32 reg, u32 val)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->active_iopath < 0)
+ return;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (iop->state == fsp_path_bad)
+ return;
+ out_be32(iop->fsp_regs + reg, val);
+}
+
+static u32 fsp_rreg(struct fsp *fsp, u32 reg)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->active_iopath < 0)
+ return 0xffffffff;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (iop->state == fsp_path_bad)
+ return 0xffffffff;
+ return in_be32(iop->fsp_regs + reg);
+}
+
+static void fsp_reg_dump(void)
+{
+#define FSP_DUMP_ONE(x) \
+	printf(" %20s: %x\n", #x, fsp_rreg(fsp, x))
+
+ struct fsp *fsp = fsp_get_active();
+
+ if (!fsp)
+ return;
+
+ printf("FSP #%d: Register dump (state=%d)\n",
+ fsp->index, fsp->state);
+ FSP_DUMP_ONE(FSP_DRCR_REG);
+ FSP_DUMP_ONE(FSP_DISR_REG);
+ FSP_DUMP_ONE(FSP_MBX1_HCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX1_FCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX2_HCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX2_FCTL_REG);
+ FSP_DUMP_ONE(FSP_SDES_REG);
+ FSP_DUMP_ONE(FSP_HDES_REG);
+ FSP_DUMP_ONE(FSP_HDIR_REG);
+ FSP_DUMP_ONE(FSP_HDIM_SET_REG);
+ FSP_DUMP_ONE(FSP_PDIR_REG);
+ FSP_DUMP_ONE(FSP_PDIM_SET_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH0_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH1_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH2_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH3_REG);
+}
+
+static void fsp_notify_rr_state(u32 state)
+{
+ struct fsp_client *client, *next;
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(FSP_MCLASS_RR_EVENT);
+
+ assert(cmdclass);
+ list_for_each_safe(&cmdclass->clientq, client, next, link)
+ client->message(state, NULL);
+}
+
+static void fsp_reset_cmdclass(void)
+{
+ int i;
+ struct fsp_msg *msg;
+
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+ cmdclass->busy = false;
+ cmdclass->timesent = 0;
+
+ /* We also need to reset the 'timeout' timers here */
+
+ /* Make sure the message queue is empty */
+ while(!list_empty(&cmdclass->msgq)) {
+ msg = list_pop(&cmdclass->msgq, struct fsp_msg,
+ link);
+ list_add_tail(&cmdclass->rr_queue, &msg->link);
+ }
+ }
+}
+
+static bool fsp_in_hir(struct fsp *fsp)
+{
+ switch (fsp->state) {
+ case fsp_mbx_crit_op:
+ case fsp_mbx_prep_for_reset:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsp_in_reset(struct fsp *fsp)
+{
+ switch (fsp->state) {
+ case fsp_mbx_err: /* Will be reset soon */
+ case fsp_mbx_rr: /* Already in reset */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsp_hir_state_timeout(void)
+{
+ u64 now = mftb();
+
+ if (tb_compare(now, fsp_hir_timeout) == TB_AAFTERB)
+ return true;
+
+ return false;
+}
+
+static void fsp_set_hir_timeout(u32 seconds)
+{
+ u64 now = mftb();
+ fsp_hir_timeout = now + secs_to_tb(seconds);
+}
+
+static bool fsp_crit_op_in_progress(struct fsp *fsp)
+{
+ u32 disr = fsp_rreg(fsp, FSP_DISR_REG);
+
+ if (disr & FSP_DISR_CRIT_OP_IN_PROGRESS)
+ return true;
+
+ return false;
+}
+
+/* Notify the FSP that it will be reset soon by writing to the DRCR */
+static void fsp_prep_for_reset(struct fsp *fsp)
+{
+ u32 drcr = fsp_rreg(fsp, FSP_DRCR_REG);
+
+ printf("FSP: Writing reset to DRCR\n");
+ drcr_last_print = drcr;
+ fsp_wreg(fsp, FSP_DRCR_REG, (drcr | FSP_PREP_FOR_RESET_CMD));
+ fsp->state = fsp_mbx_prep_for_reset;
+ fsp_set_hir_timeout(FSP_DRCR_CLEAR_TIMEOUT);
+}
+
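+/*
+ * HIR state flow, as driven by fsp_hir_poll() below:
+ *
+ *	fsp_mbx_crit_op:        wait for the FSP to finish its critical
+ *	                        operation (bounded by fsp_hir_timeout)
+ *	fsp_mbx_prep_for_reset: wait for the FSP to ack the DRCR write
+ *	fsp_mbx_err:            PSI link dropped, normal R&R takes over
+ */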
+static void fsp_hir_poll(struct fsp *fsp, struct psi *psi)
+{
+ u32 drcr;
+
+ switch (fsp->state) {
+ case fsp_mbx_crit_op:
+ if (fsp_crit_op_in_progress(fsp)) {
+ if (fsp_hir_state_timeout())
+ prerror("FSP: Critical operation timeout\n");
+			/* XXX What to do next? Check with FSP folks */
+ } else {
+ fsp_prep_for_reset(fsp);
+ }
+ break;
+ case fsp_mbx_prep_for_reset:
+ drcr = fsp_rreg(fsp, FSP_DRCR_REG);
+
+ if (drcr != drcr_last_print) {
+ printf("FSP: DRCR changed, old = %x, new = %x\n",
+ drcr_last_print, drcr);
+ drcr_last_print = drcr;
+ }
+
+ if (drcr & FSP_DRCR_ACK_MASK) {
+ if (fsp_hir_state_timeout()) {
+ prerror("FSP: Ack timeout. Triggering reset\n");
+ psi_disable_link(psi);
+ fsp->state = fsp_mbx_err;
+ }
+ } else {
+ printf("FSP: DRCR ack received. Triggering reset\n");
+ psi_disable_link(psi);
+ fsp->state = fsp_mbx_err;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * This is the main entry for the host initiated reset case.
+ * This gets called when:
+ * a. Surveillance ack is not received in 120 seconds
+ * b. A mailbox command doesn't get a response within the stipulated time.
+ */
+static void __fsp_trigger_reset(void)
+{
+ struct fsp *fsp = fsp_get_active();
+ u32 disr;
+
+ /* Already in one of the error processing states */
+ if (fsp_in_hir(fsp) || fsp_in_reset(fsp))
+ return;
+
+ prerror("FSP: fsp_trigger_reset() entry\n");
+
+ drcr_last_print = 0;
+ /*
+ * Check if we are allowed to reset the FSP. We aren't allowed to
+ * reset the FSP if the FSP_DISR_DBG_IN_PROGRESS is set.
+ */
+ disr = fsp_rreg(fsp, FSP_DISR_REG);
+ if (disr & FSP_DISR_DBG_IN_PROGRESS) {
+ prerror("FSP: Host initiated reset disabled\n");
+ return;
+ }
+
+ /*
+ * Check if some critical operation is in progress as indicated
+ * by FSP_DISR_CRIT_OP_IN_PROGRESS. Timeout is 128 seconds
+ */
+ if (fsp_crit_op_in_progress(fsp)) {
+ printf("FSP: Critical operation in progress\n");
+ fsp->state = fsp_mbx_crit_op;
+ fsp_set_hir_timeout(FSP_CRITICAL_OP_TIMEOUT);
+ } else
+ fsp_prep_for_reset(fsp);
+}
+
+void fsp_trigger_reset(void)
+{
+ lock(&fsp_lock);
+ __fsp_trigger_reset();
+ unlock(&fsp_lock);
+}
+
+static void fsp_start_rr(struct fsp *fsp)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->state == fsp_mbx_rr)
+ return;
+
+ /* We no longer have an active path on that FSP */
+ if (fsp->active_iopath >= 0) {
+ iop = &fsp->iopath[fsp->active_iopath];
+ iop->state = fsp_path_bad;
+ fsp->active_iopath = -1;
+ }
+ fsp->state = fsp_mbx_rr;
+ disr_last_print = 0;
+ hstate_last_print = 0;
+
+ /*
+ * Mark all command classes as non-busy and clear their
+ * timeout, then flush all messages in our staging queue
+ */
+ fsp_reset_cmdclass();
+
+ /* Notify clients. We have to drop the lock here */
+ unlock(&fsp_lock);
+ fsp_notify_rr_state(FSP_RESET_START);
+ lock(&fsp_lock);
+
+ /* Start polling PSI */
+ psi_set_link_polling(true);
+}
+
+static void fsp_trace_event(struct fsp *fsp, u32 evt,
+ u32 data0, u32 data1, u32 data2, u32 data3)
+{
+ union trace tfsp __unused;
+#ifdef FSP_TRACE_EVENT
+ size_t len = sizeof(struct trace_fsp_event);
+
+ tfsp.fsp_evt.event = evt;
+ tfsp.fsp_evt.fsp_state = fsp->state;
+ tfsp.fsp_evt.data[0] = data0;
+ tfsp.fsp_evt.data[1] = data1;
+ tfsp.fsp_evt.data[2] = data2;
+ tfsp.fsp_evt.data[3] = data3;
+ trace_add(&tfsp, TRACE_FSP_EVENT, len);
+#endif /* FSP_TRACE_EVENT */
+}
+
+static void fsp_handle_errors(struct fsp *fsp)
+{
+ u32 hstate;
+ struct fsp_iopath *iop;
+ struct psi *psi;
+ u32 disr;
+
+ if (fsp->active_iopath < 0) {
+ prerror("FSP #%d: fsp_handle_errors() with no active IOP\n",
+ fsp->index);
+ return;
+ }
+
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (!iop->psi) {
+ prerror("FSP: Active IOP with no PSI link !\n");
+ return;
+ }
+ psi = iop->psi;
+
+ /*
+	 * If the link is not up, start R&R immediately. We do call
+	 * psi_disable_link() in this case as, while the link might
+	 * not be up, it might still be enabled and the PSI layer
+	 * "active" bit still set
+ */
+ if (!psi_check_link_active(psi)) {
+ /* Start R&R process */
+ fsp_trace_event(fsp, TRACE_FSP_EVT_LINK_DOWN, 0, 0, 0, 0);
+ prerror("FSP #%d: Link down, starting R&R\n", fsp->index);
+
+ /* If we got here due to a host initiated reset, the link
+ * is already driven down.
+ */
+ if (fsp->state == fsp_mbx_err)
+ psi_disable_link(psi);
+ fsp_start_rr(fsp);
+ return;
+ }
+
+ /* Link is up, check for other conditions */
+ disr = fsp_rreg(fsp, FSP_DISR_REG);
+
+ /* If in R&R, log values */
+ if (disr != disr_last_print) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_DISR_CHG, disr, 0, 0, 0);
+
+ printf("FSP #%d: DISR stat change = 0x%08x\n",
+ fsp->index, disr);
+ disr_last_print = disr;
+ }
+
+ /*
+	 * If we detect FSP_IN_RR in DISR or we have a deferred mbox
+ * error, we trigger an R&R after a bit of housekeeping to
+ * limit the chance of a stray interrupt
+ */
+ if ((disr & FSP_DISR_FSP_IN_RR) || (fsp->state == fsp_mbx_err)) {
+ /*
+		 * When the link comes back up, we still see that bit
+		 * set for a while, so just move on, nothing to see here
+ */
+ if (fsp->state == fsp_mbx_rr)
+ return;
+
+ fsp_trace_event(fsp, TRACE_FSP_EVT_SOFT_RR, disr, 0, 0, 0);
+
+ printf("FSP #%d: FSP in reset or delayed error, starting R&R\n",
+ fsp->index);
+
+ /* Clear all interrupt conditions */
+ fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL);
+
+ /* Make sure this happened */
+ fsp_rreg(fsp, FSP_HDIR_REG);
+
+ /* Bring the PSI link down */
+ psi_disable_link(psi);
+
+ /* Start R&R process */
+ fsp_start_rr(fsp);
+ return;
+ }
+
+ /*
+	 * We detect an R&R complete indication, acknowledge it
+ */
+ if (disr & FSP_DISR_FSP_RR_COMPLETE) {
+ /*
+		 * Acking this bit doesn't make it go away immediately, so
+ * only do it while still in R&R state
+ */
+ if (fsp->state == fsp_mbx_rr) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_RR_COMPL, 0,0,0,0);
+
+ printf("FSP #%d: Detected R&R complete, acking\n",
+ fsp->index);
+
+ /* Clear HDATA area */
+ fsp_wreg(fsp, FSP_MBX1_HDATA_AREA, 0xff);
+
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+
+ /*
+ * Mark the mbox as usable again so we can process
+ * incoming messages
+ */
+ fsp->state = fsp_mbx_idle;
+ }
+ }
+
+ /*
+ * XXX
+ *
+ * Here we detect a number of errors, should we initiate
+ * and R&R ?
+ */
+
+ hstate = fsp_rreg(fsp, FSP_HDES_REG);
+ if (hstate != hstate_last_print) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_HDES_CHG, hstate, 0, 0, 0);
+
+ printf("FSP #%d: HDES stat change = 0x%08x\n",
+ fsp->index, hstate);
+		hstate_last_print = hstate;
+ }
+
+ if (hstate == 0xffffffff)
+ return;
+
+ /* Clear errors */
+ fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1);
+
+ /*
+ * Most of those errors shouldn't have happened, we just clear
+ * the error state and return. In the long run, we might want
+ * to start retrying commands, switching FSPs or links, etc...
+ *
+ * We currently don't set our mailbox to a permanent error state.
+ */
+ if (hstate & FSP_DBERRSTAT_ILLEGAL1)
+ prerror("FSP #%d: Illegal command error !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_WFULL1)
+ prerror("FSP #%d: Write to a full mbox !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_REMPTY1)
+ prerror("FSP #%d: Read from an empty mbox !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_PAR1)
+ prerror("FSP #%d: Parity error !\n", fsp->index);
+}
+
+/*
+ * This is called by fsp_post_msg() to check if the mbox
+ * is in a state that allows sending of a message
+ *
+ * Due to the various "interesting" contexts fsp_post_msg()
+ * can be called from, including recursive locks from lock
+ * error messages or console code, this should avoid doing
+ * anything more complex than checking a bit of state.
+ *
+ * Specifically, we cannot initiate an R&R and call back into
+ * clients etc... from this function.
+ *
+ * The best we can do is to set the mbox in error state and
+ * handle it later during a poll or interrupt.
+ */
+static bool fsp_check_can_send(struct fsp *fsp)
+{
+ struct fsp_iopath *iop;
+ struct psi *psi;
+
+ /* Look for FSP in non-idle state */
+ if (fsp->state != fsp_mbx_idle)
+ return false;
+
+ /* Look for an active IO path */
+ if (fsp->active_iopath < 0)
+ goto mbox_error;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (!iop->psi) {
+ prerror("FSP: Active IOP with no PSI link !\n");
+ goto mbox_error;
+ }
+ psi = iop->psi;
+
+ /* Check if link has gone down. This will be handled later */
+ if (!psi_check_link_active(psi)) {
+ prerror("FSP #%d: Link seems to be down on send\n", fsp->index);
+ goto mbox_error;
+ }
+
+ /* XXX Do we want to check for other error conditions ? */
+ return true;
+
+ /*
+	 * An error of some sort occurred, we'll handle it later
+ * from a more normal "poll" context
+ */
+ mbox_error:
+ fsp->state = fsp_mbx_err;
+ return false;
+}
+
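+/*
+ * Rough shape of a send, as implemented below: word0/word1 plus the
+ * data words go into the MBX1 HDATA area, the total length (8-byte
+ * header plus data) goes into HHDR0, then the doorbell is rung by
+ * setting SPPEND in HCTL. The FSP acks with XUP, which __fsp_poll()
+ * uses to complete the send.
+ */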
+static bool fsp_post_msg(struct fsp *fsp, struct fsp_msg *msg)
+{
+ u32 ctl, reg;
+ int i, wlen;
+
+ DBG("FSP #%d: fsp_post_msg (w0: 0x%08x w1: 0x%08x)\n",
+ fsp->index, msg->word0, msg->word1);
+
+ /* Note: We used to read HCTL here and only modify some of
+ * the bits in it. This was bogus, because we would write back
+ * the incoming bits as '1' and clear them, causing fsp_poll()
+ * to then miss them. Let's just start with 0, which is how
+	 * I suppose the HW intends us to do it.
+ */
+
+ /* Set ourselves as busy */
+ fsp->pending = msg;
+ fsp->state = fsp_mbx_send;
+ msg->state = fsp_msg_sent;
+
+ /* We trace after setting the mailbox state so that if the
+ * tracing recurses, it ends up just queuing the message up
+ */
+ fsp_trace_msg(msg, TRACE_FSP_MSG_OUT);
+
+ /* Build the message in the mailbox */
+ reg = FSP_MBX1_HDATA_AREA;
+ fsp_wreg(fsp, reg, msg->word0); reg += 4;
+ fsp_wreg(fsp, reg, msg->word1); reg += 4;
+ wlen = (msg->dlen + 3) >> 2;
+ for (i = 0; i < wlen; i++) {
+ fsp_wreg(fsp, reg, msg->data.words[i]);
+ reg += 4;
+ }
+
+ /* Write the header */
+ fsp_wreg(fsp, FSP_MBX1_HHDR0_REG, (msg->dlen + 8) << 16);
+
+ /* Write the control register */
+ ctl = 4 << FSP_MBX_CTL_HCHOST_SHIFT;
+ ctl |= (msg->dlen + 8) << FSP_MBX_CTL_DCHOST_SHIFT;
+ ctl |= FSP_MBX_CTL_PTS | FSP_MBX_CTL_SPPEND;
+ DBG(" new ctl: %08x\n", ctl);
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, ctl);
+
+ return true;
+}
+
+static void fsp_poke_queue(struct fsp_cmdclass *cmdclass)
+{
+ struct fsp *fsp = fsp_get_active();
+ struct fsp_msg *msg;
+
+ if (!fsp)
+ return;
+ if (!fsp_check_can_send(fsp))
+ return;
+
+ /* From here to the point where fsp_post_msg() sets fsp->state
+ * to !idle we must not cause any re-entrancy (no debug or trace)
+ * in a code path that may hit fsp_post_msg() (it's ok to do so
+ * if we are going to bail out), as we are committed to calling
+ * fsp_post_msg() and so a re-entrancy could cause us to do a
+ * double-send into the mailbox.
+ */
+ if (cmdclass->busy || list_empty(&cmdclass->msgq))
+ return;
+
+ msg = list_top(&cmdclass->msgq, struct fsp_msg, link);
+ assert(msg);
+ cmdclass->busy = true;
+
+ if (!fsp_post_msg(fsp, msg)) {
+ prerror("FSP #%d: Failed to send message\n", fsp->index);
+ cmdclass->busy = false;
+ return;
+ }
+}
+
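+/*
+ * A cmd_sub_mod word packs the message as 0xRCCSSMM: R carries the
+ * "response expected" flag (bit 24), CC the command class, SS the
+ * sub-command and MM the modifier. For example the (illustrative)
+ * value 0x1ce5600 decodes as class 0xce, sub-command 0x56, modifier
+ * 0x00, response expected.
+ */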
+static void __fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod,
+ u8 add_words, va_list list)
+{
+ bool response = !!(cmd_sub_mod & 0x1000000);
+ u8 cmd = (cmd_sub_mod >> 16) & 0xff;
+ u8 sub = (cmd_sub_mod >> 8) & 0xff;
+ u8 mod = cmd_sub_mod & 0xff;
+ int i;
+
+ msg->word0 = cmd & 0xff;
+ msg->word1 = mod << 8 | sub;
+ msg->response = response;
+ msg->dlen = add_words << 2;
+
+ for (i = 0; i < add_words; i++)
+ msg->data.words[i] = va_arg(list, unsigned int);
+
+ /* Initialize the value with false. If this ends up
+ * in fsp_sync_msg, we will set it to true.
+ */
+ msg->sync_msg = false;
+}
+
+void fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod, u8 add_words, ...)
+{
+ va_list list;
+
+ va_start(list, add_words);
+ __fsp_fillmsg(msg, cmd_sub_mod, add_words, list);
+ va_end(list);
+}
+
+struct fsp_msg *fsp_mkmsg(u32 cmd_sub_mod, u8 add_words, ...)
+{
+ struct fsp_msg *msg = fsp_allocmsg(!!(cmd_sub_mod & 0x1000000));
+ va_list list;
+
+ if (!msg) {
+ prerror("FSP: Failed to allocate struct fsp_msg\n");
+ return NULL;
+ }
+
+ va_start(list, add_words);
+ __fsp_fillmsg(msg, cmd_sub_mod, add_words, list);
+ va_end(list);
+
+ return msg;
+}
+
+/*
+ * IMPORTANT NOTE: This is *guaranteed* to not call the completion
+ * routine recursively for *any* fsp message, either the
+ * queued one or a previous one. Thus it is *ok* to call
+ * this function with a lock held which will itself be
+ * taken by the completion function.
+ *
+ * Any change to this implementation must respect this
+ * rule. This will be especially true of things like
+ * reset/reload and error handling, if we fail to queue
+ * we must just return an error, not call any completion
+ * from the scope of fsp_queue_msg().
+ */
+int fsp_queue_msg(struct fsp_msg *msg, void (*comp)(struct fsp_msg *msg))
+{
+ struct fsp_cmdclass *cmdclass;
+ struct fsp *fsp = fsp_get_active();
+ bool need_unlock;
+ u16 seq;
+ int rc = 0;
+
+ if (!fsp)
+ return -1;
+
+ /* Recursive locking */
+ need_unlock = lock_recursive(&fsp_lock);
+
+ /* Grab a new sequence number */
+ seq = fsp_curseq;
+ fsp_curseq = fsp_curseq + 1;
+ if (fsp_curseq == 0)
+ fsp_curseq = 0x8000;
+ msg->word0 = (msg->word0 & 0xffff) | seq << 16;
+
+ /* Set completion */
+ msg->complete = comp;
+
+ /* Clear response state */
+ if (msg->resp)
+ msg->resp->state = fsp_msg_unused;
+
+ /* Queue the message in the appropriate queue */
+ cmdclass = fsp_get_cmdclass(msg);
+ if (!cmdclass) {
+ prerror("FSP: Invalid msg in fsp_queue_msg w0/1=0x%08x/%08x\n",
+ msg->word0, msg->word1);
+ rc = -1;
+ goto unlock;
+ }
+
+ msg->state = fsp_msg_queued;
+
+ /*
+ * If we have initiated or about to initiate a reset/reload operation,
+ * we stash the message on the R&R backup queue. Otherwise, queue it
+ * normally and poke the HW
+ */
+ if (fsp_in_hir(fsp) || fsp_in_reset(fsp))
+ list_add_tail(&cmdclass->rr_queue, &msg->link);
+ else {
+ list_add_tail(&cmdclass->msgq, &msg->link);
+ fsp_poke_queue(cmdclass);
+ }
+
+ unlock:
+ if (need_unlock)
+ unlock(&fsp_lock);
+
+ return rc;
+}
+
+/* WARNING: This will drop the FSP lock !!! */
+static void fsp_complete_msg(struct fsp_msg *msg)
+{
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+ void (*comp)(struct fsp_msg *msg);
+
+ assert(cmdclass);
+
+ DBG(" completing msg, word0: 0x%08x\n", msg->word0);
+
+ comp = msg->complete;
+ list_del_from(&cmdclass->msgq, &msg->link);
+ cmdclass->busy = false;
+ msg->state = fsp_msg_done;
+
+ unlock(&fsp_lock);
+ if (comp)
+ (*comp)(msg);
+ lock(&fsp_lock);
+}
+
+/* WARNING: This will drop the FSP lock !!! */
+static void fsp_complete_send(struct fsp *fsp)
+{
+ struct fsp_msg *msg = fsp->pending;
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+
+ assert(msg);
+ assert(cmdclass);
+
+ fsp->pending = NULL;
+
+ DBG(" completing send, word0: 0x%08x, resp: %d\n",
+ msg->word0, msg->response);
+
+ if (msg->response) {
+ u64 setbit = fsp_get_class_bit(msg->word0 & 0xff);
+ msg->state = fsp_msg_wresp;
+ fsp_cmdclass_resp_bitmask |= setbit;
+ cmdclass->timesent = mftb();
+ } else
+ fsp_complete_msg(msg);
+}
+
+static void fsp_alloc_inbound(struct fsp_msg *msg)
+{
+ u16 func_id = msg->data.words[0] & 0xffff;
+ u32 len = msg->data.words[1];
+ u32 tce_token = 0, act_len = 0;
+ u8 rc = 0;
+ void *buf;
+
+ printf("FSP: Allocate inbound buffer func: %04x len: %d\n",
+ func_id, len);
+
+	lock(&fsp_lock);
+
+	/* Round up to page size first so the bounds check is exact */
+	len = (len + 0xfff) & ~0xfff;
+	if ((fsp_inbound_off + len) > FSP_INBOUND_SIZE) {
+ prerror("FSP: Out of space in buffer area !\n");
+ rc = 0xeb;
+ goto reply;
+ }
+
+ if (!fsp_inbound_buf) {
+ fsp_inbound_buf = memalign(TCE_PSIZE, FSP_INBOUND_SIZE);
+ if (!fsp_inbound_buf) {
+ prerror("FSP: could not allocate fsp_inbound_buf!\n");
+ rc = 0xeb;
+ goto reply;
+ }
+ }
+
+ buf = fsp_inbound_buf + fsp_inbound_off;
+ tce_token = PSI_DMA_INBOUND_BUF + fsp_inbound_off;
+ fsp_inbound_off += len;
+ fsp_tce_map(tce_token, buf, len);
+ printf("FSP: -> buffer at 0x%p, TCE: 0x%08x, alen: 0x%x\n",
+ buf, tce_token, len);
+ act_len = len;
+
+ reply:
+ unlock(&fsp_lock);
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_ALLOC_INBOUND | rc,
+ 3, 0, tce_token, act_len), fsp_freemsg);
+}
+
+void *fsp_inbound_buf_from_tce(u32 tce_token)
+{
+ u32 offset = tce_token - PSI_DMA_INBOUND_BUF;
+
+ if (tce_token < PSI_DMA_INBOUND_BUF || offset >= fsp_inbound_off) {
+ prerror("FSP: TCE token 0x%x out of bounds\n", tce_token);
+ return NULL;
+ }
+ return fsp_inbound_buf + offset;
+}
+
+static void fsp_repost_queued_msgs_post_rr(void)
+{
+ struct fsp_msg *msg;
+ int i;
+
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+ bool poke = false;
+
+ while(!list_empty(&cmdclass->rr_queue)) {
+ msg = list_pop(&cmdclass->rr_queue,
+ struct fsp_msg, link);
+ list_add_tail(&cmdclass->msgq, &msg->link);
+ poke = true;
+ }
+ if (poke)
+ fsp_poke_queue(cmdclass);
+ }
+}
+
+static bool fsp_local_command(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u32 cmd = 0;
+ u32 rsp_data = 0;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_CONTINUE_IPL:
+ /* We get a CONTINUE_IPL as a response to OPL */
+ printf("FSP: Got CONTINUE_IPL !\n");
+ ipl_state |= ipl_got_continue;
+ return true;
+
+ case FSP_CMD_HV_STATE_CHG:
+ printf("FSP: Got HV state change request to %d\n",
+ msg->data.bytes[0]);
+
+ /* Send response synchronously for now, we might want to
+ * deal with that sort of stuff asynchronously if/when
+ * we add support for auto-freeing of messages
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_HV_STATE_CHG, 0), true);
+ return true;
+
+ case FSP_CMD_SP_NEW_ROLE:
+ /* FSP is assuming a new role */
+ printf("FSP: FSP assuming new role\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_SP_NEW_ROLE, 0), true);
+ ipl_state |= ipl_got_new_role;
+ return true;
+
+ case FSP_CMD_SP_QUERY_CAPS:
+ printf("FSP: FSP query capabilities\n");
+ /* XXX Do something saner. For now do a synchronous
+ * response and hard code our capabilities
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_SP_QUERY_CAPS, 4,
+ 0x3ff80000, 0, 0, 0), true);
+ ipl_state |= ipl_got_caps;
+ return true;
+ case FSP_CMD_FSP_FUNCTNAL:
+ printf("FSP: Got FSP Functional\n");
+ ipl_state |= ipl_got_fsp_functional;
+ return true;
+ case FSP_CMD_ALLOC_INBOUND:
+ fsp_alloc_inbound(msg);
+ return true;
+ case FSP_CMD_SP_RELOAD_COMP:
+ printf("FSP: SP says Reset/Reload complete\n");
+ if (msg->data.bytes[3] & PPC_BIT8(0)) {
+ fsp_fips_dump_notify(msg->data.words[1],
+ msg->data.words[2]);
+
+ if (msg->data.bytes[3] & PPC_BIT8(1))
+ printf(" PLID is %x\n",
+ msg->data.words[3]);
+ }
+ if (msg->data.bytes[3] & PPC_BIT8(2))
+ printf(" A Reset/Reload was NOT done\n");
+ else {
+ /* Notify clients that the FSP is back up */
+ fsp_notify_rr_state(FSP_RELOAD_COMPLETE);
+ fsp_repost_queued_msgs_post_rr();
+ }
+ return true;
+ case FSP_CMD_PANELSTATUS:
+ case FSP_CMD_PANELSTATUS_EX1:
+ case FSP_CMD_PANELSTATUS_EX2:
+ /* Panel status messages. We currently just ignore them */
+ return true;
+ case FSP_CMD_CLOSE_HMC_INTF:
+ /* Close the HMC interface */
+		/* Though Sapphire does not support an HMC connection, the FSP
+		 * sends this message when it is trying to open any new
+		 * hypervisor session. So return an error 0x51.
+		 */
+ cmd = FSP_RSP_CLOSE_HMC_INTF | FSP_STAUS_INVALID_HMC_ID;
+ rsp_data = msg->data.bytes[0] << 24 | msg->data.bytes[1] << 16;
+ rsp_data &= 0xffff0000;
+ fsp_queue_msg(fsp_mkmsg(cmd, 1, rsp_data), fsp_freemsg);
+ return true;
+ }
+ return false;
+}
+
+
+/* This is called without the FSP lock */
+static void fsp_handle_command(struct fsp_msg *msg)
+{
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+ struct fsp_client *client, *next;
+ u32 cmd_sub_mod;
+
+ if (!cmdclass) {
+ prerror("FSP: Got message for unknown class %x\n",
+ msg->word0 & 0xff);
+ goto free;
+ }
+
+ cmd_sub_mod = (msg->word0 & 0xff) << 16;
+ cmd_sub_mod |= (msg->word1 & 0xff) << 8;
+ cmd_sub_mod |= (msg->word1 >> 8) & 0xff;
+
+ /* Some commands are handled locally */
+ if (fsp_local_command(cmd_sub_mod, msg))
+ goto free;
+
+ /* The rest go to clients */
+ list_for_each_safe(&cmdclass->clientq, client, next, link) {
+ if (client->message(cmd_sub_mod, msg))
+ goto free;
+ }
+
+ prerror("FSP: Unhandled message %06x\n", cmd_sub_mod);
+
+ /* We don't know whether the message expected some kind of
+ * response, so we send one anyway
+ */
+ fsp_queue_msg(fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008020, 0),
+ fsp_freemsg);
+ free:
+ fsp_freemsg(msg);
+}
+
+static void __fsp_fill_incoming(struct fsp *fsp, struct fsp_msg *msg,
+ int dlen, u32 w0, u32 w1)
+{
+ unsigned int wlen, i, reg;
+
+ msg->dlen = dlen - 8;
+ msg->word0 = w0;
+ msg->word1 = w1;
+ wlen = (dlen + 3) >> 2;
+ reg = FSP_MBX1_FDATA_AREA + 8;
+ for (i = 0; i < wlen; i++) {
+ msg->data.words[i] = fsp_rreg(fsp, reg);
+ reg += 4;
+ }
+
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+
+ fsp_trace_msg(msg, TRACE_FSP_MSG_IN);
+}
+
+static void __fsp_drop_incoming(struct fsp *fsp)
+{
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+}
+
+/* WARNING: This will drop the FSP lock */
+static void fsp_handle_incoming(struct fsp *fsp)
+{
+ struct fsp_msg *msg;
+ u32 h0, w0, w1;
+ unsigned int dlen;
+ bool special_response = false;
+
+ h0 = fsp_rreg(fsp, FSP_MBX1_FHDR0_REG);
+ dlen = (h0 >> 16) & 0xff;
+
+ w0 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA);
+ w1 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA + 4);
+
+ DBG(" Incoming: w0: 0x%08x, w1: 0x%08x, dlen: %d\n",
+ w0, w1, dlen);
+
+ /* Some responses are expected out of band */
+ if ((w0 & 0xff) == FSP_MCLASS_HMC_INTFMSG &&
+ ((w1 & 0xff) == 0x8a || ((w1 & 0xff) == 0x8b)))
+ special_response = true;
+
+ /* Check for response bit */
+ if (w1 & 0x80 && !special_response) {
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(w0 & 0xff);
+ struct fsp_msg *req;
+
+ if (!cmdclass) {
+ prerror("FSP: Got response for unknown class %x\n",
+ w0 & 0xff);
+ __fsp_drop_incoming(fsp);
+ return;
+ }
+
+ if (!cmdclass->busy || list_empty(&cmdclass->msgq)) {
+ prerror("FSP #%d: Got orphan response !\n", fsp->index);
+ __fsp_drop_incoming(fsp);
+ return;
+ }
+ req = list_top(&cmdclass->msgq, struct fsp_msg, link);
+
+ /* Check if the response seems to match the message */
+ if (req->state != fsp_msg_wresp ||
+ (req->word0 & 0xff) != (w0 & 0xff) ||
+ (req->word1 & 0xff) != (w1 & 0x7f)) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Response doesn't match pending msg\n",
+ fsp->index);
+ return;
+ } else {
+ u64 resetbit = ~fsp_get_class_bit(req->word0 & 0xff);
+ fsp_cmdclass_resp_bitmask &= resetbit;
+ cmdclass->timesent = 0;
+ }
+
+ /* Allocate response if needed XXX We need to complete
+ * the original message with some kind of error here ?
+ */
+ if (!req->resp) {
+ req->resp = __fsp_allocmsg();
+ if (!req->resp) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Failed to allocate response\n",
+ fsp->index);
+ return;
+ }
+ }
+
+ /* Populate and complete (will drop the lock) */
+ req->resp->state = fsp_msg_response;
+ __fsp_fill_incoming(fsp, req->resp, dlen, w0, w1);
+ fsp_complete_msg(req);
+ return;
+ }
+
+ /* Allocate an incoming message */
+ msg = __fsp_allocmsg();
+ if (!msg) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Failed to allocate incoming msg\n",
+ fsp->index);
+ return;
+ }
+ msg->state = fsp_msg_incoming;
+ __fsp_fill_incoming(fsp, msg, dlen, w0, w1);
+
+ /* Handle FSP commands. This can recurse into fsp_queue_msg etc.. */
+ unlock(&fsp_lock);
+ fsp_handle_command(msg);
+ lock(&fsp_lock);
+}
+
+static void fsp_check_queues(struct fsp *fsp)
+{
+ int i;
+
+ /* XXX In the long run, we might want to have a queue of
+ * classes waiting to be serviced to speed this up, either
+ * that or a bitmap.
+ */
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+
+ if (fsp->state != fsp_mbx_idle)
+ break;
+ if (cmdclass->busy || list_empty(&cmdclass->msgq))
+ continue;
+ fsp_poke_queue(cmdclass);
+ }
+}
+
+static void __fsp_poll(bool interrupt)
+{
+ struct fsp_iopath *iop;
+ struct fsp *fsp = fsp_get_active();
+ u32 ctl, hdir = 0;
+ bool psi_irq;
+
+ /*
+ * The tracer isn't terribly efficient at detecting dups
+ * especially when coming from multiple CPUs so we do our
+ * own change-detection locally
+ */
+ static u32 hdir_last_trace;
+ static u32 ctl_last_trace;
+ static bool psi_irq_last_trace;
+ static bool irq_last_trace;
+
+ if (!fsp)
+ return;
+
+ /* Crazy interrupt handling scheme:
+ *
+ * In order to avoid "losing" interrupts when polling the mbox
+ * we only clear interrupt conditions when called as a result of
+ * an interrupt.
+ *
+ * That way, if a poll clears, for example, the HPEND condition,
+ * the interrupt remains, causing a dummy interrupt later on
+ * thus allowing the OS to be notified of a state change (ie it
+ * doesn't need every poll site to monitor every state change).
+ *
+ * However, this scheme is complicated by the fact that we need
+ * to clear the interrupt condition after we have cleared the
+ * original condition in HCTL, and we might have long stale
+ * interrupts which we do need to eventually get rid of. However
+ * clearing interrupts in such a way is racy, so we need to loop
+ * and re-poll HCTL after having done so or we might miss an
+ * event. It's a latency risk, but unlikely and probably worth it.
+ */
+
+ again:
+ if (fsp->active_iopath < 0) {
+ /* That should never happen */
+ if (interrupt)
+ prerror("FSP: Interrupt with no working IO path\n");
+ return;
+ }
+ iop = &fsp->iopath[fsp->active_iopath];
+
+ /* Handle host initiated resets */
+ if (fsp_in_hir(fsp)) {
+ fsp_hir_poll(fsp, iop->psi);
+ return;
+ }
+
+ /* Check for error state and handle R&R completion */
+ fsp_handle_errors(fsp);
+
+ /*
+	 * The above might have triggered an R&R, check that we
+ * are still functional
+ */
+ if ((fsp->active_iopath < 0) || fsp_in_hir(fsp))
+ return;
+ iop = &fsp->iopath[fsp->active_iopath];
+
+ /* Read interrupt status (we may or may not use it) */
+ hdir = fsp_rreg(fsp, FSP_HDIR_REG);
+
+ /* Read control now as well so we can trace them */
+ ctl = fsp_rreg(fsp, FSP_MBX1_HCTL_REG);
+
+ /* Ditto with PSI irq state */
+ psi_irq = psi_poll_fsp_interrupt(iop->psi);
+
+ /* Trace it if anything changes */
+ if (hdir != hdir_last_trace || ctl != ctl_last_trace ||
+ interrupt != irq_last_trace || psi_irq != psi_irq_last_trace) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_POLL_IRQ,
+ interrupt, hdir, ctl, psi_irq);
+
+ hdir_last_trace = hdir;
+ ctl_last_trace = ctl;
+ irq_last_trace = interrupt;
+ psi_irq_last_trace = psi_irq;
+ }
+
+ /*
+ * We *MUST* ignore the MBOX2 bits here. While MBOX2 cannot generate
+ * interrupt, it might still latch some bits here (and we found cases
+ * where the MBOX2 XUP would be set). If that happens, clearing HDIR
+ * never works (the bit gets set again immediately) because we don't
+	 * clear the condition in HCTL2 and thus we loop forever.
+ */
+ hdir &= FSP_DBIRQ_MBOX1;
+
+ /*
+ * Sanity check: If an interrupt is pending and we are in polling
+ * mode, check that the PSI side is also pending. If some bit is
+ * set, just clear and move on.
+ */
+ if (hdir && !interrupt && !psi_irq) {
+ prerror("FSP: WARNING ! HDIR 0x%08x but no PSI irq !\n", hdir);
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ }
+
+ /*
+ * We should never have the mbox in error state here unless it
+ * was fine until some printf inside fsp_handle_errors() caused
+	 * the console to poke the FSP which detected a brand new error
+ * in the process. Let's be safe rather than sorry and handle that
+ * here
+ */
+ if (fsp_in_hir(fsp) || fsp->state == fsp_mbx_err) {
+ prerror("FSP: Late error state detection\n");
+ goto again;
+ }
+
+ /*
+ * If we are in an R&R state with an active IO path, we
+ * shouldn't be getting interrupts. If we do, just clear
+ * the condition and print a message
+ */
+ if (fsp->state == fsp_mbx_rr) {
+ if (interrupt) {
+ prerror("FSP: Interrupt in RR state [HDIR=0x%08x]\n",
+ hdir);
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ }
+ return;
+ }
+
+ /* Poll FSP CTL */
+ if (ctl & (FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND))
+ DBG("FSP #%d: poll, ctl: %x\n", fsp->index, ctl);
+
+ /* Do we have a pending message waiting to complete ? */
+ if (ctl & FSP_MBX_CTL_XUP) {
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP);
+ if (fsp->state == fsp_mbx_send) {
+ /* mbox is free */
+ fsp->state = fsp_mbx_idle;
+
+ /* Complete message (will break the lock) */
+ fsp_complete_send(fsp);
+
+ /* Lock can have been broken, so ctl is now
+ * potentially invalid, let's recheck
+ */
+ goto again;
+ } else {
+ prerror("FSP #%d: Got XUP with no pending message !\n",
+ fsp->index);
+ }
+ }
+
+ if (fsp->state == fsp_mbx_send) {
+ /* XXX Handle send timeouts!!! */
+ }
+
+ /* Is there an incoming message ? This will break the lock as well */
+ if (ctl & FSP_MBX_CTL_HPEND)
+ fsp_handle_incoming(fsp);
+
+ /* Note: Lock may have been broken above, thus ctl might be invalid
+ * now, don't use it any further.
+ */
+
+ /* Check for something else to send */
+ if (fsp->state == fsp_mbx_idle)
+ fsp_check_queues(fsp);
+
+ /* Clear interrupts, and recheck HCTL if any occurred */
+ if (interrupt && hdir) {
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ goto again;
+ }
+}
+
+void fsp_poll(void)
+{
+ lock(&fsp_lock);
+ __fsp_poll(false);
+ unlock(&fsp_lock);
+}
+
+void fsp_interrupt(void)
+{
+ lock(&fsp_lock);
+ __fsp_poll(true);
+ unlock(&fsp_lock);
+}
+
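+/*
+ * Typical usage, as in fsp_opl() below:
+ *
+ *	fsp_sync_msg(fsp_mkmsg(FSP_CMD_OPL, 0), true);
+ *
+ * With autofree set, the message and any response are freed on return,
+ * so the caller only gets back the FSP status byte (or a negative
+ * error).
+ */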
+int fsp_sync_msg(struct fsp_msg *msg, bool autofree)
+{
+ int rc;
+
+ /* This indication is useful only in the case where
+ * we queue up messages when the FSP takes a r/r.
+ */
+ msg->sync_msg = true;
+ msg->auto_free = autofree;
+
+ rc = fsp_queue_msg(msg, NULL);
+ if (rc)
+ goto bail;
+
+ while(fsp_msg_busy(msg))
+ fsp_poll();
+
+ switch(msg->state) {
+ case fsp_msg_done:
+ rc = 0;
+ break;
+ case fsp_msg_timeout:
+ rc = -1; /* XXX to improve */
+ break;
+ default:
+ rc = -1; /* Should not happen... (assert ?) */
+ }
+
+ if (msg->resp)
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ bail:
+ if (autofree)
+ fsp_freemsg(msg);
+ return rc;
+}
+
+void fsp_register_client(struct fsp_client *client, u8 msgclass)
+{
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass);
+
+ if (!fsp_present())
+ return;
+ assert(cmdclass);
+ list_add_tail(&cmdclass->clientq, &client->link);
+}
+
+void fsp_unregister_client(struct fsp_client *client, u8 msgclass)
+{
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass);
+
+ if (!fsp_present())
+ return;
+ assert(cmdclass);
+ list_del_from(&cmdclass->clientq, &client->link);
+}
+
+static int fsp_init_mbox(struct fsp *fsp)
+{
+ unsigned int i;
+ u32 reg;
+
+ /*
+ * Note: The documentation contradicts itself as to
+ * whether the HDIM bits should be set or cleared to
+ * enable interrupts
+ *
+ * This seems to work...
+ */
+
+ /* Mask all interrupts */
+ fsp_wreg(fsp, FSP_HDIM_CLR_REG, FSP_DBIRQ_ALL);
+
+ /* Clear all errors */
+ fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1 | FSP_DBERRSTAT_CLR2);
+
+ /* Initialize data area as the doco says */
+ for (i = 0; i < 0x40; i += 4)
+ fsp_wreg(fsp, FSP_MBX1_HDATA_AREA + i, 0);
+
+ /*
+	 * Clear whatever crap may remain in HCTL. Do not write XDN as that
+ * would be interpreted incorrectly as an R&R completion which
+ * we aren't ready to send yet !
+ */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK | FSP_MBX_CTL_DCSP_MASK |
+ FSP_MBX_CTL_PTS);
+
+ /* Clear all pending interrupts */
+ fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL);
+
+ /* Enable all mbox1 interrupts */
+ fsp_wreg(fsp, FSP_HDIM_SET_REG, FSP_DBIRQ_MBOX1);
+
+ /* Decode what FSP we are connected to */
+ reg = fsp_rreg(fsp, FSP_SCRATCH0_REG);
+ if (reg & PPC_BIT32(0)) { /* Is it a valid connection */
+ if (reg & PPC_BIT32(3))
+ printf("FSP: Connected to FSP-B\n");
+ else
+ printf("FSP: Connected to FSP-A\n");
+ }
+
+ return 0;
+}
+
+/* We use a single fixed TCE table for all PSI interfaces */
+static void fsp_init_tce_table(void)
+{
+ fsp_tce_table = (u64 *)PSI_TCE_TABLE_BASE;
+
+ /* Memset the larger table even if we only use the smaller
+ * one on P7
+ */
+ memset(fsp_tce_table, 0, PSI_TCE_TABLE_SIZE_P8);
+}
+
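+/*
+ * Each TCE entry maps one 4KB page; the low 0x3 bits mark the entry
+ * valid for read and write. Offset, address and size must all be 4KB
+ * aligned, e.g. (illustrative) fsp_tce_map(PSI_DMA_FETCH, buf, 0x3000)
+ * fills three consecutive entries.
+ */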
+void fsp_tce_map(u32 offset, void *addr, u32 size)
+{
+ u64 raddr = (u64)addr;
+
+ assert(!(offset & 0xfff));
+ assert(!(raddr & 0xfff));
+ assert(!(size & 0xfff));
+
+ size >>= 12;
+ offset >>= 12;
+
+ while(size--) {
+ fsp_tce_table[offset++] = raddr | 0x3;
+ raddr += 0x1000;
+ }
+}
+
+void fsp_tce_unmap(u32 offset, u32 size)
+{
+ assert(!(offset & 0xfff));
+ assert(!(size & 0xfff));
+
+ size >>= 12;
+ offset >>= 12;
+
+ while(size--)
+ fsp_tce_table[offset++] = 0;
+}
+
+static struct fsp *fsp_find_by_index(int index)
+{
+ struct fsp *fsp = first_fsp;
+
+	while (fsp) {
+		if (fsp->index == index)
+			return fsp;
+		fsp = fsp->link;
+	}
+
+ return NULL;
+}
+
+static void fsp_init_links(struct dt_node *fsp_node)
+{
+ const struct dt_property *linksprop;
+ int i, index;
+ struct fsp *fsp;
+ struct fsp_iopath *fiop;
+
+ linksprop = dt_find_property(fsp_node, "ibm,psi-links");
+ index = dt_prop_get_u32(fsp_node, "reg");
+ fsp = fsp_find_by_index(index);
+ if (!fsp) {
+ prerror("FSP: FSP with index %d not found\n", index);
+ return;
+ }
+
+ fsp->state = fsp_mbx_idle;
+
+ /* Iterate all links */
+ for (i = 0; i < fsp->iopath_count; i++) {
+ u64 reg;
+ u32 link;
+
+ link = ((const u32 *)linksprop->prop)[i];
+ fiop = &fsp->iopath[i];
+ fiop->psi = psi_find_link(link);
+ if (fiop->psi == NULL) {
+ prerror("FSP #%d: Couldn't find PSI link\n",
+ fsp->index);
+ continue;
+ }
+
+ printf("FSP #%d: Found PSI HB link to chip %d\n",
+ fsp->index, link);
+
+ psi_fsp_link_in_use(fiop->psi);
+
+ /* Get the FSP register window */
+ reg = in_be64(fiop->psi->regs + PSIHB_FSPBAR);
+ fiop->fsp_regs = (void *)(reg | (1ULL << 63) |
+ dt_prop_get_u32(fsp_node, "reg-offset"));
+ }
+}
+
+static void fsp_update_links_states(struct fsp *fsp)
+{
+ struct fsp_iopath *fiop;
+ unsigned int i;
+
+ /* Iterate all links */
+ for (i = 0; i < fsp->iopath_count; i++) {
+ fiop = &fsp->iopath[i];
+ if (!fiop->psi)
+ continue;
+ if (!fiop->psi->working)
+ fiop->state = fsp_path_bad;
+ else if (fiop->psi->active) {
+ fsp->active_iopath = i;
+ fiop->state = fsp_path_active;
+ } else
+ fiop->state = fsp_path_backup;
+ }
+
+ if (fsp->active_iopath >= 0) {
+ if (!active_fsp || (active_fsp != fsp))
+ active_fsp = fsp;
+
+ fsp_inbound_off = 0;
+ fiop = &fsp->iopath[fsp->active_iopath];
+ psi_init_for_fsp(fiop->psi);
+ fsp_init_mbox(fsp);
+ psi_enable_fsp_interrupt(fiop->psi);
+ }
+}
+
+void fsp_reinit_fsp(void)
+{
+ struct fsp *fsp;
+
+ /* Stop polling PSI */
+ psi_set_link_polling(false);
+
+ /* Notify all FSPs to check for an updated link state */
+ for (fsp = first_fsp; fsp; fsp = fsp->link)
+ fsp_update_links_states(fsp);
+}
+
+static void fsp_create_fsp(struct dt_node *fsp_node)
+{
+ const struct dt_property *linksprop;
+ struct fsp *fsp;
+ int count, index;
+
+ index = dt_prop_get_u32(fsp_node, "reg");
+ prerror("FSP #%d: Found in device-tree, setting up...\n", index);
+
+ linksprop = dt_find_property(fsp_node, "ibm,psi-links");
+ if (!linksprop || linksprop->len < 4) {
+ prerror("FSP #%d: No links !\n", index);
+ return;
+ }
+
+ fsp = zalloc(sizeof(struct fsp));
+ if (!fsp) {
+ prerror("FSP #%d: Can't allocate memory !\n", index);
+ return;
+ }
+
+ fsp->index = index;
+ fsp->active_iopath = -1;
+
+ count = linksprop->len / 4;
+ printf("FSP #%d: Found %d IO PATH\n", index, count);
+ if (count > FSP_MAX_IOPATH) {
+ prerror("FSP #%d: WARNING, limited to %d IO PATH\n",
+ index, FSP_MAX_IOPATH);
+ count = FSP_MAX_IOPATH;
+ }
+ fsp->iopath_count = count;
+
+ fsp->link = first_fsp;
+ first_fsp = fsp;
+
+ fsp_init_links(fsp_node);
+ fsp_update_links_states(fsp);
+}
+
+static void fsp_opal_poll(void *data __unused)
+{
+ if (try_lock(&fsp_lock)) {
+ __fsp_poll(false);
+ unlock(&fsp_lock);
+ }
+}
+
+static bool fsp_init_one(const char *compat)
+{
+ struct dt_node *fsp_node;
+ bool inited = false;
+
+ dt_for_each_compatible(dt_root, fsp_node, compat) {
+ if (!inited) {
+ int i;
+
+ /* Initialize the per-class msg queues */
+ for (i = 0;
+ i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ list_head_init(&fsp_cmdclass[i].msgq);
+ list_head_init(&fsp_cmdclass[i].clientq);
+ list_head_init(&fsp_cmdclass[i].rr_queue);
+ }
+
+ /* Init the queues for RR notifier cmdclass */
+ list_head_init(&fsp_cmdclass_rr.msgq);
+ list_head_init(&fsp_cmdclass_rr.clientq);
+ list_head_init(&fsp_cmdclass_rr.rr_queue);
+
+ /* Register poller */
+ opal_add_poller(fsp_opal_poll, NULL);
+
+ inited = true;
+ }
+
+ /* Create the FSP data structure */
+ fsp_create_fsp(fsp_node);
+ }
+
+ return inited;
+}
+
+void fsp_init(void)
+{
+ printf("FSP: Looking for FSP...\n");
+
+ fsp_init_tce_table();
+
+ if (!fsp_init_one("ibm,fsp1") && !fsp_init_one("ibm,fsp2")) {
+ printf("FSP: No FSP on this machine\n");
+ return;
+ }
+}
+
+bool fsp_present(void)
+{
+ return first_fsp != NULL;
+}
+
+static void fsp_timeout_poll(void *data __unused)
+{
+ u64 now = mftb();
+ u64 timeout_val = 0;
+ u64 cmdclass_resp_bitmask = fsp_cmdclass_resp_bitmask;
+ struct fsp_cmdclass *cmdclass = NULL;
+ struct fsp_msg *req = NULL;
+ u32 index = 0;
+
+ if (timeout_timer == 0)
+ timeout_timer = now + secs_to_tb(30);
+
+ /* The lowest granularity for a message timeout is 30 secs.
+ * So every 30secs, check if there is any message
+ * waiting for a response from the FSP
+ */
+ if ((tb_compare(now, timeout_timer) == TB_AAFTERB) ||
+ (tb_compare(now, timeout_timer) == TB_AEQUALB))
+ timeout_timer = now + secs_to_tb(30);
+ else
+ return;
+
+ while (cmdclass_resp_bitmask) {
+ u64 time_sent = 0;
+ u64 time_to_comp = 0;
+
+ if (!(cmdclass_resp_bitmask & 0x1))
+ goto next_bit;
+
+ cmdclass = &fsp_cmdclass[index];
+ timeout_val = secs_to_tb((cmdclass->timeout) * 60);
+ time_sent = cmdclass->timesent;
+ time_to_comp = now - cmdclass->timesent;
+
+ /* Now check if the response has timed out */
+ if (tb_compare(time_to_comp, timeout_val) == TB_AAFTERB) {
+ u64 resetbit = 0;
+
+ /* Take the FSP lock now and re-check */
+ lock(&fsp_lock);
+			if (!(fsp_cmdclass_resp_bitmask & (1ul << index)) ||
+ time_sent != cmdclass->timesent) {
+ unlock(&fsp_lock);
+ goto next_bit;
+ }
+ req = list_top(&cmdclass->msgq, struct fsp_msg, link);
+ log_simple_error(&e_info(OPAL_RC_FSP_POLL_TIMEOUT),
+				"FSP: Response from FSP timed out, word0 = %x, "
+				"word1 = %x state: %d\n",
+ req->word0, req->word1, req->state);
+ fsp_reg_dump();
+ resetbit = ~fsp_get_class_bit(req->word0 & 0xff);
+ fsp_cmdclass_resp_bitmask &= resetbit;
+ cmdclass->timesent = 0;
+ if (req->resp)
+ req->resp->state = fsp_msg_timeout;
+ fsp_complete_msg(req);
+ __fsp_trigger_reset();
+ unlock(&fsp_lock);
+ }
+ next_bit:
+ cmdclass_resp_bitmask = cmdclass_resp_bitmask >> 1;
+ index++;
+ }
+}
+
+void fsp_opl(void)
+{
+ struct dt_node *iplp;
+
+ if (!fsp_present())
+ return;
+
+ /* Send OPL */
+ ipl_state |= ipl_opl_sent;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_OPL, 0), true);
+ while(!(ipl_state & ipl_got_continue))
+ fsp_poll();
+
+ /* Send continue ACK */
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_CONTINUE_ACK, 0), true);
+
+ /* Wait for various FSP messages */
+	printf("INIT: Waiting for FSP to advertise new role...\n");
+ while(!(ipl_state & ipl_got_new_role))
+ fsp_poll();
+ printf("INIT: Waiting for FSP to request capabilities...\n");
+ while(!(ipl_state & ipl_got_caps))
+ fsp_poll();
+
+ /* Initiate the timeout poller */
+ opal_add_poller(fsp_timeout_poll, NULL);
+
+ /* Tell FSP we are in standby */
+ printf("INIT: Sending HV Functional: Standby...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x01000000), true);
+
+ /* Wait for FSP functional */
+ printf("INIT: Waiting for FSP functional\n");
+ while(!(ipl_state & ipl_got_fsp_functional))
+ fsp_poll();
+
+ /* Tell FSP we are in running state */
+ printf("INIT: Sending HV Functional: Runtime...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x02000000), true);
+
+ /*
+ * For the factory reset case, FSP sends us the PCI Bus
+ * Reset request. We don't have to do anything special with
+ * PCI bus numbers here; just send the Power Down message
+ * with modifier 0x02 to FSP.
+ */
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp && dt_find_property(iplp, "pci-busno-reset-ipl")) {
+ printf("INIT: PCI Bus Reset requested. Sending Power Down\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_POWERDOWN_PCIRS, 0), true);
+ }
+
+	/*
+	 * Tell FSP we are in running state with all partitions.
+	 *
+	 * This is needed, otherwise the FSP will not reset its reboot
+	 * count on failures. Ideally we would send this once we know
+	 * the OS is up, but we don't currently have a good way to do
+	 * that, so this will do as a stop-gap.
+	 */
+ printf("INIT: Sending HV Functional: Runtime all parts...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x04000000), true);
+}
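+
+/*
+ * The ipl_state bits polled in fsp_opl() are assumed to be set from
+ * the incoming-message handler as the corresponding FSP messages
+ * arrive; a hypothetical fragment of such a handler:
+ *
+ *	case FSP_CMD_CONTINUE_IPL:
+ *		ipl_state |= ipl_got_continue;
+ *		break;
+ */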
+
+uint32_t fsp_adjust_lid_side(uint32_t lid_no)
+{
+ struct dt_node *iplp;
+ const char *side = NULL;
+
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ side = dt_prop_get_def(iplp, "cec-ipl-side", NULL);
+ if (!side || !strcmp(side, "temp"))
+ lid_no |= ADJUST_T_SIDE_LID_NO;
+ return lid_no;
+}
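+
+/*
+ * Usage sketch for fsp_adjust_lid_side() (the LID number, length and
+ * destination are illustrative): adjust a flash LID number to the
+ * booted side before fetching it:
+ *
+ *	uint32_t lid = fsp_adjust_lid_side(0x80a00701);
+ *	size_t len = 16 * 1024 * 1024;
+ *	fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid, 0, dest, &len);
+ */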
+
+int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length)
+{
+ uint32_t total, remaining = *length;
+ uint64_t baddr;
+ uint64_t balign, boff, bsize;
+ struct fsp_msg *msg;
+ static struct lock fsp_fetch_lock = LOCK_UNLOCKED;
+
+ *length = total = 0;
+
+ if (!fsp_present())
+ return -ENODEV;
+
+ printf("FSP: Fetch data id: %02x sid: %08x to %p (0x%x bytes)\n",
+ id, sub_id, buffer, remaining);
+
+ /*
+ * Use a lock to avoid multiple processors trying to fetch
+ * at the same time and colliding on the TCE space
+ */
+ lock(&fsp_fetch_lock);
+
+	while (remaining) {
+ uint32_t chunk, taddr, woffset, wlen;
+ uint8_t rc;
+
+ /* Calculate alignment skew */
+ baddr = (uint64_t)buffer;
+ balign = baddr & ~0xffful;
+ boff = baddr & 0xffful;
+
+ /* Get a chunk */
+ chunk = remaining;
+ if (chunk > (PSI_DMA_FETCH_SIZE - boff))
+ chunk = PSI_DMA_FETCH_SIZE - boff;
+ bsize = ((boff + chunk) + 0xfff) & ~0xffful;
+
+ printf("FSP: 0x%08x bytes balign=%llx boff=%llx bsize=%llx\n",
+ chunk, balign, boff, bsize);
+ fsp_tce_map(PSI_DMA_FETCH, (void *)balign, bsize);
+ taddr = PSI_DMA_FETCH + boff;
+		msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 6,
+				flags << 16 | id, sub_id, offset,
+				0, taddr, chunk);
+		if (!msg) {
+			fsp_tce_unmap(PSI_DMA_FETCH, bsize);
+			unlock(&fsp_fetch_lock);
+			return -ENOMEM;
+		}
+		rc = fsp_sync_msg(msg, false);
+		fsp_tce_unmap(PSI_DMA_FETCH, bsize);
+
+ woffset = msg->resp->data.words[1];
+ wlen = msg->resp->data.words[2];
+ printf("FSP: -> rc=0x%02x off: %08x twritten: %08x\n",
+ rc, woffset, wlen);
+ fsp_freemsg(msg);
+
+		/* XXX Is flash busy (0x3f) a reason for retry? */
+ if (rc != 0 && rc != 2) {
+ unlock(&fsp_fetch_lock);
+ return -EIO;
+ }
+
+ remaining -= wlen;
+ total += wlen;
+ buffer += wlen;
+ offset += wlen;
+
+		/* The doc seems to indicate that we get rc=2 if there's
+		 * more data and rc=0 if we reached the end of file, but
+		 * in practice we always seem to get rc=0, so treat a
+		 * short read as EOF.
+		 */
+ if (wlen < chunk)
+ break;
+ }
+ unlock(&fsp_fetch_lock);
+
+ *length = total;
+
+ return 0;
+}
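+
+/*
+ * Note that *length is in/out above: it holds the requested size on
+ * entry and the number of bytes actually written on return (a short
+ * read meaning EOF). A hedged example, names illustrative:
+ *
+ *	size_t len = sizeof(buf);
+ *	if (fsp_fetch_data(0, FSP_DATASET_SP_DUMP, dump_id, 0,
+ *			   buf, &len) == 0)
+ *		process(buf, len);
+ */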
+
+/*
+ * Asynchronous version of fsp_fetch_data().
+ *
+ * Note: "buffer" is an address in PSI DMA space, i.e. the caller
+ * must already have TCE-mapped the destination.
+ */
+int fsp_fetch_data_queue(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length,
+ void (*comp)(struct fsp_msg *msg))
+{
+ struct fsp_msg *msg;
+ uint32_t chunk = *length;
+
+ if (!comp)
+ return OPAL_PARAMETER;
+
+ msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 0x6, flags << 16 | id,
+ sub_id, offset, 0, buffer, chunk);
+ if (!msg) {
+ prerror("FSP: allocation failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(msg, comp)) {
+ fsp_freemsg(msg);
+ prerror("FSP: Failed to queue fetch data message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
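+
+/*
+ * A sketch of a completion handler for the queued fetch above
+ * (handler and consume() are hypothetical; the rc byte is assumed
+ * to sit in bits 8-15 of resp->word1, as in the synchronous path):
+ *
+ *	static void fetch_done(struct fsp_msg *msg)
+ *	{
+ *		uint8_t rc = (msg->resp->word1 >> 8) & 0xff;
+ *		uint32_t wlen = msg->resp->data.words[2];
+ *
+ *		if (rc == 0 || rc == 2)
+ *			consume(wlen);	// bytes landed in the DMA buffer
+ *		fsp_freemsg(msg);
+ *	}
+ *
+ * queued via: fsp_fetch_data_queue(flags, id, sub_id, offset,
+ *				    dma_addr, &len, fetch_done);
+ */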
+
+void fsp_used_by_console(void)
+{
+ fsp_lock.in_con_path = true;
+}