Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c     15
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  154
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h     46
3 files changed, 122 insertions, 93 deletions
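
Most of this diff converts the on-the-wire fields used by the ibmvscsi driver (and declared in viosrp.h) to explicit big-endian types so the driver also works on little-endian hosts; the small ibmvfc.c change adjusts how cancel failures are reported while the host adapter resets. As a minimal sketch of the endianness pattern applied throughout the hunks below (real kernel helpers, but a hypothetical structure and functions):

/*
 * Sketch only: fields that cross the CRQ are declared with sparse-checked
 * big-endian types and converted exactly once, at the boundary.
 */
#include <linux/types.h>	/* __be16/__be32/__be64, u16/u32 */
#include <asm/byteorder.h>	/* cpu_to_be16/32(), be32_to_cpu(), ... */

struct example_wire_hdr {	/* hypothetical wire-format header */
	__be32 type;		/* big-endian on the wire */
	__be16 length;
};

static void example_fill(struct example_wire_hdr *hdr, u32 type, u16 len)
{
	/* CPU-endian values are swapped (if needed) when written out... */
	hdr->type = cpu_to_be32(type);
	hdr->length = cpu_to_be16(len);
}

static u32 example_read_type(const struct example_wire_hdr *hdr)
{
	/* ...and swapped back when read, so the same code runs unchanged
	 * on big- and little-endian CPUs. */
	return be32_to_cpu(hdr->type);
}
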
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e31caa21ddf..23f5ba5e6472 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
- return -EIO;
+ /* If a failure is received, the host adapter is most likely going
+ through reset; return success so the caller will wait for the
+ command being cancelled to be returned */
+ return 0;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
- return -EIO;
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset; return success so
+ the caller will wait for the command being cancelled to be returned */
+ return 0;
+ default:
+ return -EIO;
+ };
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..fa764406df68 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -241,7 +241,7 @@ static void gather_partition_info(void)
struct device_node *rootdn;
const char *ppartition_name;
- const unsigned int *p_number_ptr;
+ const __be32 *p_number_ptr;
/* Retrieve information about this partition */
rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
- partition_number = *p_number_ptr;
+ partition_number = of_read_number(p_number_ptr, 1);
of_node_put(rootdn);
}
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
- hostdata->madapter_info.partition_number = partition_number;
+ hostdata->madapter_info.partition_number =
+ cpu_to_be32(partition_number);
- hostdata->madapter_info.mad_version = 1;
- hostdata->madapter_info.os_type = 2;
+ hostdata->madapter_info.mad_version = cpu_to_be32(1);
+ hostdata->madapter_info.os_type = cpu_to_be32(2);
}
/**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
memset(&evt->crq, 0x00, sizeof(evt->crq));
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
- evt->crq.IU_length = sizeof(*evt->xfer_iu);
- evt->crq.IU_data_ptr = pool->iu_token +
- sizeof(*evt->xfer_iu) * i;
+ evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+ evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+ sizeof(*evt->xfer_iu) * i);
evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata;
evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
evt_struct->cmnd_done = NULL;
evt_struct->sync_srp = NULL;
evt_struct->crq.format = format;
- evt_struct->crq.timeout = timeout;
+ evt_struct->crq.timeout = cpu_to_be16(timeout);
evt_struct->done = done;
}
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
scsi_for_each_sg(cmd, sg, nseg, i) {
struct srp_direct_buf *descr = md + i;
- descr->va = sg_dma_address(sg);
- descr->len = sg_dma_len(sg);
+ descr->va = cpu_to_be64(sg_dma_address(sg));
+ descr->len = cpu_to_be32(sg_dma_len(sg));
descr->key = 0;
total_length += sg_dma_len(sg);
}
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
}
indirect->table_desc.va = 0;
- indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(struct srp_direct_buf));
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(cmd, sg_mapped,
&indirect->desc_list[0]);
- indirect->len = total_length;
+ indirect->len = cpu_to_be32(total_length);
return 1;
}
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
- indirect->len = total_length;
- indirect->table_desc.va = evt_struct->ext_list_token;
- indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+ indirect->len = cpu_to_be32(total_length);
+ indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(indirect->desc_list[0]));
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
struct ibmvscsi_host_data *hostdata,
unsigned long timeout)
{
- u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+ __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
int request_status = 0;
int rc;
int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
add_timer(&evt_struct->timer);
}
- if ((rc =
- ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+ rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+ if (rc != 0) {
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
memcpy(cmnd->sense_buffer,
rsp->data,
- rsp->sense_data_len);
+ be32_to_cpu(rsp->sense_data_len));
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev);
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
- scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+ scsi_set_resid(cmnd,
+ be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
- scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
}
if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
- srp_cmd->lun = ((u64) lun) << 48;
+ srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
out_fmt == SRP_DATA_DESC_INDIRECT) &&
indirect->table_desc.va == 0) {
- indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+ indirect->table_desc.va =
+ cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
offsetof(struct srp_cmd, add_data) +
- offsetof(struct srp_indirect_buf, desc_list);
+ offsetof(struct srp_indirect_buf, desc_list));
}
return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* request_limit could have been set to -1 by this client.
*/
atomic_set(&hostdata->request_limit,
- evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+ be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login = &evt_struct->iu.srp.login_req;
memset(login, 0, sizeof(*login));
login->opcode = SRP_LOGIN_REQ;
- login->req_it_iu_len = sizeof(union srp_iu);
- login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+ login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+ login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
evt_struct->xfer_iu->mad.capabilities.common.status);
} else {
- if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+ if (hostdata->caps.migration.common.server_support !=
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Partition migration not supported\n");
if (client_reserve) {
if (hostdata->caps.reserve.common.server_support ==
- SERVER_SUPPORTS_CAP)
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Client reserve enabled\n");
else
dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.capabilities;
memset(req, 0, sizeof(*req));
- hostdata->caps.flags = CAP_LIST_SUPPORTED;
+ hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
if (hostdata->client_migrated)
- hostdata->caps.flags |= CLIENT_MIGRATED;
+ hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
- req->common.type = VIOSRP_CAPABILITIES_TYPE;
- req->buffer = hostdata->caps_addr;
+ req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+ req->buffer = cpu_to_be64(hostdata->caps_addr);
- hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
- hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
- hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.migration.ecl = 1;
+ hostdata->caps.migration.common.cap_type =
+ cpu_to_be32(MIGRATION_CAPABILITIES);
+ hostdata->caps.migration.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.migration));
+ hostdata->caps.migration.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.migration.ecl = cpu_to_be32(1);
if (client_reserve) {
- hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
- hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
- hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
- req->common.length = sizeof(hostdata->caps);
+ hostdata->caps.reserve.common.cap_type =
+ cpu_to_be32(RESERVATION_CAPABILITIES);
+ hostdata->caps.reserve.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.reserve));
+ hostdata->caps.reserve.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.reserve.type =
+ cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+ req->common.length =
+ cpu_to_be16(sizeof(hostdata->caps));
} else
- req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+ req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+ sizeof(hostdata->caps.reserve));
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
- u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+ u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
if (status == VIOSRP_MAD_NOT_SUPPORTED)
dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
fast_fail_mad = &evt_struct->iu.mad.fast_fail;
memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
- fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
- fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+ fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+ fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
"host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
+ be32_to_cpu(hostdata->madapter_info.partition_number),
+ be32_to_cpu(hostdata->madapter_info.os_type),
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
- hostdata->madapter_info.port_max_txu[0] >> 9;
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
- if (hostdata->madapter_info.os_type == 3 &&
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
- if (hostdata->madapter_info.os_type == 3) {
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
enable_fast_fail(hostdata);
return;
}
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
- req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
- req->common.length = sizeof(hostdata->madapter_info);
- req->buffer = hostdata->adapter_info_addr;
+ req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+ req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+ req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* Set up an abort SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* Set up a lun reset SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
{
long rc;
unsigned long flags;
+ /* The hypervisor copies our tag value here so no byteswapping */
struct srp_event_struct *evt_struct =
- (struct srp_event_struct *)crq->IU_data_ptr;
+ (__force struct srp_event_struct *)crq->IU_data_ptr;
switch (crq->valid) {
case 0xC0: /* initialization */
switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
*/
if (!valid_event_struct(&hostdata->pool, evt_struct)) {
dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (atomic_read(&evt_struct->free)) {
dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (crq->format == VIOSRP_SRP_FORMAT)
- atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+ atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
&hostdata->request_limit);
del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
/* Set up a lun reset SRP command */
memset(host_config, 0x00, sizeof(*host_config));
- host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
- host_config->common.length = length;
- host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
- length,
- DMA_BIDIRECTIONAL);
+ host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+ host_config->common.length = cpu_to_be16(length);
+ addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+ if (dma_mapping_error(hostdata->dev, addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
return -1;
}
+ host_config->buffer = cpu_to_be64(addr);
+
init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
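
One note on the gather_partition_info() hunk at the top of this file before moving on to the header: of_get_property() hands back the raw big-endian cells stored in the device tree, and of_read_number() converts them for the running CPU. A minimal sketch of that pattern, using the real OF helpers but a hypothetical wrapper name and simplified error handling:

/*
 * Sketch only: read the single-cell "ibm,partition-no" property in host
 * byte order, as gather_partition_info() now does.
 */
#include <linux/of.h>

static unsigned int example_read_partition_no(void)	/* hypothetical */
{
	struct device_node *rootdn;
	const __be32 *prop;
	unsigned int partition_number = 0;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return partition_number;

	/* of_get_property() returns the property's raw (big-endian) cells... */
	prop = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (prop)
		/* ...and of_read_number() converts them to CPU byte order. */
		partition_number = of_read_number(prop, 1);

	of_node_put(rootdn);
	return partition_number;
}
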
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 2cd735d1d196..116243087622 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -75,9 +75,9 @@ struct viosrp_crq {
u8 format; /* SCSI vs out-of-band */
u8 reserved;
u8 status; /* non-scsi failure? (e.g. DMA failure) */
- u16 timeout; /* in seconds */
- u16 IU_length; /* in bytes */
- u64 IU_data_ptr; /* the TCE for transferring data */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ __be64 IU_data_ptr; /* the TCE for transferring data */
};
/* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@ enum viosrp_capability_flag {
* Common MAD header
*/
struct mad_common {
- u32 type;
- u16 status;
- u16 length;
- u64 tag;
+ __be32 type;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
};
/*
@@ -139,23 +139,23 @@ struct mad_common {
*/
struct viosrp_empty_iu {
struct mad_common common;
- u64 buffer;
- u32 port;
+ __be64 buffer;
+ __be32 port;
};
struct viosrp_error_log {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_adapter_info {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_host_config {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_fast_fail {
@@ -164,27 +164,27 @@ struct viosrp_fast_fail {
struct viosrp_capabilities {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct mad_capability_common {
- u32 cap_type;
- u16 length;
- u16 server_support;
+ __be32 cap_type;
+ __be16 length;
+ __be16 server_support;
};
struct mad_reserve_cap {
struct mad_capability_common common;
- u32 type;
+ __be32 type;
};
struct mad_migration_cap {
struct mad_capability_common common;
- u32 ecl;
+ __be32 ecl;
};
struct capabilities{
- u32 flags;
+ __be32 flags;
char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN];
struct mad_migration_cap migration;
@@ -208,10 +208,10 @@ union viosrp_iu {
struct mad_adapter_info_data {
char srp_version[8];
char partition_name[96];
- u32 partition_number;
- u32 mad_version;
- u32 os_type;
- u32 port_max_txu[8]; /* per-port maximum transfer */
+ __be32 partition_number;
+ __be32 mad_version;
+ __be32 os_type;
+ __be32 port_max_txu[8]; /* per-port maximum transfer */
};
#endif
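
With viosrp.h now declaring its wire-format fields as __be16/__be32/__be64, sparse can flag any remaining access that forgets a conversion. A minimal illustration with a hypothetical struct mirroring mad_common; the sparse invocation mentioned in the comment (something like "make C=2 CF=-D__CHECK_ENDIAN__ drivers/scsi/ibmvscsi/" for kernels of this vintage) is an assumption about the build setup, not part of the patch:

/*
 * Illustration only: __be32-typed fields make missing byteswaps visible
 * to sparse at build time instead of at run time on little-endian hosts.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_mad {		/* hypothetical, mirrors mad_common */
	__be32 type;
};

static void example_set_type(struct example_mad *mad, u32 type)
{
	/*
	 * Writing "mad->type = type;" here would draw a sparse warning
	 * ("incorrect type in assignment") when endian checking is enabled;
	 * the explicit conversion below is the form this patch uses
	 * throughout ibmvscsi.c.
	 */
	mad->type = cpu_to_be32(type);
}
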