Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/blacklist.c     | 234
-rw-r--r--  drivers/s390/cio/blacklist.h     |   2
-rw-r--r--  drivers/s390/cio/ccwgroup.c      |   6
-rw-r--r--  drivers/s390/cio/chsc.c          | 473
-rw-r--r--  drivers/s390/cio/chsc.h          |  13
-rw-r--r--  drivers/s390/cio/cio.c           | 168
-rw-r--r--  drivers/s390/cio/cio.h           |  11
-rw-r--r--  drivers/s390/cio/cmf.c           |   8
-rw-r--r--  drivers/s390/cio/css.c           | 297
-rw-r--r--  drivers/s390/cio/css.h           |  43
-rw-r--r--  drivers/s390/cio/device.c        |  47
-rw-r--r--  drivers/s390/cio/device.h        |   1
-rw-r--r--  drivers/s390/cio/device_fsm.c    |  29
-rw-r--r--  drivers/s390/cio/device_id.c     |  26
-rw-r--r--  drivers/s390/cio/device_ops.c    |   4
-rw-r--r--  drivers/s390/cio/device_pgid.c   |  56
-rw-r--r--  drivers/s390/cio/device_status.c |  14
-rw-r--r--  drivers/s390/cio/ioasm.h         |  86
-rw-r--r--  drivers/s390/cio/qdio.c          | 713
-rw-r--r--  drivers/s390/cio/qdio.h          | 144
-rw-r--r--  drivers/s390/cio/schid.h         |  26
21 files changed, 1576 insertions(+), 825 deletions(-)
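
The core of this patch is the switch from a flat subchannel number ("irq") to a struct subchannel_id that also carries a subchannel set id (ssid), so that up to __MAX_SSID + 1 subchannel sets can be addressed. The new schid.h appears in the diffstat but not in this excerpt; judging from how the rest of the patch uses it, it provides roughly the following. This is an illustrative reconstruction only -- the exact field layout and any reserved bits are assumptions, not taken from the patch:

#include <linux/types.h>
#include <linux/string.h>

/* Sketch of the subchannel addressing used throughout this patch.  Only
 * ssid/sch_no, init_subchannel_id() and schid_equal() are actually
 * referenced by the hunks below; the bit widths are assumed. */
struct subchannel_id {
	__u32 reserved : 14;	/* assumed padding */
	__u32 ssid     :  2;	/* subchannel set id, 0..__MAX_SSID */
	__u32 sch_no   : 16;	/* subchannel number, 0..__MAX_SUBCHANNEL */
} __attribute__ ((packed));

static inline void
init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(*schid));
}

static inline int
schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
{
	return schid1->ssid == schid2->ssid &&
	       schid1->sch_no == schid2->sch_no;
}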
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a1c52a682191..daf21e03b21d 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
- * $Revision: 1.35 $
+ * $Revision: 1.39 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -15,6 +15,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
@@ -34,10 +35,10 @@
* These can be single devices or ranges of devices
*/
-/* 65536 bits to indicate if a devno is blacklisted or not */
-#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
-static unsigned long bl_dev[__BL_DEV_WORDS];
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;
/*
@@ -45,21 +46,23 @@ typedef enum {add, free} range_action;
* (Un-)blacklist the devices from-to
*/
static inline void
-blacklist_range (range_action action, unsigned int from, unsigned int to)
+blacklist_range (range_action action, unsigned int from, unsigned int to,
+ unsigned int ssid)
{
if (!to)
to = from;
- if (from > to || to > __MAX_SUBCHANNELS) {
+ if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
printk (KERN_WARNING "Invalid blacklist range "
- "0x%04x to 0x%04x, skipping\n", from, to);
+ "0.%x.%04x to 0.%x.%04x, skipping\n",
+ ssid, from, ssid, to);
return;
}
for (; from <= to; from++) {
if (action == add)
- set_bit (from, bl_dev);
+ set_bit (from, bl_dev[ssid]);
else
- clear_bit (from, bl_dev);
+ clear_bit (from, bl_dev[ssid]);
}
}
@@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
* Shamelessly grabbed from dasd_devmap.c.
*/
static inline int
-blacklist_busid(char **str, int *id0, int *id1, int *devno)
+blacklist_busid(char **str, int *id0, int *ssid, int *devno)
{
int val, old_style;
char *sav;
@@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
goto confused;
val = simple_strtoul(*str, str, 16);
if (old_style || (*str)[0] != '.') {
- *id0 = *id1 = 0;
+ *id0 = *ssid = 0;
if (val < 0 || val > 0xffff)
goto confused;
*devno = val;
@@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xff || (*str)++[0] != '.')
goto confused;
- *id1 = val;
+ *ssid = val;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
goto confused;
val = simple_strtoul(*str, str, 16);
@@ -125,7 +128,7 @@ confused:
static inline int
blacklist_parse_parameters (char *str, range_action action)
{
- unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
+ unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
while (*str != 0 && *str != '\n') {
range_action ra = action;
@@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action)
*/
if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
- from = 0;
- to = __MAX_SUBCHANNELS;
+ int j;
+
str += 3;
+ for (j=0; j <= __MAX_SSID; j++)
+ blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
} else {
int rc;
rc = blacklist_busid(&str, &from_id0,
- &from_id1, &from);
+ &from_ssid, &from);
if (rc)
continue;
to = from;
to_id0 = from_id0;
- to_id1 = from_id1;
+ to_ssid = from_ssid;
if (*str == '-') {
str++;
rc = blacklist_busid(&str, &to_id0,
- &to_id1, &to);
+ &to_ssid, &to);
if (rc)
continue;
}
@@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action)
strsep(&str, ",\n"));
continue;
}
- if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
+ if ((from_id0 != to_id0) ||
+ (from_ssid != to_ssid)) {
printk(KERN_WARNING "invalid cio_ignore range "
"%x.%x.%04x-%x.%x.%04x\n",
- from_id0, from_id1, from,
- to_id0, to_id1, to);
+ from_id0, from_ssid, from,
+ to_id0, to_ssid, to);
continue;
}
+ pr_debug("blacklist_setup: adding range "
+ "from %x.%x.%04x to %x.%x.%04x\n",
+ from_id0, from_ssid, from, to_id0, to_ssid, to);
+ blacklist_range (ra, from, to, to_ssid);
}
- /* FIXME: ignoring id0 and id1 here. */
- pr_debug("blacklist_setup: adding range "
- "from 0.0.%04x to 0.0.%04x\n", from, to);
- blacklist_range (ra, from, to);
}
return 1;
}
@@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup);
* Used by validate_subchannel()
*/
int
-is_blacklisted (int devno)
+is_blacklisted (int ssid, int devno)
{
- return test_bit (devno, bl_dev);
+ return test_bit (devno, bl_dev[ssid]);
}
#ifdef CONFIG_PROC_FS
+static int
+__s390_redo_validation(struct subchannel_id schid, void *data)
+{
+ int ret;
+ struct subchannel *sch;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ /* Already known. */
+ put_device(&sch->dev);
+ return 0;
+ }
+ ret = css_probe_device(schid);
+ if (ret == -ENXIO)
+ return ret; /* We're through. */
+ if (ret == -ENOMEM)
+ /* Stop validation for now. Bad, but no need for a panic. */
+ return ret;
+ return 0;
+}
+
/*
* Function: s390_redo_validation
* Look for no longer blacklisted devices
@@ -226,29 +253,9 @@ is_blacklisted (int devno)
static inline void
s390_redo_validation (void)
{
- unsigned int irq;
-
CIO_TRACE_EVENT (0, "redoval");
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int ret;
- struct subchannel *sch;
-
- sch = get_subchannel_by_schid(irq);
- if (sch) {
- /* Already known. */
- put_device(&sch->dev);
- continue;
- }
- ret = css_probe_device(irq);
- if (ret == -ENXIO)
- break; /* We're through. */
- if (ret == -ENOMEM)
- /*
- * Stop validation for now. Bad, but no need for a
- * panic.
- */
- break;
- }
+
+ for_each_subchannel(__s390_redo_validation, NULL);
}
/*
@@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf)
s390_redo_validation ();
}
-/* FIXME: These should be real bus ids and not home-grown ones! */
-static int cio_ignore_read (char *page, char **start, off_t off,
- int count, int *eof, void *data)
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+ int devno;
+ int ssid;
+ int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
- const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
- long devno;
- int len;
-
- len = 0;
- for (devno = off; /* abuse the page variable
- * as counter, see fs/proc/generic.c */
- devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
- if (!test_bit(devno, bl_dev))
- continue;
- len += sprintf(page + len, "0.0.%04lx", devno);
- if (test_bit(devno + 1, bl_dev)) { /* print range */
- while (++devno < __MAX_SUBCHANNELS)
- if (!test_bit(devno, bl_dev))
- break;
- len += sprintf(page + len, "-0.0.%04lx", --devno);
- }
- len += sprintf(page + len, "\n");
- }
+ struct ccwdev_iter *iter;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+ iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+ return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+ if (!IS_ERR(it))
+ kfree(it);
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct ccwdev_iter *iter;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+ iter->devno = 0;
+ iter->ssid++;
+ if (iter->ssid > __MAX_SSID)
+ return NULL;
+ } else
+ iter->devno++;
+ (*offset)++;
+ return iter;
+}
- if (devno < __MAX_SUBCHANNELS)
- *eof = 1;
- *start = (char *) (devno - off); /* number of checked entries */
- return len;
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+ struct ccwdev_iter *iter;
+
+ iter = it;
+ if (!is_blacklisted(iter->ssid, iter->devno))
+ /* Not blacklisted, nothing to output. */
+ return 0;
+ if (!iter->in_range) {
+ /* First device in range. */
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1))
+ /* Singular device. */
+ return seq_printf(s, "0.%x.%04x\n",
+ iter->ssid, iter->devno);
+ iter->in_range = 1;
+ return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ }
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Last device in range. */
+ iter->in_range = 0;
+ return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ }
+ return 0;
}
-static int cio_ignore_write(struct file *file, const char __user *user_buf,
- unsigned long user_len, void *data)
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
char *buf;
+ if (*offset)
+ return -EINVAL;
if (user_len > 65536)
user_len = 65536;
buf = vmalloc (user_len + 1); /* maybe better use the stack? */
@@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf,
return user_len;
}
+static struct seq_operations cio_ignore_proc_seq_ops = {
+ .start = cio_ignore_proc_seq_start,
+ .stop = cio_ignore_proc_seq_stop,
+ .next = cio_ignore_proc_seq_next,
+ .show = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cio_ignore_proc_seq_ops);
+}
+
+static struct file_operations cio_ignore_proc_fops = {
+ .open = cio_ignore_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = cio_ignore_write,
+};
+
static int
cio_ignore_proc_init (void)
{
@@ -340,8 +417,7 @@ cio_ignore_proc_init (void)
if (!entry)
return 0;
- entry->read_proc = cio_ignore_read;
- entry->write_proc = cio_ignore_write;
+ entry->proc_fops = &cio_ignore_proc_fops;
return 1;
}
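
The /proc/cio_ignore read path above moves from the old read_proc callback to the seq_file iterator API, which takes care of buffering and offsets itself. As a reminder of how the four seq_operations callbacks cooperate, here is a minimal, self-contained sketch in the style of that era's proc interface; it is a generic example, not code from the patch:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Iterate positions 0..3.  start()/next() return an opaque cursor;
 * NULL ends the sequence (an ERR_PTR() would abort it, as the patch
 * does on allocation failure).  Position + 1 is used as the cursor so
 * that position 0 does not look like NULL. */
static void *
demo_seq_start(struct seq_file *s, loff_t *pos)
{
	return (*pos < 4) ? (void *)(unsigned long)(*pos + 1) : NULL;
}

static void *
demo_seq_next(struct seq_file *s, void *it, loff_t *pos)
{
	(*pos)++;
	return (*pos < 4) ? (void *)(unsigned long)(*pos + 1) : NULL;
}

static void
demo_seq_stop(struct seq_file *s, void *it)
{
	/* Nothing to free for this trivial iterator. */
}

static int
demo_seq_show(struct seq_file *s, void *it)
{
	seq_printf(s, "entry %lu\n", (unsigned long)it - 1);
	return 0;
}

static struct seq_operations demo_seq_ops = {
	.start	= demo_seq_start,
	.stop	= demo_seq_stop,
	.next	= demo_seq_next,
	.show	= demo_seq_show,
};

static int
demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

/* Hooked up via entry->proc_fops = &demo_fops;, just as cio_ignore_proc_init()
 * does for cio_ignore_proc_fops above. */
static struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};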
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index fb42cafbe57c..95e25c1df922 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,6 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
-extern int is_blacklisted (int devno);
+extern int is_blacklisted (int ssid, int devno);
#endif
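
For reference, the blacklist maintained by blacklist.c is driven by the cio_ignore= kernel parameter parsed above. With the ssid now part of the bus id, the parser accepts both old-style plain device numbers and full 0.<ssid>.<devno> ids; ranges must stay within a single ssid, and "all" covers every subchannel set. The device numbers below are made-up examples:

# A range and a single device on subchannel set 1:
cio_ignore=0.0.b100-0.0.b1ff,0.1.4711

# Old-style plain hex device numbers are still parsed as 0.0.<devno>:
cio_ignore=b100-b1ff

# Blacklist everything, on every subchannel set:
cio_ignore=all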
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index be9d2d65c22f..e849289d4f3c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
- * $Revision: 1.32 $
+ * $Revision: 1.33 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
struct ccwgroup_driver *gdrv;
int ret;
- if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE) {
ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
struct ccwgroup_driver *gdrv;
int ret;
- if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE) {
ret = 0;
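
The ccwgroup online/offline paths are converted from the s390-private atomic_compare_and_swap() to the generic atomic_cmpxchg(). Judging from how the call sites are rewritten, the old helper returned non-zero when the swap did not happen, while atomic_cmpxchg() always returns the value the counter held before the exchange, so failure is now detected by comparing against the expected old value. A minimal sketch of the claim/release pattern used for gdev->onoff (illustrative only, not part of the patch):

#include <linux/errno.h>
#include <asm/atomic.h>

/* Only one caller may enter the set_online/set_offline path at a time.
 * atomic_cmpxchg(v, old, new) stores "new" only if *v == old and always
 * returns the previous value, so a non-zero result means someone else
 * already claimed the device. */
static int
claim_onoff(atomic_t *onoff)
{
	if (atomic_cmpxchg(onoff, 0, 1) != 0)
		return -EAGAIN;
	return 0;
}

static void
release_onoff(atomic_t *onoff)
{
	atomic_set(onoff, 0);
}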
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..7270808c02d1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
- * $Revision: 1.120 $
+ * $Revision: 1.126 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -24,8 +24,6 @@
#include "ioasm.h"
#include "chsc.h"
-static struct channel_path *chps[NR_CHPIDS];
-
static void *sei_page;
static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
static inline void
set_chp_logically_online(int chp, int onoff)
{
- chps[chp]->state = onoff;
+ css[0]->chps[chp]->state = onoff;
}
static int
get_chp_status(int chp)
{
- return (chps[chp] ? chps[chp]->state : -ENODEV);
+ return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}
void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
struct {
struct chsc_header request;
- u16 reserved1;
+ u16 reserved1a:10;
+ u16 ssid:2;
+ u16 reserved1b:4;
u16 f_sch; /* first subchannel */
u16 reserved2;
u16 l_sch; /* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
.code = 0x0004,
};
- ssd_area->f_sch = sch->irq;
- ssd_area->l_sch = sch->irq;
+ ssd_area->ssid = sch->schid.ssid;
+ ssd_area->f_sch = sch->schid.sch_no;
+ ssd_area->l_sch = sch->schid.sch_no;
ccode = chsc(ssd_area);
if (ccode > 0) {
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
*/
if (ssd_area->st > 3) { /* uhm, that looks strange... */
CIO_CRW_EVENT(0, "Strange subchannel type %d"
- " for sch %04x\n", ssd_area->st, sch->irq);
+ " for sch 0.%x.%04x\n", ssd_area->st,
+ sch->schid.ssid, sch->schid.sch_no);
/*
* There may have been a new subchannel type defined in the
* time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
return 0;
} else {
const char *type[4] = {"I/O", "chsc", "message", "ADM"};
- CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
- sch->irq, type[ssd_area->st]);
+ CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
+ sch->schid.ssid, sch->schid.sch_no,
+ type[ssd_area->st]);
sch->ssd_info.valid = 1;
sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
int j;
int mask;
struct subchannel *sch;
- __u8 *chpid;
+ struct channel_path *chpid;
struct schib schib;
sch = to_subchannel(dev);
chpid = data;
for (j = 0; j < 8; j++)
- if (sch->schib.pmcw.chpid[j] == *chpid)
+ if (sch->schib.pmcw.chpid[j] == chpid->id)
break;
if (j >= 8)
return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
mask = 0x80 >> j;
spin_lock(&sch->lock);
- stsch(sch->irq, &schib);
+ stsch(sch->schid, &schib);
if (!schib.pmcw.dnv)
goto out_unreg;
memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
out_unreg:
spin_unlock(&sch->lock);
sch->lpm = 0;
- if (css_enqueue_subchannel_slow(sch->irq)) {
+ if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -295,23 +298,30 @@ static inline void
s390_set_chpid_offline( __u8 chpid)
{
char dbf_txt[15];
+ struct device *dev;
sprintf(dbf_txt, "chpr%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
if (get_chp_status(chpid) <= 0)
return;
-
- bus_for_each_dev(&css_bus_type, NULL, &chpid,
+ dev = get_device(&css[0]->chps[chpid]->dev);
+ bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
s390_subchannel_remove_chpid);
if (need_rescan || css_slow_subchannels_exist())
queue_work(slow_path_wq, &slow_path_work);
+ put_device(dev);
}
+struct res_acc_data {
+ struct channel_path *chp;
+ u32 fla_mask;
+ u16 fla;
+};
+
static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
- struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
int found;
int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
* check if chpid is in information updated by ssd
*/
if (sch->ssd_info.valid &&
- sch->ssd_info.chpid[chp] == chpid &&
- (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+ sch->ssd_info.chpid[chp] == res_data->chp->id &&
+ (sch->ssd_info.fla[chp] & res_data->fla_mask)
+ == res_data->fla) {
found = 1;
break;
}
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
* new path information and eventually check for logically
* offline chpids.
*/
- ccode = stsch(sch->irq, &sch->schib);
+ ccode = stsch(sch->schid, &sch->schib);
if (ccode > 0)
return 0;
return 0x80 >> chp;
}
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+ struct schib schib;
+ int ret;
+ /*
+ * We don't know the device yet, but since a path
+ * may be available now to the device we'll have
+ * to do recognition again.
+ * Since we don't have any idea about which chpid
+ * that beast may be on we'll have to do a stsch
+ * on all devices, grr...
+ */
+ if (stsch_err(schid, &schib))
+ /* We're through */
+ return need_rescan ? -EAGAIN : -ENXIO;
+
+ /* Put it on the slow path. */
+ ret = css_enqueue_subchannel_slow(schid);
+ if (ret) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
{
+ int chp_mask, old_lpm;
+ struct res_acc_data *res_data;
struct subchannel *sch;
- int irq, rc;
+
+ res_data = (struct res_acc_data *)data;
+ sch = get_subchannel_by_schid(schid);
+ if (!sch)
+ /* Check if a subchannel is newly available. */
+ return s390_process_res_acc_new_sch(schid);
+
+ spin_lock_irq(&sch->lock);
+
+ chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+ if (chp_mask == 0) {
+ spin_unlock_irq(&sch->lock);
+ return 0;
+ }
+ old_lpm = sch->lpm;
+ sch->lpm = ((sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom)
+ | chp_mask) & sch->opm;
+ if (!old_lpm && sch->lpm)
+ device_trigger_reprobe(sch);
+ else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+
+ spin_unlock_irq(&sch->lock);
+ put_device(&sch->dev);
+ return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
+ int rc;
char dbf_txt[15];
- sprintf(dbf_txt, "accpr%x", chpid);
+ sprintf(dbf_txt, "accpr%x", res_data->chp->id);
CIO_TRACE_EVENT( 2, dbf_txt);
- if (fla != 0) {
- sprintf(dbf_txt, "fla%x", fla);
+ if (res_data->fla != 0) {
+ sprintf(dbf_txt, "fla%x", res_data->fla);
CIO_TRACE_EVENT( 2, dbf_txt);
}
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
* The more information we have (info), the less scanning
* will we have to do.
*/
-
- if (!get_chp_status(chpid))
- return 0; /* no need to do the rest */
-
- rc = 0;
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int chp_mask, old_lpm;
-
- sch = get_subchannel_by_schid(irq);
- if (!sch) {
- struct schib schib;
- int ret;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch(irq, &schib)) {
- /* We're through */
- if (need_rescan)
- rc = -EAGAIN;
- break;
- }
- if (need_rescan) {
- rc = -EAGAIN;
- continue;
- }
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- rc = -EAGAIN;
- continue;
- }
-
- spin_lock_irq(&sch->lock);
-
- chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
- if (chp_mask == 0) {
-
- spin_unlock_irq(&sch->lock);
- continue;
- }
- old_lpm = sch->lpm;
- sch->lpm = ((sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom)
- | chp_mask) & sch->opm;
- if (!old_lpm && sch->lpm)
- device_trigger_reprobe(sch);
- else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
-
- spin_unlock_irq(&sch->lock);
- put_device(&sch->dev);
- if (fla_mask == 0xffff)
- break;
- }
+ rc = for_each_subchannel(__s390_process_res_acc, res_data);
+ if (css_slow_subchannels_exist())
+ rc = -EAGAIN;
+ else if (rc != -EAGAIN)
+ rc = 0;
return rc;
}
@@ -466,6 +481,7 @@ int
chsc_process_crw(void)
{
int chpid, ret;
+ struct res_acc_data res_data;
struct {
struct chsc_header request;
u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
ret = 0;
do {
int ccode, status;
+ struct device *dev;
memset(sei_area, 0, sizeof(*sei_area));
-
+ memset(&res_data, 0, sizeof(struct res_acc_data));
sei_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
if (status < 0)
new_channel_path(sei_area->rsid);
else if (!status)
- return 0;
- if ((sei_area->vf & 0x80) == 0) {
- pr_debug("chpid: %x\n", sei_area->rsid);
- ret = s390_process_res_acc(sei_area->rsid,
- 0, 0);
- } else if ((sei_area->vf & 0xc0) == 0x80) {
- pr_debug("chpid: %x link addr: %x\n",
- sei_area->rsid, sei_area->fla);
- ret = s390_process_res_acc(sei_area->rsid,
- sei_area->fla,
- 0xff00);
- } else if ((sei_area->vf & 0xc0) == 0xc0) {
- pr_debug("chpid: %x full link addr: %x\n",
- sei_area->rsid, sei_area->fla);
- ret = s390_process_res_acc(sei_area->rsid,
- sei_area->fla,
- 0xffff);
+ break;
+ dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+ res_data.chp = to_channelpath(dev);
+ pr_debug("chpid: %x", sei_area->rsid);
+ if ((sei_area->vf & 0xc0) != 0) {
+ res_data.fla = sei_area->fla;
+ if ((sei_area->vf & 0xc0) == 0xc0) {
+ pr_debug(" full link addr: %x",
+ sei_area->fla);
+ res_data.fla_mask = 0xffff;
+ } else {
+ pr_debug(" link addr: %x",
+ sei_area->fla);
+ res_data.fla_mask = 0xff00;
+ }
}
- pr_debug("\n");
-
+ ret = s390_process_res_acc(&res_data);
+ pr_debug("\n\n");
+ put_device(dev);
break;
default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
return ret;
}
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+ struct schib schib;
+ int ret;
+
+ if (stsch(schid, &schib))
+ /* We're through */
+ return need_rescan ? -EAGAIN : -ENXIO;
+
+ /* Put it on the slow path. */
+ ret = css_enqueue_subchannel_slow(schid);
+ if (ret) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+
static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
{
+ int i;
+ struct channel_path *chp;
struct subchannel *sch;
- int irq, ret, rc;
+
+ chp = (struct channel_path *)data;
+ sch = get_subchannel_by_schid(schid);
+ if (!sch)
+ /* Check if the subchannel is now available. */
+ return __chp_add_new_sch(schid);
+ spin_lock(&sch->lock);
+ for (i=0; i<8; i++)
+ if (sch->schib.pmcw.chpid[i] == chp->id) {
+ if (stsch(sch->schid, &sch->schib) != 0) {
+ /* Endgame. */
+ spin_unlock(&sch->lock);
+ return -ENXIO;
+ }
+ break;
+ }
+ if (i==8) {
+ spin_unlock(&sch->lock);
+ return 0;
+ }
+ sch->lpm = ((sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom)
+ | 0x80 >> i) & sch->opm;
+
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+
+ spin_unlock(&sch->lock);
+ put_device(&sch->dev);
+ return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+ int rc;
char dbf_txt[15];
+ struct device *dev;
if (!get_chp_status(chpid))
return 0; /* no need to do the rest */
@@ -617,59 +693,13 @@ chp_add(int chpid)
sprintf(dbf_txt, "cadd%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
- rc = 0;
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int i;
-
- sch = get_subchannel_by_schid(irq);
- if (!sch) {
- struct schib schib;
-
- if (stsch(irq, &schib)) {
- /* We're through */
- if (need_rescan)
- rc = -EAGAIN;
- break;
- }
- if (need_rescan) {
- rc = -EAGAIN;
- continue;
- }
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- rc = -EAGAIN;
- continue;
- }
-
- spin_lock(&sch->lock);
- for (i=0; i<8; i++)
- if (sch->schib.pmcw.chpid[i] == chpid) {
- if (stsch(sch->irq, &sch->schib) != 0) {
- /* Endgame. */
- spin_unlock(&sch->lock);
- return rc;
- }
- break;
- }
- if (i==8) {
- spin_unlock(&sch->lock);
- return rc;
- }
- sch->lpm = ((sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom)
- | 0x80 >> i) & sch->opm;
-
- if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
-
- spin_unlock(&sch->lock);
- put_device(&sch->dev);
- }
+ dev = get_device(&css[0]->chps[chpid]->dev);
+ rc = for_each_subchannel(__chp_add, to_channelpath(dev));
+ if (css_slow_subchannels_exist())
+ rc = -EAGAIN;
+ if (rc != -EAGAIN)
+ rc = 0;
+ put_device(dev);
return rc;
}
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
if (!device_is_online(sch))
/* cio could be doing I/O. */
return 0;
- cc = stsch(sch->irq, &sch->schib);
+ cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
* just varied off path. Then kill it.
*/
if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
- if (css_enqueue_subchannel_slow(sch->irq)) {
+ if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
return 0;
}
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct subchannel *sch;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ put_device(&sch->dev);
+ return 0;
+ }
+ if (stsch_err(schid, &schib))
+ /* We're through */
+ return -ENXIO;
+ /* Put it on the slow path. */
+ if (css_enqueue_subchannel_slow(schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
s390_vary_chpid( __u8 chpid, int on)
{
char dbf_text[15];
- int status, irq, ret;
- struct subchannel *sch;
+ int status;
sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
s390_subchannel_vary_chpid_on :
s390_subchannel_vary_chpid_off);
- if (!on)
- goto out;
- /* Scan for new devices on varied on path. */
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- struct schib schib;
-
- if (need_rescan)
- break;
- sch = get_subchannel_by_schid(irq);
- if (sch) {
- put_device(&sch->dev);
- continue;
- }
- if (stsch(irq, &schib))
- /* We're through */
- break;
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- }
-out:
+ if (on)
+ /* Scan for new devices on varied on path. */
+ for_each_subchannel(__s390_vary_chpid_on, NULL);
if (need_rescan || css_slow_subchannels_exist())
queue_work(slow_path_wq, &slow_path_work);
return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
chp->id = chpid;
chp->state = 1;
chp->dev = (struct device) {
- .parent = &css_bus_device,
+ .parent = &css[0]->device,
.release = chp_release,
};
snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
device_unregister(&chp->dev);
goto out_free;
} else
- chps[chpid] = chp;
+ css[0]->chps[chpid] = chp;
return ret;
out_free:
kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
struct channel_path *chp;
struct channel_path_desc *desc;
- chp = chps[sch->schib.pmcw.chpid[chp_no]];
+ chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
if (!chp)
return NULL;
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
return (sei_page ? 0 : -ENOMEM);
}
+int __init
+chsc_enable_facility(int operation_code)
+{
+ int ret;
+ struct {
+ struct chsc_header request;
+ u8 reserved1:4;
+ u8 format:4;
+ u8 reserved2;
+ u16 operation_code;
+ u32 reserved3;
+ u32 reserved4;
+ u32 operation_data_area[252];
+ struct chsc_header response;
+ u32 reserved5:4;
+ u32 format2:4;
+ u32 reserved6:24;
+ } *sda_area;
+
+ sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if (!sda_area)
+ return -ENOMEM;
+ sda_area->request = (struct chsc_header) {
+ .length = 0x0400,
+ .code = 0x0031,
+ };
+ sda_area->operation_code = operation_code;
+
+ ret = chsc(sda_area);
+ if (ret > 0) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ switch (sda_area->response.code) {
+ case 0x0003: /* invalid request block */
+ case 0x0007:
+ ret = -EINVAL;
+ break;
+ case 0x0004: /* command not provided */
+ case 0x0101: /* facility not provided */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ out:
+ free_page((unsigned long)sda_area);
+ return ret;
+}
+
subsys_initcall(chsc_alloc_sei_area);
struct css_general_char css_general_characteristics;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index be20da49d147..44e4b4bb1c5a 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,12 +1,12 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H
-#define NR_CHPIDS 256
-
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
+#define CHSC_SDA_OC_MSS 0x2
+
struct chsc_header {
u16 length;
u16 code;
@@ -43,7 +43,9 @@ struct css_general_char {
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
- u32 : 10;
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 28;
}__attribute__((packed));
@@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
extern void *chsc_get_chp_desc(struct subchannel*, int);
+
+extern int chsc_enable_facility(int);
+
+#define to_channelpath(dev) container_of(dev, struct channel_path, dev)
+
#endif
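
Several chsc.c paths now pass a struct channel_path * through the void *data argument of bus_for_each_dev() instead of a bare chpid byte, and to_channelpath(), added above, recovers the containing structure from its embedded struct device via container_of(). A small sketch of that round trip; the channel_path declaration is trimmed to the fields these hunks rely on, and print_chpid() is a made-up example:

#include <linux/device.h>
#include <linux/kernel.h>

/* Trimmed-down channel_path; the real definition in the cio code
 * carries more state. */
struct channel_path {
	int id;
	int state;
	struct device dev;
};

#define to_channelpath(d) container_of(d, struct channel_path, dev)

/* Callback in the style of s390_subchannel_remove_chpid(): "data" is
 * the channel_path pointer handed in by the caller below. */
static int
print_chpid(struct device *dev, void *data)
{
	struct channel_path *chp = data;

	printk(KERN_DEBUG "visiting %s for chpid %x\n", dev->bus_id, chp->id);
	return 0;
}

/* Caller side, as in s390_set_chpid_offline():
 *
 *	dev = get_device(&css[0]->chps[chpid]->dev);
 *	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), print_chpid);
 *	put_device(dev);
 */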
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 185bc73c3ecd..7376bc87206d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
- * $Revision: 1.135 $
+ * $Revision: 1.138 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -135,7 +135,7 @@ cio_tpi(void)
return 0;
irb = (struct irb *) __LC_IRB;
/* Store interrupt response block to lowcore. */
- if (tsch (tpi_info->irq, irb) != 0)
+ if (tsch (tpi_info->schid, irb) != 0)
/* Not status pending or not operational. */
return 1;
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
else
sch->lpm = 0;
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
- "subchannel %04x!\n", sch->irq);
+ "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+ sch->schid.sch_no);
sprintf(dbf_text, "no%s", sch->dev.bus_id);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
*/
@@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
sch->orb.key = key >> 4;
/* issue "Start Subchannel" */
sch->orb.cpa = (__u32) __pa (cpa);
- ccode = ssch (sch->irq, &sch->orb);
+ ccode = ssch (sch->schid, &sch->orb);
/* process condition code */
sprintf (dbf_txt, "ccode:%d", ccode);
@@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch)
CIO_TRACE_EVENT (4, "resIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
- ccode = rsch (sch->irq);
+ ccode = rsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (4, dbf_txt);
@@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch)
/*
* Issue "Halt subchannel" and process condition code
*/
- ccode = hsch (sch->irq);
+ ccode = hsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch)
/*
* Issue "Clear subchannel" and process condition code
*/
- ccode = csch (sch->irq);
+ ccode = csch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "cancelIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = xsch (sch->irq);
+ ccode = xsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch)
switch (ccode) {
case 0: /* success */
/* Update information in scsw. */
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
return 0;
case 1: /* status pending */
return -EBUSY;
@@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch)
ret = 0;
for (retry = 0; retry < 5; retry++) {
- ccode = msch_err (sch->irq, &sch->schib);
+ ccode = msch_err (sch->schid, &sch->schib);
if (ccode < 0) /* -EIO if msch gets a program check. */
return ccode;
switch (ccode) {
@@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = stsch (sch->irq, &sch->schib);
+ ccode = stsch (sch->schid, &sch->schib);
if (ccode)
return -ENODEV;
@@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
*/
sch->schib.pmcw.csense = 0;
if (ret == 0) {
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
if (sch->schib.pmcw.ena)
break;
}
if (ret == -EBUSY) {
struct irb irb;
- if (tsch(sch->irq, &irb) != 0)
+ if (tsch(sch->schid, &irb) != 0)
break;
}
}
@@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = stsch (sch->irq, &sch->schib);
+ ccode = stsch (sch->schid, &sch->schib);
if (ccode == 3) /* Not operational. */
return -ENODEV;
@@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch)
*/
break;
if (ret == 0) {
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
if (!sch->schib.pmcw.ena)
break;
}
@@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch)
* -ENODEV for subchannels with invalid device number or blacklisted devices
*/
int
-cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
+cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
char dbf_txt[15];
int ccode;
- sprintf (dbf_txt, "valsch%x", irq);
+ sprintf (dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT (4, dbf_txt);
/* Nuke all fields. */
@@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
spin_lock_init(&sch->lock);
/* Set a name for the subchannel */
- snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
+ snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
+ schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
*/
- sch->irq = irq;
- ccode = stsch (irq, &sch->schib);
+ ccode = stsch_err (schid, &sch->schib);
if (ccode)
- return -ENXIO;
+ return (ccode == 3) ? -ENXIO : ccode;
+ sch->schid = schid;
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
@@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
*/
if (sch->st != 0) {
CIO_DEBUG(KERN_INFO, 0,
- "Subchannel %04X reports "
+ "Subchannel 0.%x.%04x reports "
"non-I/O subchannel type %04X\n",
- sch->irq, sch->st);
+ sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
return sch->st;
}
@@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
return -ENODEV;
/* Devno is valid. */
- if (is_blacklisted (sch->schib.pmcw.dev)) {
+ if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
CIO_MSG_EVENT(0, "Blacklisted device detected "
- "at devno %04X\n", sch->schib.pmcw.dev);
+ "at devno %04X, subchannel set %x\n",
+ sch->schib.pmcw.dev, sch->schid.ssid);
return -ENODEV;
}
sch->opm = 0xff;
- chsc_validate_chpids(sch);
+ if (!cio_is_console(sch->schid))
+ chsc_validate_chpids(sch);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom &
sch->opm;
CIO_DEBUG(KERN_INFO, 0,
- "Detected device %04X on subchannel %04X"
+ "Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
- sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
+ sch->schib.pmcw.dev, sch->schid.ssid,
+ sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/*
@@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs)
if (sch)
spin_lock(&sch->lock);
/* Store interrupt response block to lowcore. */
- if (tsch (tpi_info->irq, irb) == 0 && sch) {
+ if (tsch (tpi_info->schid, irb) == 0 && sch) {
/* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw,
sizeof (irb->scsw));
@@ -691,28 +698,36 @@ wait_cons_dev (void)
}
static int
-cio_console_irq(void)
+cio_test_for_console(struct subchannel_id schid, void *data)
{
- int irq;
+ if (stsch_err(schid, &console_subchannel.schib) != 0)
+ return -ENXIO;
+ if (console_subchannel.schib.pmcw.dnv &&
+ console_subchannel.schib.pmcw.dev ==
+ console_devno) {
+ console_irq = schid.sch_no;
+ return 1; /* found */
+ }
+ return 0;
+}
+
+
+static int
+cio_get_console_sch_no(void)
+{
+ struct subchannel_id schid;
+ init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
- if (stsch(console_irq, &console_subchannel.schib) != 0 ||
+ schid.sch_no = console_irq;
+ if (stsch(schid, &console_subchannel.schib) != 0 ||
!console_subchannel.schib.pmcw.dnv)
return -1;
console_devno = console_subchannel.schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- if (stsch(irq, &console_subchannel.schib) != 0)
- break;
- if (console_subchannel.schib.pmcw.dnv &&
- console_subchannel.schib.pmcw.dev ==
- console_devno) {
- console_irq = irq;
- break;
- }
- }
+ for_each_subchannel(cio_test_for_console, NULL);
if (console_irq == -1)
return -1;
} else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
struct subchannel *
cio_probe_console(void)
{
- int irq, ret;
+ int sch_no, ret;
+ struct subchannel_id schid;
if (xchg(&console_subchannel_in_use, 1) != 0)
return ERR_PTR(-EBUSY);
- irq = cio_console_irq();
- if (irq == -1) {
+ sch_no = cio_get_console_sch_no();
+ if (sch_no == -1) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
- ret = cio_validate_subchannel(&console_subchannel, irq);
+ init_subchannel_id(&schid);
+ schid.sch_no = sch_no;
+ ret = cio_validate_subchannel(&console_subchannel, schid);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
/* Bah... hack to catch console special sausages. */
int
-cio_is_console(int irq)
+cio_is_console(struct subchannel_id schid)
{
if (!console_subchannel_in_use)
return 0;
- return (irq == console_subchannel.irq);
+ return schid_equal(&schid, &console_subchannel.schid);
}
struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
#endif
static inline int
-__disable_subchannel_easy(unsigned int schid, struct schib *schib)
+__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
int retry, cc;
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
}
static inline int
-__clear_subchannel_easy(unsigned int schid)
+__clear_subchannel_easy(struct subchannel_id schid)
{
int retry;
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.irq, (struct irb *)__LC_IRB);
- if (ti.irq == schid)
+ tsch(ti.schid, (struct irb *)__LC_IRB);
+ if (schid_equal(&ti.schid, &schid))
return 0;
}
udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
}
extern void do_reipl(unsigned long devno);
+static int
+__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+
+ if (stsch_err(schid, &schib))
+ return -ENXIO;
+ if (!schib.pmcw.ena)
+ return 0;
+ switch(__disable_subchannel_easy(schid, &schib)) {
+ case 0:
+ case -ENODEV:
+ break;
+ default: /* -EBUSY */
+ if (__clear_subchannel_easy(schid))
+ break; /* give up... */
+ stsch(schid, &schib);
+ __disable_subchannel_easy(schid, &schib);
+ }
+ return 0;
+}
-/* Clear all subchannels. */
void
clear_all_subchannels(void)
{
- unsigned int schid;
-
local_irq_disable();
- for (schid=0;schid<=highest_subchannel;schid++) {
- struct schib schib;
- if (stsch(schid, &schib))
- break; /* break out of the loop */
- if (!schib.pmcw.ena)
- continue;
- switch(__disable_subchannel_easy(schid, &schib)) {
- case 0:
- case -ENODEV:
- break;
- default: /* -EBUSY */
- if (__clear_subchannel_easy(schid))
- break; /* give up... jump out of switch */
- stsch(schid, &schib);
- __disable_subchannel_easy(schid, &schib);
- }
- }
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
}
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da420a9..0ca987344e07 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
#ifndef S390_CIO_H
#define S390_CIO_H
+#include "schid.h"
+
/*
* where we put the ssd info
*/
@@ -83,7 +85,7 @@ struct orb {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
- unsigned int irq; /* aka. subchannel number */
+ struct subchannel_id schid;
spinlock_t lock; /* subchannel lock */
enum {
@@ -114,7 +116,7 @@ struct subchannel {
#define to_subchannel(n) container_of(n, struct subchannel, dev)
-extern int cio_validate_subchannel (struct subchannel *, unsigned int);
+extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel (struct subchannel *, unsigned int);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
-extern int cio_is_console(int irq);
+extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
#else
-#define cio_is_console(irq) 0
+#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7fe8327..0b03714e696a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
+ * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
*
* Linux on zSeries Channel Measurement Facility support
*
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
/* msch can silently fail, so do it again if necessary */
for (retry = 0; retry < 3; retry++) {
/* prepare schib */
- stsch(sch->irq, schib);
+ stsch(sch->schid, schib);
schib->pmcw.mme = mme;
schib->pmcw.mbfc = mbfc;
/* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
schib->pmcw.mbi = address;
/* try to submit it */
- switch(ret = msch_err(sch->irq, schib)) {
+ switch(ret = msch_err(sch->schid, schib)) {
case 0:
break;
case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
ret = -EINVAL;
break;
}
- stsch(sch->irq, schib); /* restore the schib */
+ stsch(sch->schid, schib); /* restore the schib */
if (ret)
break;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc27..e565193650c7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/css.c
* driver for channel subsystem
- * $Revision: 1.85 $
+ * $Revision: 1.93 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -21,19 +21,35 @@
#include "ioasm.h"
#include "chsc.h"
-unsigned int highest_subchannel;
int need_rescan = 0;
int css_init_done = 0;
+static int max_ssid = 0;
+
+struct channel_subsystem *css[__MAX_CSSID + 1];
-struct pgid global_pgid;
int css_characteristics_avail = 0;
-struct device css_bus_device = {
- .bus_id = "css0",
-};
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+ struct subchannel_id schid;
+ int ret;
+
+ init_subchannel_id(&schid);
+ ret = -ENODEV;
+ do {
+ do {
+ ret = fn(schid, data);
+ if (ret)
+ break;
+ } while (schid.sch_no++ < __MAX_SUBCHANNEL);
+ schid.sch_no = 0;
+ } while (schid.ssid++ < max_ssid);
+ return ret;
+}
static struct subchannel *
-css_alloc_subchannel(int irq)
+css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, irq);
+ ret = cio_validate_subchannel (sch, schid);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
- if (irq > highest_subchannel)
- highest_subchannel = irq;
if (sch->st != SUBCHANNEL_TYPE_IO) {
/* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
struct subchannel *sch;
sch = to_subchannel(dev);
- if (!cio_is_console(sch->irq))
+ if (!cio_is_console(sch->schid))
kfree(sch);
}
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
int ret;
/* Initialize the subchannel structure */
- sch->dev.parent = &css_bus_device;
+ sch->dev.parent = &css[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
}
int
-css_probe_device(int irq)
+css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
- sch = css_alloc_subchannel(irq);
+ sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
check_subchannel(struct device * dev, void * data)
{
struct subchannel *sch;
- int irq = (unsigned long)data;
+ struct subchannel_id *schid = data;
sch = to_subchannel(dev);
- return (sch->irq == irq);
+ return schid_equal(&sch->schid, schid);
}
struct subchannel *
-get_subchannel_by_schid(int irq)
+get_subchannel_by_schid(struct subchannel_id schid)
{
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
- (void *)(unsigned long)irq, check_subchannel);
+ (void *)&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
static inline int
-css_get_subchannel_status(struct subchannel *sch, int schid)
+css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
{
struct schib schib;
int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
}
static int
-css_evaluate_subchannel(int irq, int slow)
+css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
int event, ret, disc;
struct subchannel *sch;
unsigned long flags;
- sch = get_subchannel_by_schid(irq);
+ sch = get_subchannel_by_schid(schid);
disc = sch ? device_is_disconnected(sch) : 0;
if (disc && slow) {
if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
put_device(&sch->dev);
return -EAGAIN; /* Will be done on the slow path. */
}
- event = css_get_subchannel_status(sch, irq);
- CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
- irq, event, sch?(disc?"disconnected":"normal"):"unknown",
+ event = css_get_subchannel_status(sch, schid);
+ CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
+ schid.ssid, schid.sch_no, event,
+ sch?(disc?"disconnected":"normal"):"unknown",
slow?"slow":"fast");
switch (event) {
case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
put_device(&sch->dev);
- ret = css_probe_device(irq);
+ ret = css_probe_device(schid);
} else {
/*
* We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
device_trigger_reprobe(sch);
spin_unlock_irqrestore(&sch->lock, flags);
}
- ret = sch ? 0 : css_probe_device(irq);
+ ret = sch ? 0 : css_probe_device(schid);
break;
default:
BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
return ret;
}
-static void
-css_rescan_devices(void)
+static int
+css_rescan_devices(struct subchannel_id schid, void *data)
{
- int irq, ret;
-
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- ret = css_evaluate_subchannel(irq, 1);
- /* No more memory. It doesn't make sense to continue. No
- * panic because this can happen in midflight and just
- * because we can't use a new device is no reason to crash
- * the system. */
- if (ret == -ENOMEM)
- break;
- /* -ENXIO indicates that there are no more subchannels. */
- if (ret == -ENXIO)
- break;
- }
+ return css_evaluate_subchannel(schid, 1);
}
struct slow_subchannel {
struct list_head slow_list;
- unsigned long schid;
+ struct subchannel_id schid;
};
static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
if (need_rescan) {
need_rescan = 0;
- css_rescan_devices();
+ for_each_subchannel(css_rescan_devices, NULL);
return;
}
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
* Called from the machine check handler for subchannel report words.
*/
int
-css_process_crw(int irq)
+css_process_crw(int rsid1, int rsid2)
{
int ret;
+ struct subchannel_id mchk_schid;
- CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
+ CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
+ rsid1, rsid2);
if (need_rescan)
/* We need to iterate all subchannels anyway. */
return -EAGAIN;
+
+ init_subchannel_id(&mchk_schid);
+ mchk_schid.sch_no = rsid1;
+ if (rsid2 != 0)
+ mchk_schid.ssid = (rsid2 >> 8) & 3;
+
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
- ret = css_evaluate_subchannel(irq, 0);
+ ret = css_evaluate_subchannel(mchk_schid, 0);
if (ret == -EAGAIN) {
- if (css_enqueue_subchannel_slow(irq)) {
+ if (css_enqueue_subchannel_slow(mchk_schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -378,22 +388,83 @@ css_process_crw(int irq)
return ret;
}
-static void __init
-css_generate_pgid(void)
+static int __init
+__init_channel_subsystem(struct subchannel_id schid, void *data)
{
- /* Let's build our path group ID here. */
- if (css_characteristics_avail && css_general_characteristics.mcss)
- global_pgid.cpu_addr = 0x8000;
+ struct subchannel *sch;
+ int ret;
+
+ if (cio_is_console(schid))
+ sch = cio_get_console_subchannel();
else {
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ ret = PTR_ERR(sch);
+ else
+ ret = 0;
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMEM:
+ panic("Out of memory in init_channel_subsystem\n");
+ /* -ENXIO: no more subchannels. */
+ case -ENXIO:
+ return ret;
+ default:
+ return 0;
+ }
+ }
+ /*
+ * We register ALL valid subchannels in ioinfo, even those
+ * that have been present before init_channel_subsystem.
+ * These subchannels can't have been registered yet (kmalloc
+ * not working) so we do it now. This is true e.g. for the
+ * console subchannel.
+ */
+ css_register_subchannel(sch);
+ return 0;
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+ if (css_characteristics_avail && css_general_characteristics.mcss) {
+ css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+ css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+ } else {
#ifdef CONFIG_SMP
- global_pgid.cpu_addr = hard_smp_processor_id();
+ css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
- global_pgid.cpu_addr = 0;
+ css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
- global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
- global_pgid.tod_high = (__u32) (get_clock() >> 32);
+ css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
+ css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+ css->global_pgid.tod_high = tod_high;
+
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+ struct channel_subsystem *css;
+
+ css = to_css(dev);
+ kfree(css);
+}
+
+static inline void __init
+setup_css(int nr)
+{
+ u32 tod_high;
+
+ memset(css[nr], 0, sizeof(struct channel_subsystem));
+ css[nr]->valid = 1;
+ css[nr]->cssid = nr;
+ sprintf(css[nr]->device.bus_id, "css%x", nr);
+ css[nr]->device.release = channel_subsystem_release;
+ tod_high = (u32) (get_clock() >> 32);
+ css_generate_pgid(css[nr], tod_high);
}
/*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
static int __init
init_channel_subsystem (void)
{
- int ret, irq;
+ int ret, i;
if (chsc_determine_css_characteristics() == 0)
css_characteristics_avail = 1;
- css_generate_pgid();
-
if ((ret = bus_register(&css_bus_type)))
goto out;
- if ((ret = device_register (&css_bus_device)))
- goto out_bus;
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+ switch (ret) {
+ case 0: /* Success. */
+ max_ssid = __MAX_SSID;
+ break;
+ case -ENOMEM:
+ goto out_bus;
+ default:
+ max_ssid = 0;
+ }
+ /* Setup css structure. */
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+ if (!css[i]) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+ setup_css(i);
+ ret = device_register(&css[i]->device);
+ if (ret)
+ goto out_free;
+ }
css_init_done = 1;
ctl_set_bit(6, 28);
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- struct subchannel *sch;
-
- if (cio_is_console(irq))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(irq);
- if (IS_ERR(sch))
- ret = PTR_ERR(sch);
- else
- ret = 0;
- if (ret == -ENOMEM)
- panic("Out of memory in "
- "init_channel_subsystem\n");
- /* -ENXIO: no more subchannels. */
- if (ret == -ENXIO)
- break;
- if (ret)
- continue;
- }
- /*
- * We register ALL valid subchannels in ioinfo, even those
- * that have been present before init_channel_subsystem.
- * These subchannels can't have been registered yet (kmalloc
- * not working) so we do it now. This is true e.g. for the
- * console subchannel.
- */
- css_register_subchannel(sch);
- }
+ for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
-
+out_free:
+ kfree(css[i]);
+out_unregister:
+ while (i > 0) {
+ i--;
+ device_unregister(&css[i]->device);
+ }
out_bus:
bus_unregister(&css_bus_type);
out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
subsys_initcall(init_channel_subsystem);
-/*
- * Register root devices for some drivers. The release function must not be
- * in the device drivers, so we do it here.
- */
-static void
-s390_root_dev_release(struct device *dev)
-{
- kfree(dev);
-}
-
-struct device *
-s390_root_dev_register(const char *name)
-{
- struct device *dev;
- int ret;
-
- if (!strlen(name))
- return ERR_PTR(-EINVAL);
- dev = kmalloc(sizeof(struct device), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
- memset(dev, 0, sizeof(struct device));
- strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
- dev->release = s390_root_dev_release;
- ret = device_register(dev);
- if (ret) {
- kfree(dev);
- return ERR_PTR(ret);
- }
- return dev;
-}
-
-void
-s390_root_dev_unregister(struct device *dev)
-{
- if (dev)
- device_unregister(dev);
-}
-
int
-css_enqueue_subchannel_slow(unsigned long schid)
+css_enqueue_subchannel_slow(struct subchannel_id schid)
{
struct slow_subchannel *new_slow_sch;
unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
-EXPORT_SYMBOL(s390_root_dev_register);
-EXPORT_SYMBOL(s390_root_dev_unregister);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
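
The open-coded scan over all 65536 subchannel numbers is replaced by for_each_subchannel(), whose callback receives a struct subchannel_id plus a caller-supplied cookie (prototype in css.h below). A minimal sketch of such a callback, assuming the cio-internal headers (css.h, ioasm.h, schid.h) are included; the function names and the counting logic are purely illustrative:

	/* Count subchannels whose SCHIB has the device-number-valid bit set.
	 * stsch_err() is the fenced stsch variant added in ioasm.h below. */
	static int __init count_dnv(struct subchannel_id schid, void *data)
	{
		struct schib schib;
		int *count = data;

		if (!stsch_err(schid, &schib) && schib.pmcw.dnv)
			(*count)++;
		return 0;	/* presumably: a non-zero return would stop the walk */
	}

	static int __init count_valid_devices(void)
	{
		int count = 0;

		for_each_subchannel(count_dnv, &count);
		return count;
	}
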
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2004a6c49388..251ebd7a7d3a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -6,6 +6,8 @@
#include <asm/cio.h>
+#include "schid.h"
+
/*
* path grouping stuff
*/
@@ -33,19 +35,25 @@ struct path_state {
__u8 resvd : 3; /* reserved */
} __attribute__ ((packed));
+struct extended_cssid {
+ u8 version;
+ u8 cssid;
+} __attribute__ ((packed));
+
struct pgid {
union {
__u8 fc; /* SPID function code */
struct path_state ps; /* SNID path state */
} inf;
- __u32 cpu_addr : 16; /* CPU address */
+ union {
+ __u32 cpu_addr : 16; /* CPU address */
+ struct extended_cssid ext_cssid;
+ } pgid_high;
__u32 cpu_id : 24; /* CPU identification */
__u32 cpu_model : 16; /* CPU model */
__u32 tod_high; /* high word TOD clock */
} __attribute__ ((packed));
-extern struct pgid global_pgid;
-
#define MAX_CIWS 8
/*
@@ -68,7 +76,8 @@ struct ccw_device_private {
atomic_t onoff;
unsigned long registered;
__u16 devno; /* device number */
- __u16 irq; /* subchannel number */
+ __u16 sch_no; /* subchannel number */
+ __u8 ssid; /* subchannel set id */
__u8 imask; /* lpm mask for SNID/SID/SPGID */
int iretry; /* retry counter SNID/SID/SPGID */
struct {
@@ -121,15 +130,27 @@ struct css_driver {
extern struct bus_type css_bus_type;
extern struct css_driver io_subchannel_driver;
-int css_probe_device(int irq);
-extern struct subchannel * get_subchannel_by_schid(int irq);
-extern unsigned int highest_subchannel;
+extern int css_probe_device(struct subchannel_id);
+extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
-
-#define __MAX_SUBCHANNELS 65536
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
+#define __MAX_CHPID 255
+#define __MAX_CSSID 0
+
+struct channel_subsystem {
+ u8 cssid;
+ int valid;
+ struct channel_path *chps[__MAX_CHPID];
+ struct device device;
+ struct pgid global_pgid;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
extern struct bus_type css_bus_type;
-extern struct device css_bus_device;
+extern struct channel_subsystem *css[];
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
@@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *);
void device_kill_pending_timer(struct subchannel *);
/* Helper functions to build lists for the slow path. */
-int css_enqueue_subchannel_slow(unsigned long schid);
+extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
void css_clear_subchannel_slow_list(void);
int css_slow_subchannels_exist(void);
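
Device and subchannel names now carry the subchannel-set id, changing from the old 0.0.devno form to cssid.ssid.devno (with __MAX_CSSID currently 0). A small sketch of the naming convention the snprintf calls in device.c below follow; the helper itself is made up for illustration:

	/* Build a ccw bus id such as "0.1.4711" for devno 0x4711 in
	 * subchannel set 1.  The leading 0 is the (only) channel
	 * subsystem id in this patch. */
	static void ccw_format_bus_id(char *buf, size_t len,
				      unsigned int ssid, unsigned int devno)
	{
		snprintf(buf, len, "0.%x.%04x", ssid, devno);
	}
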
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 85908cacc3b8..fa3e4c0a2536 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/device.c
* bus driver for ccw devices
- * $Revision: 1.131 $
+ * $Revision: 1.137 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
int i, force, ret;
char *tmp;
- if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev)
}
struct match_data {
- unsigned int devno;
+ unsigned int devno;
+ unsigned int ssid;
struct ccw_device * sibling;
};
@@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data)
cdev = to_ccwdev(dev);
if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
(cdev->private->devno == d->devno) &&
+ (cdev->private->ssid == d->ssid) &&
(cdev != d->sibling)) {
cdev->private->state = DEV_STATE_NOT_OPER;
return 1;
@@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data)
}
static struct ccw_device *
-get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling)
+get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
+ struct ccw_device *sibling)
{
struct device *dev;
struct match_data data = {
- .devno = devno,
+ .devno = devno,
+ .ssid = ssid,
.sibling = sibling,
};
@@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data)
need_rename = 1;
other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
- cdev);
+ sch->schid.ssid, cdev);
if (other_cdev) {
struct subchannel *other_sch;
other_sch = to_subchannel(other_cdev->dev.parent);
if (get_device(&other_sch->dev)) {
- stsch(other_sch->irq, &other_sch->schib);
+ stsch(other_sch->schid, &other_sch->schib);
if (other_sch->schib.pmcw.dnv) {
other_sch->schib.pmcw.intparm = 0;
cio_modify(other_sch);
@@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data)
if (test_and_clear_bit(1, &cdev->private->registered))
device_del(&cdev->dev);
if (need_rename)
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
- sch->schib.pmcw.dev);
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_add_changed, (void *)cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
sch->dev.driver_data = cdev;
sch->driver = &io_subchannel_driver;
cdev->ccwlock = &sch->lock;
+
/* Init private data. */
priv = cdev->private;
priv->devno = sch->schib.pmcw.dev;
- priv->irq = sch->irq;
+ priv->ssid = sch->schid.ssid;
+ priv->sch_no = sch->schid.sch_no;
priv->state = DEV_STATE_NOT_OPER;
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
init_timer(&priv->timer);
/* Set an initial name for the device. */
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
- sch->schib.pmcw.dev);
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev)
sch = to_subchannel(dev);
cdev = dev->driver_data;
- if (cio_is_console(sch->irq))
+ if (cio_is_console(sch->schid))
return;
if (!sch->schib.pmcw.ena)
/* Nothing to do. */
@@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
cdev->dev = (struct device) {
.parent = &sch->dev,
};
- /* Initialize the subchannel structure */
- sch->dev.parent = &css_bus_device;
- sch->dev.bus = &css_bus_type;
-
rc = io_subchannel_recog(cdev, sch);
if (rc)
return rc;
@@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
+/* Helper func for qdio. */
+struct subchannel_id
+ccw_device_get_subchannel_id(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->schid;
+}
+
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
@@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
+EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
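
ccw_device_get_subchannel_id() gives qdio (and other GPL users) the full subchannel_id of the subchannel a device sits on, instead of the bare subchannel number. A usage sketch mirroring what qdio_fill_irq() does later in this patch; the function name is illustrative:

	/* Log which subchannel a ccw device is attached to, in the new
	 * cssid.ssid.sch_no notation. */
	static void report_schid(struct ccw_device *cdev)
	{
		struct subchannel_id schid;

		schid = ccw_device_get_subchannel_id(cdev);
		printk(KERN_INFO "%s is on subchannel 0.%x.%04x\n",
		       cdev->dev.bus_id, schid.ssid, schid.sch_no);
	}
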
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index a3aa056d7245..11587ebb7289 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
+extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
void retry_set_schib(struct ccw_device *cdev);
#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c1c89f4fd4e3..23d12b65e5fa 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
int ret;
sch = to_subchannel(cdev->dev.parent);
- ret = stsch(sch->irq, &sch->schib);
+ ret = stsch(sch->schid, &sch->schib);
if (ret || !sch->schib.pmcw.dnv)
return -ENODEV;
if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
* through ssch() and the path information is up to date.
*/
old_lpm = sch->lpm;
- stsch(sch->irq, &sch->schib);
+ stsch(sch->schid, &sch->schib);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom &
@@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : unknown device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ "SenseID : unknown device %04x on subchannel "
+ "0.%x.%04x\n", cdev->private->devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
return;
}
/* Issue device info message. */
- CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
+ CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
- "%04X/%02X\n", cdev->private->devno,
+ "%04X/%02X\n",
+ cdev->private->ssid, cdev->private->devno,
cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : boxed device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ "SenseID : boxed device %04x on subchannel "
+ "0.%x.%04x\n", cdev->private->devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
}
cdev->private->state = state;
@@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
"Boxed device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ cdev->private->devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev)
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
- if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
+ if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE) {
if (sch->schib.scsw.actl != 0)
@@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
* Since we might not just be coming from an interrupt from the
* subchannel we have to update the schib.
*/
- stsch(sch->irq, &sch->schib);
+ stsch(sch->schid, &sch->schib);
if (sch->schib.scsw.actl != 0 ||
(cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
@@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Iff device is idle, reset timeout. */
sch = to_subchannel(cdev->dev.parent);
- if (!stsch(sch->irq, &sch->schib))
+ if (!stsch(sch->schid, &sch->schib))
if (sch->schib.scsw.actl == 0)
ccw_device_set_timeout(cdev, 0);
/* Call the handler. */
@@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch)
return;
/* Update some values. */
- if (stsch(sch->irq, &sch->schib))
+ if (stsch(sch->schid, &sch->schib))
return;
/*
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 0e68fb511dc9..04ceba343db8 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -27,7 +27,7 @@
/*
* diag210 is used under VM to get information about a virtual device
*/
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
int
diag210(struct diag210 * addr)
{
@@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
* sense id information. So, for intervention required,
* we use the "whack it until it talks" strategy...
*/
- CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
- "reports cmd reject\n",
- cdev->private->devno, sch->irq);
+ CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
+ "0.%x.%04x reports cmd reject\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
+ CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->devno,
+ cdev->private->ssid, cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
if (irb->scsw.cc == 3) {
if ((sch->orb.lpm &
sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
- " subchannel %04x is 'not operational'\n",
- sch->orb.lpm, cdev->private->devno,
- sch->irq);
+ CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
+ "on subchannel 0.%x.%04x is "
+ "'not operational'\n", sch->orb.lpm,
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EACCES;
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
- "subchannel %04x returns status %02X%02X\n",
- cdev->private->devno, sch->irq,
+ "subchannel 0.%x.%04x returns status %02X%02X\n",
+ cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
return -EAGAIN;
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026e6900..143b6c25a4e6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/device_ops.c
*
- * $Revision: 1.57 $
+ * $Revision: 1.58 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
- return cdev->private->irq;
+ return cdev->private->sch_no;
}
int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a67331..052832d03d38 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
#include "cio_debug.h"
#include "css.h"
#include "device.h"
+#include "ioasm.h"
/*
* Start Sense Path Group ID helper function. Used in ccw_device_recog
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
if (ret != -EACCES)
return ret;
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not "
+ "0.%x.%04x, lpm %02X, became 'not "
"operational'\n",
- cdev->private->devno, sch->irq,
- cdev->private->imask);
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
}
cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
+ CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->devno,
+ cdev->private->ssid, cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq, sch->orb.lpm);
+ CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+ " lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, sch->orb.lpm);
return -EACCES;
}
if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
+ CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
"is reserved by someone else\n",
- cdev->private->devno, sch->irq);
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EUSERS;
}
return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case 0: /* Sense Path Group ID successful. */
if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
- memcpy(&cdev->private->pgid, &global_pgid,
+ memcpy(&cdev->private->pgid, &css[0]->global_pgid,
sizeof(struct pgid));
ccw_device_sense_pgid_done(cdev, 0);
break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
sch->lpm &= ~cdev->private->imask;
sch->vpm &= ~cdev->private->imask;
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq, cdev->private->imask);
+ "0.%x.%04x, lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
return ret;
}
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
/* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
+ CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
+ "cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
+ cdev->private->ssid,
cdev->private->devno, irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
@@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq,
- cdev->private->imask);
+ CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
+ " lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
return 0;
@@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
void
ccw_device_verify_start(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
+ /*
+ * Update sch->lpm with current values to catch paths becoming
+ * available again.
+ */
+ if (stsch(sch->schid, &sch->schib)) {
+ ccw_device_verify_done(cdev, -ENODEV);
+ return;
+ }
+ sch->lpm = sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom &
+ sch->opm;
__ccw_device_verify_start(cdev);
}
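
ccw_device_verify_start() now refreshes sch->lpm before path verification so that paths which have become available again are picked up. The mask is the intersection of the pmcw path masks with the paths cio currently permits; as an illustrative helper (field semantics follow the usual pim/pam/pom naming, i.e. installed/available/operational):

	/* Recompute the usable path mask from a freshly stored SCHIB. */
	static __u8 usable_paths(struct subchannel *sch)
	{
		return sch->schib.pmcw.pim &
		       sch->schib.pmcw.pam &
		       sch->schib.pmcw.pom &
		       sch->opm;
	}
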
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 12a24d4331a2..db09c209098b 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
- " ... device %04X on subchannel %04X, dev_stat "
+ " ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
- cdev->private->devno, cdev->private->irq,
+ cdev->private->devno, cdev->private->ssid,
+ cdev->private->sch_no,
irb->scsw.dstat, irb->scsw.cstat);
if (irb->scsw.cc != 3) {
char dbf_text[15];
- sprintf(dbf_text, "chk%x", cdev->private->irq);
+ sprintf(dbf_text, "chk%x", cdev->private->sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof (struct irb));
}
@@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev)
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
- CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
- "not operational \n", __FUNCTION__, sch->irq,
+ CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+ "not operational \n", __FUNCTION__,
+ sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 45480a2bc4c0..95a9462f9a91 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,12 +1,13 @@
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
+#include "schid.h"
+
/*
* TPI info structure
*/
struct tpi_info {
- __u32 reserved1 : 16; /* reserved 0x00000001 */
- __u32 irq : 16; /* aka. subchannel number */
+ struct subchannel_id schid;
__u32 intparm; /* interruption parameter */
__u32 adapter_IO : 1;
__u32 reserved2 : 1;
@@ -21,7 +22,8 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(int irq, volatile struct schib *addr)
+static inline int stsch(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+static inline int stsch_err(struct subchannel_id schid,
+ volatile struct schib *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ " lhi %0,%3\n"
+ " lr 1,%1\n"
+ " stsch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+#ifdef CONFIG_64BIT
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 0b,1b\n"
+ ".previous"
+#else
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous"
+#endif
+ : "=&d" (ccode)
+ : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int msch(int irq, volatile struct schib *addr)
+static inline int msch(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int msch_err(int irq, volatile struct schib *addr)
+static inline int msch_err(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr)
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,1b\n"
@@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr)
".previous"
#endif
: "=&d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
+ : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int tsch(int irq, volatile struct irb *addr)
+static inline int tsch(struct subchannel_id schid,
+ volatile struct irb *addr)
{
int ccode;
@@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
@@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "a" (addr)
+ : "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int ssch(int irq, volatile struct orb *addr)
+static inline int ssch(struct subchannel_id schid,
+ volatile struct orb *addr)
{
int ccode;
@@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int rsch(int irq)
+static inline int rsch(struct subchannel_id schid)
{
int ccode;
@@ -133,12 +168,12 @@ static inline int rsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int csch(int irq)
+static inline int csch(struct subchannel_id schid)
{
int ccode;
@@ -148,12 +183,12 @@ static inline int csch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int hsch(int irq)
+static inline int hsch(struct subchannel_id schid)
{
int ccode;
@@ -163,12 +198,12 @@ static inline int hsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int xsch(int irq)
+static inline int xsch(struct subchannel_id schid)
{
int ccode;
@@ -178,21 +213,22 @@ static inline int xsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
static inline int chsc(void *chsc_area)
{
+ typedef struct { char _[4096]; } addr_type;
int cc;
__asm__ __volatile__ (
- ".insn rre,0xb25f0000,%1,0 \n\t"
+ ".insn rre,0xb25f0000,%2,0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
- : "=d" (cc)
- : "d" (chsc_area)
+ : "=d" (cc), "=m" (*(addr_type *) chsc_area)
+ : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
: "cc" );
return cc;
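
All I/O instruction wrappers now take a struct subchannel_id by value instead of the old irq | 0x10000 register image; the new schid layout (schid.h, not part of this hunk) presumably already encodes that format, so no masking is needed at the call sites. The resulting call pattern, as used throughout the device_* files above:

	/* Re-read the SCHIB and check the device-number-valid bit, the
	 * idiom used by ccw_device_offline() and friends. */
	static int device_still_there(struct subchannel *sch)
	{
		if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
			return 0;
		return 1;
	}
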
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index eb39218b925e..30a836ffc31f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -56,7 +56,7 @@
#include "ioasm.h"
#include "chsc.h"
-#define VERSION_QDIO_C "$Revision: 1.108 $"
+#define VERSION_QDIO_C "$Revision: 1.114 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
@@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats;
#endif /* QDIO_PERFORMANCE_STATS */
static int hydra_thinints;
+static int is_passthrough = 0;
static int omit_svs;
static int indicator_used[INDICATORS_PER_CACHELINE];
@@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q)
atomic_dec(&q->use_count);
}
-static volatile inline void
-qdio_set_slsb(volatile char *slsb, unsigned char value)
+/*check ccq */
+static inline int
+qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+ char dbf_text[15];
+
+ if (ccq == 0 || ccq == 32 || ccq == 96)
+ return 0;
+ if (ccq == 97)
+ return 1;
+ /*notify devices immediately*/
+ sprintf(dbf_text,"%d", ccq);
+ QDIO_DBF_TEXT2(1,trace,dbf_text);
+ return -EIO;
+}
+/* EQBS: extract buffer states */
+static inline int
+qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ unsigned int *start, unsigned int *cnt)
+{
+ struct qdio_irq *irq;
+ unsigned int tmp_cnt, q_no, ccq;
+ int rc ;
+ char dbf_text[15];
+
+ ccq = 0;
+ tmp_cnt = *cnt;
+ irq = (struct qdio_irq*)q->irq_ptr;
+ q_no = q->q_no;
+ if(!q->is_input_q)
+ q_no += irq->no_input_qs;
+ ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
+ rc = qdio_check_ccq(q, ccq);
+ if (rc < 0) {
+ QDIO_DBF_TEXT2(1,trace,"eqberr");
+ sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
+ QDIO_DBF_TEXT2(1,trace,dbf_text);
+ q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+ QDIO_STATUS_LOOK_FOR_ERROR,
+ 0, 0, 0, -1, -1, q->int_parm);
+ return 0;
+ }
+ return (tmp_cnt - *cnt);
+}
+
+/* SQBS: set buffer states */
+static inline int
+qdio_do_sqbs(struct qdio_q *q, unsigned char state,
+ unsigned int *start, unsigned int *cnt)
{
- xchg((char*)slsb,value);
+ struct qdio_irq *irq;
+ unsigned int tmp_cnt, q_no, ccq;
+ int rc;
+ char dbf_text[15];
+
+ ccq = 0;
+ tmp_cnt = *cnt;
+ irq = (struct qdio_irq*)q->irq_ptr;
+ q_no = q->q_no;
+ if(!q->is_input_q)
+ q_no += irq->no_input_qs;
+ ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
+ rc = qdio_check_ccq(q, ccq);
+ if (rc < 0) {
+ QDIO_DBF_TEXT3(1,trace,"sqberr");
+ sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+ q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+ QDIO_STATUS_LOOK_FOR_ERROR,
+ 0, 0, 0, -1, -1, q->int_parm);
+ return 0;
+ }
+ return (tmp_cnt - *cnt);
}
+static inline int
+qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
+ unsigned char state, unsigned int *count)
+{
+ volatile char *slsb;
+ struct qdio_irq *irq;
+
+ irq = (struct qdio_irq*)q->irq_ptr;
+ if (!irq->is_qebsm) {
+ slsb = (char *)&q->slsb.acc.val[(*bufno)];
+ xchg(slsb, state);
+ return 1;
+ }
+ return qdio_do_sqbs(q, state, bufno, count);
+}
+
+#ifdef CONFIG_QDIO_DEBUG
+static inline void
+qdio_trace_slsb(struct qdio_q *q)
+{
+ if (q->queue_type==QDIO_TRACE_QTYPE) {
+ if (q->is_input_q)
+ QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
+ QDIO_MAX_BUFFERS_PER_Q);
+ else
+ QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
+ QDIO_MAX_BUFFERS_PER_Q);
+ }
+}
+#endif
+
+static inline int
+set_slsb(struct qdio_q *q, unsigned int *bufno,
+ unsigned char state, unsigned int *count)
+{
+ int rc;
+#ifdef CONFIG_QDIO_DEBUG
+ qdio_trace_slsb(q);
+#endif
+ rc = qdio_set_slsb(q, bufno, state, count);
+#ifdef CONFIG_QDIO_DEBUG
+ qdio_trace_slsb(q);
+#endif
+ return rc;
+}
static inline int
qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
unsigned int gpr3)
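
set_slsb() changes from flipping a single SLSB byte to taking a buffer index and a count: with QEBSM one SQBS can cover a whole range of buffers, while the legacy path still xchg()s exactly one byte per call and returns 1. An illustrative use for the QEBSM case, mirroring qdio_check_subchannel_qebsm() further down (non-QEBSM callers such as qdio_fill_qs() loop over the buffers instead):

	/* Reset all inbound buffers of a QEBSM queue to NOT_INIT with a
	 * single SQBS. */
	static void reset_input_slsb(struct qdio_q *q)
	{
		unsigned int start = 0;
		unsigned int count = QDIO_MAX_BUFFERS_PER_Q;

		set_slsb(q, &start, SLSB_P_INPUT_NOT_INIT, &count);
	}
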
@@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
perf_stats.siga_syncs++;
#endif /* QDIO_PERFORMANCE_STATS */
- cc = do_siga_sync(q->irq, gpr2, gpr3);
+ cc = do_siga_sync(q->schid, gpr2, gpr3);
if (cc)
QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
+static int
+__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+{
+ struct qdio_irq *irq;
+ unsigned int fc = 0;
+ unsigned long schid;
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (!irq->is_qebsm)
+ schid = *((u32 *)&q->schid);
+ else {
+ schid = irq->sch_token;
+ fc |= 0x80;
+ }
+ return do_siga_output(schid, q->mask, busy_bit, fc);
+}
+
/*
* returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
* an access exception
@@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q)
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
for (;;) {
- cc = do_siga_output(q->irq, q->mask, &busy_bit);
+ cc = __do_siga_output(q, &busy_bit);
//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
if (!start_time)
@@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q)
perf_stats.siga_ins++;
#endif /* QDIO_PERFORMANCE_STATS */
- cc = do_siga_input(q->irq, q->mask);
+ cc = do_siga_input(q->schid, q->mask);
if (cc)
QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q)
}
/* locked by the locks in qdio_activate and qdio_cleanup */
-static __u32 volatile *
+static __u32 *
qdio_get_indicator(void)
{
int i;
@@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr)
atomic_dec(&spare_indicator_usecount);
}
-static inline volatile void
+static inline void
tiqdio_clear_summary_bit(__u32 *location)
{
QDIO_DBF_TEXT5(0,trace,"clrsummb");
@@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location)
xchg(location,0);
}
-static inline volatile void
+static inline void
tiqdio_set_summary_bit(__u32 *location)
{
QDIO_DBF_TEXT5(0,trace,"setsummb");
@@ -336,7 +468,9 @@ static inline int
qdio_stop_polling(struct qdio_q *q)
{
#ifdef QDIO_USE_PROCESSING_STATE
- int gsf;
+ unsigned int tmp, gsf, count = 1;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
if (!atomic_swap(&q->polling,0))
return 1;
@@ -348,17 +482,22 @@ qdio_stop_polling(struct qdio_q *q)
if (!q->is_input_q)
return 1;
- gsf=GET_SAVED_FRONTIER(q);
- set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
- (QDIO_MAX_BUFFERS_PER_Q-1)],
- SLSB_P_INPUT_NOT_INIT);
+ tmp = gsf = GET_SAVED_FRONTIER(q);
+ tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
+ set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
+
/*
* we don't issue this SYNC_MEMORY, as we trust Rick T and
* moreover will not use the PROCESSING state under VM, so
* q->polling was 0 anyway
*/
/*SYNC_MEMORY;*/
- if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
+ if (irq->is_qebsm) {
+ count = 1;
+ qdio_do_eqbs(q, &state, &gsf, &count);
+ } else
+ state = q->slsb.acc.val[gsf];
+ if (state != SLSB_P_INPUT_PRIMED)
return 1;
/*
* set our summary bit again, as otherwise there is a
@@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void)
/************************* OUTBOUND ROUTINES *******************************/
+static int
+qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
+{
+ struct qdio_irq *irq;
+ unsigned char state;
+ unsigned int cnt, count, ftc;
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
+ SYNC_MEMORY;
+
+ ftc = q->first_to_check;
+ count = qdio_min(atomic_read(&q->number_of_buffers_used),
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ if (count == 0)
+ return q->first_to_check;
+ cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+ if (cnt == 0)
+ return q->first_to_check;
+ switch (state) {
+ case SLSB_P_OUTPUT_ERROR:
+ QDIO_DBF_TEXT3(0,trace,"outperr");
+ atomic_sub(cnt , &q->number_of_buffers_used);
+ if (q->qdio_error)
+ q->error_status_flags |=
+ QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+ q->qdio_error = SLSB_P_OUTPUT_ERROR;
+ q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+ q->first_to_check = ftc;
+ break;
+ case SLSB_P_OUTPUT_EMPTY:
+ QDIO_DBF_TEXT5(0,trace,"outpempt");
+ atomic_sub(cnt, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_CU_OUTPUT_PRIMED:
+ /* all buffers primed */
+ QDIO_DBF_TEXT5(0,trace,"outpprim");
+ break;
+ default:
+ break;
+ }
+ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+ return q->first_to_check;
+}
+
+static int
+qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
+{
+ struct qdio_irq *irq;
+ unsigned char state;
+ int tmp, ftc, count, cnt;
+ char dbf_text[15];
+
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ ftc = q->first_to_check;
+ count = qdio_min(atomic_read(&q->number_of_buffers_used),
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ if (count == 0)
+ return q->first_to_check;
+ cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+ if (cnt == 0)
+ return q->first_to_check;
+ switch (state) {
+ case SLSB_P_INPUT_ERROR :
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT3(1,trace,"inperr");
+ sprintf(dbf_text,"%2x,%2x",ftc,count);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ if (q->qdio_error)
+ q->error_status_flags |=
+ QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+ q->qdio_error = SLSB_P_INPUT_ERROR;
+ q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+ atomic_sub(cnt, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_P_INPUT_PRIMED :
+ QDIO_DBF_TEXT3(0,trace,"inptprim");
+ sprintf(dbf_text,"%2x,%2x",ftc,count);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+ tmp = 0;
+ ftc = q->first_to_check;
+#ifdef QDIO_USE_PROCESSING_STATE
+ if (cnt > 1) {
+ cnt -= 1;
+ tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+ if (!tmp)
+ break;
+ }
+ cnt = 1;
+ tmp += set_slsb(q, &ftc,
+ SLSB_P_INPUT_PROCESSING, &cnt);
+ atomic_set(&q->polling, 1);
+#else
+ tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+#endif
+ atomic_sub(tmp, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_PROCESSING:
+ QDIO_DBF_TEXT5(0,trace,"inpnipro");
+ break;
+ default:
+ break;
+ }
+ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+ return q->first_to_check;
+}
static inline int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
- int f,f_mod_no;
- volatile char *slsb;
- int first_not_to_check;
+ struct qdio_irq *irq;
+ volatile char *slsb;
+ unsigned int count = 1;
+ int first_not_to_check, f, f_mod_no;
char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"getobfro");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (irq->is_qebsm)
+ return qdio_qebsm_get_outbound_buffer_frontier(q);
+
slsb=&q->slsb.acc.val[0];
f_mod_no=f=q->first_to_check;
/*
@@ -484,7 +741,7 @@ check_next:
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
- set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
/*
* we increment the frontier, as this buffer
@@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q)
result=qdio_siga_output(q);
- switch (result) {
- case 0:
- /* went smooth this time, reset timestamp */
+ switch (result) {
+ case 0:
+ /* went smooth this time, reset timestamp */
#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT3(0,trace,"cc2reslv");
- sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
+ QDIO_DBF_TEXT3(0,trace,"cc2reslv");
+ sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
+ atomic_read(&q->busy_siga_counter));
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
- q->timing.busy_start=0;
+ q->timing.busy_start=0;
+ break;
+ case (2|QDIO_SIGA_ERROR_B_BIT_SET):
+ /* cc=2 and busy bit: */
+ atomic_inc(&q->busy_siga_counter);
+
+ /* if the last siga was successful, save
+ * timestamp here */
+ if (!q->timing.busy_start)
+ q->timing.busy_start=NOW;
+
+ /* if we're in time, don't touch error_status_flags
+ * and siga_error */
+ if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
+ qdio_mark_q(q);
break;
- case (2|QDIO_SIGA_ERROR_B_BIT_SET):
- /* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
-
- /* if the last siga was successful, save
- * timestamp here */
- if (!q->timing.busy_start)
- q->timing.busy_start=NOW;
-
- /* if we're in time, don't touch error_status_flags
- * and siga_error */
- if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
- qdio_mark_q(q);
- break;
- }
- QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
+ }
+ QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
+ sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
+ atomic_read(&q->busy_siga_counter));
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
- /* else fallthrough and report error */
- default:
- /* for plain cc=1, 2 or 3: */
- if (q->siga_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+ /* else fallthrough and report error */
+ default:
+ /* for plain cc=1, 2 or 3: */
+ if (q->siga_error)
q->error_status_flags|=
- QDIO_STATUS_LOOK_FOR_ERROR;
- q->siga_error=result;
- }
+ QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+ q->error_status_flags|=
+ QDIO_STATUS_LOOK_FOR_ERROR;
+ q->siga_error=result;
+ }
}
static inline void
@@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q)
static inline int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
+ struct qdio_irq *irq;
int f,f_mod_no;
volatile char *slsb;
+ unsigned int count = 1;
int first_not_to_check;
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
@@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"getibfro");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (irq->is_qebsm)
+ return qdio_qebsm_get_inbound_buffer_frontier(q);
+
slsb=&q->slsb.acc.val[0];
f_mod_no=f=q->first_to_check;
/*
@@ -792,19 +1055,19 @@ check_next:
* kill VM in terms of CP overhead
*/
if (q->siga_sync) {
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
} else {
/* set the previous buffer to NOT_INIT. The current
* buffer will be set to PROCESSING at the end of
* this function to avoid further interrupts. */
if (last_position>=0)
- set_slsb(&slsb[last_position],
- SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &last_position,
+ SLSB_P_INPUT_NOT_INIT, &count);
atomic_set(&q->polling,1);
last_position=f_mod_no;
}
#else /* QDIO_USE_PROCESSING_STATE */
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
/*
* not needed, as the inbound queue will be synced on the next
@@ -829,7 +1092,7 @@ check_next:
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
if (q->qdio_error)
q->error_status_flags|=
@@ -857,7 +1120,7 @@ out:
#ifdef QDIO_USE_PROCESSING_STATE
if (last_position>=0)
- set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
+ set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
@@ -902,6 +1165,10 @@ static inline int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+ unsigned int start_buf, count;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
#endif
@@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
if (!q->siga_sync)
/* we'll check for more primed buffers in qeth_stop_polling */
return 0;
-
- if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
+ if (irq->is_qebsm) {
+ count = 1;
+ start_buf = q->first_to_check;
+ qdio_do_eqbs(q, &state, &start_buf, &count);
+ } else
+ state = q->slsb.acc.val[q->first_to_check];
+ if (state != SLSB_P_INPUT_PRIMED)
/*
* nothing more to do, if next buffer is not PRIMED.
* note that we did a SYNC_MEMORY before, that there
@@ -955,6 +1227,10 @@ static inline int
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+ unsigned int start_buf, count;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
#endif
@@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,dbf_text);
return 1;
}
-
- if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
+ if (irq->is_qebsm) {
+ count = 1;
+ start_buf = q->first_to_check;
+ qdio_do_eqbs(q, &state, &start_buf, &count);
+ } else
+ state = q->slsb.acc.val[q->first_to_check];
+ if (state == SLSB_P_INPUT_PRIMED) {
/* we got something to do */
QDIO_DBF_TEXT4(0,trace,"inqisntA");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
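
Both *_is_inbound_q_done() variants now fetch the state of the next buffer either via EQBS (QEBSM) or straight from the SLSB array. The shared pattern, pulled out as an illustrative helper:

	/* Peek at the state of the buffer at first_to_check. */
	static unsigned char peek_buffer_state(struct qdio_q *q)
	{
		struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
		unsigned int start = q->first_to_check;
		unsigned int count = 1;
		unsigned char state = 0;

		if (irq->is_qebsm)
			qdio_do_eqbs(q, &state, &start, &count);
		else
			state = q->slsb.acc.val[start];
		return state;
	}
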
@@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
void *ptr;
int available;
- sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
+ sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
for (i=0;i<no_input_qs;i++) {
q=irq_ptr->input_qs[i];
@@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->queue_type=q_format;
q->int_parm=int_parm;
- q->irq=irq_ptr->irq;
+ q->schid = irq_ptr->schid;
q->irq_ptr = irq_ptr;
q->cdev = cdev;
q->mask=1<<(31-i);
@@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
/* fill in slsb */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
- set_slsb(&q->slsb.acc.val[j],
- SLSB_P_INPUT_NOT_INIT);
-/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
- }
+ if (!irq_ptr->is_qebsm) {
+ unsigned int count = 1;
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
+ }
}
for (i=0;i<no_output_qs;i++) {
@@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->queue_type=q_format;
q->int_parm=int_parm;
q->is_input_q=0;
- q->irq=irq_ptr->irq;
+ q->schid = irq_ptr->schid;
q->cdev = cdev;
q->irq_ptr = irq_ptr;
q->mask=1<<(31-i);
@@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
/* fill in slsb */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
- set_slsb(&q->slsb.acc.val[j],
- SLSB_P_OUTPUT_NOT_INIT);
-/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
- }
+ if (!irq_ptr->is_qebsm) {
+ unsigned int count = 1;
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
+ }
}
}
@@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
char dbf_text[15];
QDIO_DBF_TEXT5(0,trace,"newstate");
- sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
+ sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
QDIO_DBF_TEXT5(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
@@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
}
static inline void
-qdio_irq_check_sense(int irq, struct irb *irb)
+qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
{
char dbf_text[15];
if (irb->esw.esw0.erw.cons) {
- sprintf(dbf_text,"sens%4x",irq);
+ sprintf(dbf_text,"sens%4x",schid.sch_no);
QDIO_DBF_TEXT2(1,trace,dbf_text);
QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
@@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev)
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
- irq_ptr->irq);
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1,setup,"eq:timeo");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_CLEANUP:
- QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
- irq_ptr->irq);
+ QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
+ "irq=0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
/* I/O has been terminated by common I/O layer. */
- QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
- irq_ptr->irq);
+ QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, trace, "cio:term");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
if (get_device(&cdev->dev)) {
@@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
}
- qdio_irq_check_sense(irq_ptr->irq, irb);
+ qdio_irq_check_sense(irq_ptr->schid, irb);
#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text, "state:%d", irq_ptr->state);
@@ -1905,7 +2187,7 @@ int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
unsigned int queue_number)
{
- int cc;
+ int cc = 0;
struct qdio_q *q;
struct qdio_irq *irq_ptr;
void *ptr;
@@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return -ENODEV;
#ifdef CONFIG_QDIO_DEBUG
- *((int*)(&dbf_text[4])) = irq_ptr->irq;
+ *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
*((int*)(&dbf_text[0]))=flags;
*((int*)(&dbf_text[4]))=queue_number;
@@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
q=irq_ptr->input_qs[queue_number];
if (!q)
return -EINVAL;
- cc = do_siga_sync(q->irq, 0, q->mask);
+ if (!(irq_ptr->is_qebsm))
+ cc = do_siga_sync(q->schid, 0, q->mask);
} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
q=irq_ptr->output_qs[queue_number];
if (!q)
return -EINVAL;
- cc = do_siga_sync(q->irq, q->mask, 0);
+ if (!(irq_ptr->is_qebsm))
+ cc = do_siga_sync(q->schid, q->mask, 0);
} else
return -EINVAL;
@@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return cc;
}
-static unsigned char
-qdio_check_siga_needs(int sch)
+static inline void
+qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
+ unsigned long token)
+{
+ struct qdio_q *q;
+ int i;
+ unsigned int count, start_buf;
+ char dbf_text[15];
+
+ /*check if QEBSM is disabled */
+ if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
+ irq_ptr->is_qebsm = 0;
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ QDIO_DBF_TEXT0(0,setup,"noV=V");
+ return;
+ }
+ irq_ptr->sch_token = token;
+ /*input queue*/
+ for (i = 0; i < irq_ptr->no_input_qs;i++) {
+ q = irq_ptr->input_qs[i];
+ count = QDIO_MAX_BUFFERS_PER_Q;
+ start_buf = 0;
+ set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
+ }
+ sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+ sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+ /*output queue*/
+ for (i = 0; i < irq_ptr->no_output_qs; i++) {
+ q = irq_ptr->output_qs[i];
+ count = QDIO_MAX_BUFFERS_PER_Q;
+ start_buf = 0;
+ set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
+ }
+}
+
+static void
+qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
{
int result;
unsigned char qdioac;
-
struct {
struct chsc_header request;
- u16 reserved1;
+ u16 reserved1:10;
+ u16 ssid:2;
+ u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
@@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch)
u8 reserved5;
u16 sch;
u8 qfmt;
- u8 reserved6;
- u8 qdioac;
+ u8 parm;
+ u8 qdioac1;
u8 sch_class;
u8 reserved7;
u8 icnt;
u8 reserved8;
u8 ocnt;
+ u8 reserved9;
+ u8 mbccnt;
+ u16 qdioac2;
+ u64 sch_token;
} *ssqd_area;
+ QDIO_DBF_TEXT0(0,setup,"getssqd");
+ qdioac = 0;
ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
- "SIGAs for sch x%x.\n", sch);
- return CHSC_FLAG_SIGA_INPUT_NECESSARY ||
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
- CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
+ irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+ CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ return;
}
+
ssqd_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
-
- ssqd_area->first_sch = sch;
- ssqd_area->last_sch = sch;
-
- result=chsc(ssqd_area);
+ ssqd_area->first_sch = irq_ptr->schid.sch_no;
+ ssqd_area->last_sch = irq_ptr->schid.sch_no;
+ ssqd_area->ssid = irq_ptr->schid.ssid;
+ result = chsc(ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
- "SIGAs for sch x%x.\n",
- result,sch);
+ "SIGAs for sch 0.%x.%x.\n", result,
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
goto out;
}
if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon checking SIGA needs " \
- "is 0x%x. Using all SIGAs for sch x%x.\n",
- ssqd_area->response.code, sch);
+ "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
+ ssqd_area->response.code,
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
goto out;
}
if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
- (ssqd_area->sch != sch)) {
- QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
- "using all SIGAs.\n",sch);
+ (ssqd_area->sch != irq_ptr->schid.sch_no)) {
+ QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
+ "using all SIGAs.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
+ irq_ptr->is_qebsm = 0;
goto out;
}
-
- qdioac = ssqd_area->qdioac;
+ qdioac = ssqd_area->qdioac1;
out:
+ qdio_check_subchannel_qebsm(irq_ptr, qdioac,
+ ssqd_area->sch_token);
free_page ((unsigned long) ssqd_area);
- return qdioac;
+ irq_ptr->qdioac = qdioac;
}
static unsigned int
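
Note that the error paths above combine the CHSC_FLAG_SIGA_* constants with the logical || operator, which evaluates to 1 rather than to the union of the flag bits; the "worst case" branch uses the bitwise | that the "all flags set" comments appear to intend. A sketch of that fallback with the flags treated as the distinct bits they are tested as later (irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY):

	/* Assume every SIGA variant is required and QEBSM is unavailable
	 * when the SSQD data cannot be obtained. */
	static void assume_all_sigas(struct qdio_irq *irq_ptr)
	{
		irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
				  CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
				  CHSC_FLAG_SIGA_SYNC_NECESSARY;
		irq_ptr->is_qebsm = 0;
		irq_ptr->sch_token = 0;
	}
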
@@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void)
sprintf(dbf_text,"hydrati%1x", hydra_thinints);
QDIO_DBF_TEXT0(0,setup,dbf_text);
+#ifdef CONFIG_64BIT
+ /* Check for QEBSM support in general (bit 58). */
+ is_passthrough = css_general_characteristics.qebsm;
+#endif
+ sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+
/* Check for aif time delay disablement fac (bit 56). If installed,
* omit svs even under lpar (good point by rick again) */
omit_svs = css_general_characteristics.aif_tdd;
@@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
/* set to 0x10000000 to enable
* time delay disablement facility */
u32 reserved5;
- u32 subsystem_id;
+ struct subchannel_id schid;
u32 reserved6[1004];
struct chsc_header response;
u32 reserved7;
@@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scssc_area) {
QDIO_PRINT_WARN("No memory for setting indicators on " \
- "subchannel x%x.\n", irq_ptr->irq);
+ "subchannel 0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
return -ENOMEM;
}
scssc_area->request = (struct chsc_header) {
@@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
scssc_area->ks = QDIO_STORAGE_KEY;
scssc_area->kc = QDIO_STORAGE_KEY;
scssc_area->isc = TIQDIO_THININT_ISC;
- scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
+ scssc_area->schid = irq_ptr->schid;
/* enables the time delay disablement facility. Don't care
* whether it is really there (i.e. we haven't checked for
* it) */
@@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
QDIO_PRINT_WARN("Time delay disablement facility " \
"not available\n");
-
-
result = chsc(scssc_area);
if (result) {
- QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
- "cc=%i.\n",irq_ptr->irq,result);
+ QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
+ "cc=%i.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
result = -EIO;
goto out;
}
@@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsscf_area) {
QDIO_PRINT_WARN("No memory for setting delay target on " \
- "subchannel x%x.\n", irq_ptr->irq);
+ "subchannel 0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
return -ENOMEM;
}
scsscf_area->request = (struct chsc_header) {
@@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
result=chsc(scsscf_area);
if (result) {
- QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
- "cc=%i. Continuing.\n",irq_ptr->irq,result);
+ QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
+ "cc=%i. Continuing.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result);
result = -EIO;
goto out;
}
@@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- sprintf(dbf_text,"qcln%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
down(&irq_ptr->setting_up_sema);
- sprintf(dbf_text,"qsqs%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev)
down(&irq_ptr->setting_up_sema);
- sprintf(dbf_text,"qfqs%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
irq_ptr = cdev->private->qdio_data;
if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
- sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
+ sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1,trace,dbf_text);
QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
QDIO_PRINT_ERR("received check condition on establish " \
- "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
- irq_ptr->irq,cstat,dstat);
+ "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ cstat,dstat);
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
}
@@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_DBF_TEXT2(1,setup,"eq:no de");
QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
"device end: dstat=%02x, cstat=%02x\n",
- irq_ptr->irq, dstat, cstat);
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1;
}
@@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_DBF_TEXT2(1,setup,"eq:badio");
QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq %04x: got "
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
"the following devstat: dstat=%02x, "
- "cstat=%02x\n",
- irq_ptr->irq, dstat, cstat);
+ "cstat=%02x\n", irq_ptr->schid.ssid,
+ irq_ptr->schid.sch_no, dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1;
}
@@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
irq_ptr = cdev->private->qdio_data;
- sprintf(dbf_text,"qehi%4x",cdev->private->irq);
+ sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data)
int rc;
char dbf_text[15];
- sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq);
+ sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr;
char dbf_text[15];
- sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq);
+ sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm=init_data->int_parm;
- irq_ptr->irq = init_data->cdev->private->irq;
+ irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->no_input_qs=init_data->no_input_qs;
irq_ptr->no_output_qs=init_data->no_output_qs;
@@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
QDIO_DBF_TEXT2(0,setup,dbf_text);
if (irq_ptr->is_thinint_irq) {
- irq_ptr->dev_st_chg_ind=qdio_get_indicator();
+ irq_ptr->dev_st_chg_ind = qdio_get_indicator();
QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
if (!irq_ptr->dev_st_chg_ind) {
QDIO_PRINT_WARN("no indicator location available " \
- "for irq 0x%x\n",irq_ptr->irq);
+ "for irq 0.%x.%x\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdio_release_irq_memory(irq_ptr);
return -ENOBUFS;
}
@@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
/* fill in qib */
+ irq_ptr->is_qebsm = is_passthrough;
+ if (irq_ptr->is_qebsm)
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
irq_ptr->qib.qfmt=init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data)
tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
}
- sprintf(dbf_text,"qest%4x",cdev->private->irq);
+ sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data)
sprintf(dbf_text,"eq:io%4x",result);
QDIO_DBF_TEXT2(1,setup,dbf_text);
}
- QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->irq,result,result2);
+ QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
+ "returned %i, next try returned %i\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result, result2);
result=result2;
if (result)
ccw_device_set_timeout(cdev, 0);
@@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data)
return -EIO;
}
- irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
+ qdio_get_ssqd_information(irq_ptr);
/* if this gets set once, we're running under VM and can omit SVSes */
if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
omit_svs=1;
@@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
goto out;
}
- sprintf(dbf_text,"qact%4x", irq_ptr->irq);
+ sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(0,setup,dbf_text);
QDIO_DBF_TEXT2(0,trace,dbf_text);
@@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags)
sprintf(dbf_text,"aq:io%4x",result);
QDIO_DBF_TEXT2(1,setup,dbf_text);
}
- QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->irq,result,result2);
+ QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
+ "returned %i, next try returned %i\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result, result2);
result=result2;
}
@@ -3015,30 +3373,40 @@ static inline void
qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
unsigned int count, struct qdio_buffer *buffers)
{
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+ qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
+ if (irq->is_qebsm) {
+ while (count)
+ set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
+ return;
+ }
for (;;) {
- set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY);
+ set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
count--;
if (!count) break;
- qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+ qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}
-
- /* not necessary, as the queues are synced during the SIGA read */
- /*SYNC_MEMORY;*/
}
static inline void
qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
unsigned int count, struct qdio_buffer *buffers)
{
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
+ qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
+ if (irq->is_qebsm) {
+ while (count)
+ set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
+ return;
+ }
+
for (;;) {
- set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED);
+ set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
count--;
if (!count) break;
- qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+ qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}
-
- /* SIGA write will sync the queues */
- /*SYNC_MEMORY;*/
}
static inline void
@@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
struct qdio_buffer *buffers)
{
int used_elements;
+ unsigned int cnt, start_buf;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
/* This is the outbound handling of queues */
#ifdef QDIO_PERFORMANCE_STATS
@@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
* SYNC_MEMORY :-/ ), we try to
* fast-requeue buffers
*/
- if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
- &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
- SLSB_CU_OUTPUT_PRIMED) {
+ if (irq->is_qebsm) {
+ cnt = 1;
+ start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ qdio_do_eqbs(q, &state, &start_buf, &cnt);
+ } else
+ state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
+ &(QDIO_MAX_BUFFERS_PER_Q-1) ];
+ if (state != SLSB_CU_OUTPUT_PRIMED) {
qdio_kick_outbound_q(q);
} else {
QDIO_DBF_TEXT3(0,trace, "fast-req");
@@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
- sprintf(dbf_text,"doQD%04x",cdev->private->irq);
+ sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
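
The qdio.c hunks above replace the bare irq number with the new struct subchannel_id in debug and warning output (printed bus-id style as 0.<ssid>.<sch_no>) and route buffer-state updates through set_slsb(q, &qidx, state, &count), so the QEBSM path can hand the whole count to the helper while the non-QEBSM path still walks the ring one entry at a time. Below is a minimal, standalone sketch of that wrap-around walk, not the kernel code: mark_buffers() and the 0x42 state value are made up for illustration, and QDIO_MAX_BUFFERS_PER_Q is assumed to be 128.

#include <stdio.h>

/* Hypothetical standalone sketch of the circular fill pattern used by
 * qdio_do_qdio_fill_input()/_output() on the non-QEBSM path: starting
 * at qidx, mark `count` buffer-state entries, wrapping at the assumed
 * queue size of 128 entries. */
#define QDIO_MAX_BUFFERS_PER_Q 128

static void mark_buffers(unsigned char *slsb, unsigned int qidx,
			 unsigned int count, unsigned char state)
{
	qidx &= QDIO_MAX_BUFFERS_PER_Q - 1;		/* normalize start index */
	while (count--) {
		slsb[qidx] = state;			/* one state byte per buffer */
		qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
	}
}

int main(void)
{
	unsigned char slsb[QDIO_MAX_BUFFERS_PER_Q] = { 0 };

	/* Mark 8 buffers starting near the end of the ring to show the
	 * wrap-around; 0x42 stands in for a real SLSB state value. */
	mark_buffers(slsb, 124, 8, 0x42);
	printf("slsb[127]=0x%02x slsb[0]=0x%02x slsb[3]=0x%02x slsb[4]=0x%02x\n",
	       slsb[127], slsb[0], slsb[3], slsb[4]);
	return 0;
}

Compiled and run, the sketch reports slsb[127], slsb[0] and slsb[3] as 0x42 with slsb[4] untouched, i.e. the marking wraps from the end of the ring back to the start.
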
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 328e31cc6854..fa385e761fe1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -3,14 +3,15 @@
#include <asm/page.h>
-#define VERSION_CIO_QDIO_H "$Revision: 1.33 $"
+#include "schid.h"
+
+#define VERSION_CIO_QDIO_H "$Revision: 1.40 $"
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_VERBOSE_LEVEL 5
#endif /* CONFIG_QDIO_DEBUG */
-
#define QDIO_USE_PROCESSING_STATE
#ifdef CONFIG_QDIO_PERF_STATS
@@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
/*
* Some instructions as assembly
*/
+
+static inline int
+do_sqbs(unsigned long sch, unsigned char state, int queue,
+ unsigned int *start, unsigned int *count)
+{
+#ifdef CONFIG_64BIT
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _sch asm ("1") = sch;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+ asm volatile (
+ " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
+ : "+d" (_ccq), "+d" (_queuestart)
+ : "d" ((unsigned long)state), "d" (_sch)
+ : "memory", "cc"
+ );
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+#else
+ return 0;
+#endif
+}
+
+static inline int
+do_eqbs(unsigned long sch, unsigned char *state, int queue,
+ unsigned int *start, unsigned int *count)
+{
+#ifdef CONFIG_64BIT
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _sch asm ("1") = sch;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _state = 0;
+
+ asm volatile (
+ " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
+ : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+ : "d" (_sch)
+ : "memory", "cc"
+ );
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+ *state = _state & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+#else
+ return 0;
+#endif
+}
+
+
static inline int
-do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
+do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{
int cc;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,2 \n\t"
"lr 1,%1 \n\t"
@@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+ : "d" (schid), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,2 \n\t"
"llgfr 1,%1 \n\t"
@@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+ : "d" (schid), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return cc;
}
static inline int
-do_siga_input(unsigned int irq, unsigned int mask)
+do_siga_input(struct subchannel_id schid, unsigned int mask)
{
int cc;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,1 \n\t"
"lr 1,%1 \n\t"
@@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask)
+ : "d" (schid), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,1 \n\t"
"llgfr 1,%1 \n\t"
@@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask)
+ : "d" (schid), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return cc;
}
static inline int
-do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
+do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
+ unsigned int fc)
{
int cc;
__u32 busy_bit;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,0 \n\t"
"lr 1,%2 \n\t"
@@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
".long 0b,2b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
- : "d" (0x10000|irq), "d" (mask),
+ : "d" (schid), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "0", "1", "2", "memory"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
- "lghi 0,0 \n\t"
- "llgfr 1,%2 \n\t"
+ "llgfr 0,%5 \n\t"
+ "lgr 1,%2 \n\t"
"llgfr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
@@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
".quad 0b,1b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
- : "d" (0x10000|irq), "d" (mask),
- "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
+ : "d" (schid), "d" (mask),
+ "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
: "cc", "0", "1", "2", "memory"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
(*bb) = busy_bit;
return cc;
@@ -407,21 +461,21 @@ do_clear_global_summary(void)
unsigned long time;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lgr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return time;
}
@@ -488,42 +542,21 @@ struct qdio_perf_stats {
#define MY_MODULE_STRING(x) #x
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
-#endif /* CONFIG_ARCH_S390X */
-
-#ifdef CONFIG_QDIO_DEBUG
-#define set_slsb(x,y) \
- if(q->queue_type==QDIO_TRACE_QTYPE) { \
- if(q->is_input_q) { \
- QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } else { \
- QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } \
- } \
- qdio_set_slsb(x,y); \
- if(q->queue_type==QDIO_TRACE_QTYPE) { \
- if(q->is_input_q) { \
- QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } else { \
- QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } \
- }
-#else /* CONFIG_QDIO_DEBUG */
-#define set_slsb(x,y) qdio_set_slsb(x,y)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* CONFIG_64BIT */
struct qdio_q {
volatile struct slsb slsb;
char unused[QDIO_MAX_BUFFERS_PER_Q];
- __u32 * volatile dev_st_chg_ind;
+ __u32 * dev_st_chg_ind;
int is_input_q;
- int irq;
+ struct subchannel_id schid;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
@@ -568,6 +601,7 @@ struct qdio_q {
struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
+
enum qdio_irq_states state;
/* used to store the error condition during a data transfer */
@@ -617,13 +651,17 @@ struct qdio_irq {
__u32 * volatile dev_st_chg_ind;
unsigned long int_parm;
- int irq;
+ struct subchannel_id schid;
unsigned int is_iqdio_irq;
unsigned int is_thinint_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
+ /* QEBSM facility */
+ unsigned int is_qebsm;
+ unsigned long sch_token;
+
enum qdio_irq_states state;
unsigned int no_input_qs;
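
The new do_sqbs()/do_eqbs() wrappers above emit the QEBSM SQBS/EQBS instructions via .insn and are only active on CONFIG_64BIT (otherwise they compile to return 0). Both pack the queue number and the starting buffer index into a single 64-bit operand (queue in the upper 32 bits, start index in the low byte) and pass the count in register 0; the instruction writes the updated start and count back. The standalone sketch below only mirrors that packing and unpacking with made-up values; it issues no instruction and is not part of the patch.

#include <stdio.h>

/* Mirrors the 64-bit operand packing used by do_sqbs()/do_eqbs():
 * queue number in the upper 32 bits, start index in the low byte.
 * All values are arbitrary and purely illustrative. */
int main(void)
{
	unsigned long long queue = 1, start = 0x2a, count = 16;
	unsigned long long queuestart = (queue << 32) | start;

	printf("packed queue/start operand: 0x%016llx\n", queuestart);
	printf("unpacked: queue=%llu start=0x%02llx count (low byte)=%llu\n",
	       queuestart >> 32, queuestart & 0xff, count & 0xff);
	return 0;
}
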
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
new file mode 100644
index 000000000000..54328fec5ade
--- /dev/null
+++ b/drivers/s390/cio/schid.h
@@ -0,0 +1,26 @@
+#ifndef S390_SCHID_H
+#define S390_SCHID_H
+
+struct subchannel_id {
+ __u32 reserved:13;
+ __u32 ssid:2;
+ __u32 one:1;
+ __u32 sch_no:16;
+} __attribute__ ((packed,aligned(4)));
+
+
+/* Helper function for sane state of pre-allocated subchannel_id. */
+static inline void
+init_subchannel_id(struct subchannel_id *schid)
+{
+ memset(schid, 0, sizeof(struct subchannel_id));
+ schid->one = 1;
+}
+
+static inline int
+schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
+{
+ return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
+}
+
+#endif /* S390_SCHID_H */
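
schid.h is new with this patch: subchannel_id packs ssid and sch_no into one 32-bit word with the "one" bit set by init_subchannel_id(), and schid_equal() is a plain memcmp. The qdio messages changed above print it bus-id style as 0.<ssid>.<sch_no>. A standalone, hedged usage sketch follows; it copies the helpers with unsigned int standing in for __u32 so it builds outside the kernel tree, and the exact bit placement on a non-s390 host does not matter here since only the named fields and memcmp are exercised.

#include <stdio.h>
#include <string.h>

/* Standalone copy of the schid.h layout; field order matches the
 * patch, exact bit placement is compiler/arch dependent outside the
 * kernel and is irrelevant for this demo. */
struct subchannel_id {
	unsigned int reserved:13;
	unsigned int ssid:2;
	unsigned int one:1;
	unsigned int sch_no:16;
} __attribute__ ((packed, aligned(4)));

static void init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(*schid));
	schid->one = 1;			/* sane pre-allocated state */
}

static int schid_equal(struct subchannel_id *a, struct subchannel_id *b)
{
	return !memcmp(a, b, sizeof(*a));
}

int main(void)
{
	struct subchannel_id a, b;

	init_subchannel_id(&a);
	a.ssid = 1;			/* example values only */
	a.sch_no = 0x4711;
	b = a;

	/* Bus-id style formatting as used in the qdio messages above. */
	printf("schid 0.%x.%04x, equal=%d\n", a.ssid, a.sch_no,
	       schid_equal(&a, &b));
	return 0;
}
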