Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/blacklist.c     |  36
-rw-r--r-- | drivers/s390/cio/ccwgroup.c      |  43
-rw-r--r-- | drivers/s390/cio/chsc.c          |  54
-rw-r--r-- | drivers/s390/cio/cio.c           | 102
-rw-r--r-- | drivers/s390/cio/cio.h           |   3
-rw-r--r-- | drivers/s390/cio/cmf.c           | 628
-rw-r--r-- | drivers/s390/cio/css.c           | 298
-rw-r--r-- | drivers/s390/cio/css.h           |   4
-rw-r--r-- | drivers/s390/cio/device.c        | 147
-rw-r--r-- | drivers/s390/cio/device.h        |  10
-rw-r--r-- | drivers/s390/cio/device_fsm.c    | 166
-rw-r--r-- | drivers/s390/cio/device_id.c     |  39
-rw-r--r-- | drivers/s390/cio/device_ops.c    |  31
-rw-r--r-- | drivers/s390/cio/device_pgid.c   | 215
-rw-r--r-- | drivers/s390/cio/device_status.c |   8
-rw-r--r-- | drivers/s390/cio/ioasm.h         | 220
-rw-r--r-- | drivers/s390/cio/qdio.c          |   7
-rw-r--r-- | drivers/s390/cio/qdio.h          | 208
18 files changed, 1347 insertions, 872 deletions
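Note on the cmf.c changes below: measurement data is now kept in a cmb_data structure, and cmf_copy_block() snapshots the hardware-updated measurement block by copying it twice and retrying until two consecutive copies match, so readers never see a half-updated block. The following standalone C sketch illustrates only that copy-until-stable pattern; the hw_block array, function name, and sizes are illustrative stand-ins and not part of the kernel interface.

/*
 * Minimal userspace sketch of the consistency-copy pattern used by
 * cmf_copy_block() in the diff below: the "hardware" may update the
 * measurement block at any time, so copy it twice and re-copy until
 * two back-to-back snapshots agree.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 64

/* Stand-in for the block that the hardware keeps updating. */
static unsigned char hw_block[BLOCK_SIZE];

static int copy_block_consistent(unsigned char *last_block, size_t size)
{
	unsigned char *reference_buf;

	reference_buf = calloc(1, size);
	if (!reference_buf)
		return -1;
	/* Re-copy until two consecutive snapshots are identical. */
	do {
		memcpy(last_block, hw_block, size);
		memcpy(reference_buf, hw_block, size);
	} while (memcmp(last_block, reference_buf, size) != 0);
	free(reference_buf);
	return 0;
}

int main(void)
{
	unsigned char snapshot[BLOCK_SIZE];

	if (copy_block_consistent(snapshot, sizeof(snapshot)) == 0)
		printf("consistent snapshot taken (%zu bytes)\n",
		       sizeof(snapshot));
	return 0;
}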
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 0960bef7b199..12c2d6b746e6 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -9,7 +9,6 @@ * Arnd Bergmann (arndb@de.ibm.com) */ -#include <linux/config.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/slab.h> @@ -224,39 +223,6 @@ is_blacklisted (int ssid, int devno) } #ifdef CONFIG_PROC_FS -static int -__s390_redo_validation(struct subchannel_id schid, void *data) -{ - int ret; - struct subchannel *sch; - - sch = get_subchannel_by_schid(schid); - if (sch) { - /* Already known. */ - put_device(&sch->dev); - return 0; - } - ret = css_probe_device(schid); - if (ret == -ENXIO) - return ret; /* We're through. */ - if (ret == -ENOMEM) - /* Stop validation for now. Bad, but no need for a panic. */ - return ret; - return 0; -} - -/* - * Function: s390_redo_validation - * Look for no longer blacklisted devices - * FIXME: there must be a better way to do this */ -static inline void -s390_redo_validation (void) -{ - CIO_TRACE_EVENT (0, "redoval"); - - for_each_subchannel(__s390_redo_validation, NULL); -} - /* * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore @@ -281,7 +247,7 @@ blacklist_parse_proc_parameters (char *buf) return; } - s390_redo_validation (); + css_schedule_reprobe(); } /* Iterator struct for all devices. */ diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index bdfee7fbaa2e..38954f5cd14c 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, struct ccwgroup_device *gdev; int i; int rc; - int del_drvdata; if (argc > 256) /* disallow dumb users */ return -EINVAL; @@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, atomic_set(&gdev->onoff, 0); - del_drvdata = 0; for (i = 0; i < argc; i++) { gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); @@ -180,18 +178,14 @@ ccwgroup_create(struct device *root, rc = -EINVAL; goto free_dev; } - } - for (i = 0; i < argc; i++) gdev->cdev[i]->dev.driver_data = gdev; - del_drvdata = 1; + } gdev->creator_id = creator_id; gdev->count = argc; - gdev->dev = (struct device ) { - .bus = &ccwgroup_bus_type, - .parent = root, - .release = ccwgroup_release, - }; + gdev->dev.bus = &ccwgroup_bus_type; + gdev->dev.parent = root; + gdev->dev.release = ccwgroup_release; snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", gdev->cdev[0]->dev.bus_id); @@ -226,9 +220,9 @@ error: free_dev: for (i = 0; i < argc; i++) if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - if (del_drvdata) + if (gdev->cdev[i]->dev.driver_data == gdev) gdev->cdev[i]->dev.driver_data = NULL; + put_device(&gdev->cdev[i]->dev); } kfree(gdev); return rc; @@ -319,7 +313,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const if (!try_module_get(gdrv->owner)) return -EINVAL; - value = simple_strtoul(buf, 0, 0); + value = simple_strtoul(buf, NULL, 0); ret = count; if (value == 1) ccwgroup_set_online(gdev); @@ -395,30 +389,31 @@ int ccwgroup_driver_register (struct ccwgroup_driver *cdriver) { /* register our new driver with the core */ - cdriver->driver = (struct device_driver) { - .bus = &ccwgroup_bus_type, - .name = cdriver->name, - }; + cdriver->driver.bus = &ccwgroup_bus_type; + cdriver->driver.name = cdriver->name; return driver_register(&cdriver->driver); } static int -__ccwgroup_driver_unregister_device(struct device *dev, void *data) +__ccwgroup_match_all(struct device *dev, void *data) { - 
__ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); - device_unregister(dev); - put_device(dev); - return 0; + return 1; } void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) { + struct device *dev; + /* We don't want ccwgroup devices to live longer than their driver. */ get_driver(&cdriver->driver); - driver_for_each_device(&cdriver->driver, NULL, NULL, - __ccwgroup_driver_unregister_device); + while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, + __ccwgroup_match_all))) { + __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); + device_unregister(dev); + put_device(dev); + } put_driver(&cdriver->driver); driver_unregister(&cdriver->driver); } diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 72187e54dcac..3bb4e472d73d 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -10,7 +10,6 @@ */ #include <linux/module.h> -#include <linux/config.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/device.h> @@ -239,13 +238,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) /* Check for single path devices. */ if (sch->schib.pmcw.pim == 0x80) goto out_unreg; - if (sch->vpm == mask) - goto out_unreg; if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && - (sch->schib.pmcw.lpum == mask) && - (sch->vpm == 0)) { + (sch->schib.pmcw.lpum == mask)) { int cc; cc = cio_clear(sch); @@ -260,6 +256,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) /* trigger path verification. */ if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); + else if (sch->lpm == mask) + goto out_unreg; out_unlock: spin_unlock_irq(&sch->lock); return 0; @@ -380,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) if (chp_mask == 0) { spin_unlock_irq(&sch->lock); + put_device(&sch->dev); return 0; } old_lpm = sch->lpm; @@ -394,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) spin_unlock_irq(&sch->lock); put_device(&sch->dev); - return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; + return 0; } @@ -918,12 +917,13 @@ chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) chp = to_channelpath(container_of(kobj, struct device, kobj)); css = to_css(chp->dev.parent); - size = sizeof(struct cmg_chars); + size = sizeof(struct cmg_entry); /* Only allow single reads. */ if (off || count < size) return 0; chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); + count = size; return count; } @@ -1392,10 +1392,8 @@ new_channel_path(int chpid) /* fill in status, etc. */ chp->id = chpid; chp->state = 1; - chp->dev = (struct device) { - .parent = &css[0]->device, - .release = chp_release, - }; + chp->dev.parent = &css[0]->device; + chp->dev.release = chp_release; snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); /* Obtain channel path description and fill it in. 
*/ @@ -1465,6 +1463,40 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no) return desc; } +static int reset_channel_path(struct channel_path *chp) +{ + int cc; + + cc = rchp(chp->id); + switch (cc) { + case 0: + return 0; + case 2: + return -EBUSY; + default: + return -ENODEV; + } +} + +static void reset_channel_paths_css(struct channel_subsystem *css) +{ + int i; + + for (i = 0; i <= __MAX_CHPID; i++) { + if (css->chps[i]) + reset_channel_path(css->chps[i]); + } +} + +void cio_reset_channel_paths(void) +{ + int i; + + for (i = 0; i <= __MAX_CSSID; i++) { + if (css[i] && css[i]->valid) + reset_channel_paths_css(css[i]); + } +} static int __init chsc_alloc_sei_area(void) diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 5b20d8c9c025..2e2882daefbb 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -11,17 +11,15 @@ */ #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> - #include <asm/cio.h> #include <asm/delay.h> #include <asm/irq.h> - +#include <asm/setup.h> #include "airq.h" #include "cio.h" #include "css.h" @@ -148,7 +146,7 @@ cio_tpi(void) sch->driver->irq(&sch->dev); spin_unlock(&sch->lock); irq_exit (); - __local_bh_enable(); + _local_bh_enable(); return 1; } @@ -193,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.pfch = sch->options.prefetch == 0; sch->orb.spnd = sch->options.suspend; sch->orb.ssic = sch->options.suspend && sch->options.inter; - sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; + sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; #ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only @@ -520,6 +518,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) memset(sch, 0, sizeof(struct subchannel)); spin_lock_init(&sch->lock); + mutex_init(&sch->reg_mutex); /* Set a name for the subchannel */ snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, @@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) sch->opm = 0xff; if (!cio_is_console(sch->schid)) chsc_validate_chpids(sch); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; CIO_DEBUG(KERN_INFO, 0, "Detected device %04x on subchannel 0.%x.%04X" @@ -798,7 +794,7 @@ struct subchannel * cio_get_console_subchannel(void) { if (!console_subchannel_in_use) - return 0; + return NULL; return &console_subchannel; } @@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid) return -EBUSY; } -extern void do_reipl(unsigned long devno); -static int -__shutdown_subchannel_easy(struct subchannel_id schid, void *data) +struct sch_match_id { + struct subchannel_id schid; + struct ccw_dev_id devid; + int rc; +}; + +static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid, + void *data) { struct schib schib; + struct sch_match_id *match_id = data; if (stsch_err(schid, &schib)) return -ENXIO; + if (match_id && schib.pmcw.dnv && + (schib.pmcw.dev == match_id->devid.devno) && + (schid.ssid == match_id->devid.ssid)) { + match_id->schid = schid; + match_id->rc = 0; + } if (!schib.pmcw.ena) return 0; switch(__disable_subchannel_easy(schid, &schib)) { @@ -864,17 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data) return 0; } -void -clear_all_subchannels(void) +static int 
clear_all_subchannels_and_match(struct ccw_dev_id *devid, + struct subchannel_id *schid) { + struct sch_match_id match_id; + + match_id.devid = *devid; + match_id.rc = -ENODEV; local_irq_disable(); - for_each_subchannel(__shutdown_subchannel_easy, NULL); + for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id); + if (match_id.rc == 0) + *schid = match_id.schid; + return match_id.rc; } + +void clear_all_subchannels(void) +{ + local_irq_disable(); + for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL); +} + +extern void do_reipl_asm(__u32 schid); + /* Make sure all subchannels are quiet before we re-ipl an lpar. */ -void -reipl(unsigned long devno) +void reipl_ccw_dev(struct ccw_dev_id *devid) { - clear_all_subchannels(); - do_reipl(devno); + struct subchannel_id schid; + + if (clear_all_subchannels_and_match(devid, &schid)) + panic("IPL Device not found\n"); + cio_reset_channel_paths(); + do_reipl_asm(*((__u32*)&schid)); +} + +extern struct schib ipl_schib; + +/* + * ipl_save_parameters gets called very early. It is not allowed to access + * anything in the bss section at all. The bss section is not cleared yet, + * but may contain some ipl parameters written by the firmware. + * These parameters (if present) are copied to 0x2000. + * To avoid corruption of the ipl parameters, all variables used by this + * function must reside on the stack or in the data section. + */ +void ipl_save_parameters(void) +{ + struct subchannel_id schid; + unsigned int *ipl_ptr; + void *src, *dst; + + schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; + if (!schid.one) + return; + if (stsch(schid, &ipl_schib)) + return; + if (!ipl_schib.pmcw.dnv) + return; + ipl_devno = ipl_schib.pmcw.dev; + ipl_flags |= IPL_DEVNO_VALID; + if (!ipl_schib.pmcw.qf) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0ca987344e07..4541c1af4b66 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -2,6 +2,7 @@ #define S390_CIO_H #include "schid.h" +#include <linux/mutex.h> /* * where we put the ssd info @@ -87,7 +88,7 @@ struct orb { struct subchannel { struct subchannel_id schid; spinlock_t lock; /* subchannel lock */ - + struct mutex reg_mutex; enum { SUBCHANNEL_TYPE_IO = 0, SUBCHANNEL_TYPE_CHSC = 1, diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 07ef3f640f4a..828b2d334f0a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -3,9 +3,10 @@ * * Linux on zSeries Channel Measurement Facility support * - * Copyright 2000,2003 IBM Corporation + * Copyright 2000,2006 IBM Corporation * - * Author: Arnd Bergmann <arndb@de.ibm.com> + * Authors: Arnd Bergmann <arndb@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> * * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> * @@ -96,9 +97,9 @@ module_param(format, bool, 0444); /** * struct cmb_operations - functions to use depending on cmb_format * - * all these functions operate on a struct cmf_device. There is only - * one instance of struct cmb_operations because all cmf_device - * objects are guaranteed to be of the same type. + * Most of these functions operate on a struct ccw_device. There is only + * one instance of struct cmb_operations because the format of the measurement + * data is guaranteed to be the same for every ccw_device. 
* * @alloc: allocate memory for a channel measurement block, * either with the help of a special pool or with kmalloc @@ -107,6 +108,7 @@ module_param(format, bool, 0444); * @readall: read a measurement block in a common format * @reset: clear the data in the associated measurement block and * reset its time stamp + * @align: align an allocated block so that the hardware can use it */ struct cmb_operations { int (*alloc) (struct ccw_device*); @@ -115,11 +117,19 @@ struct cmb_operations { u64 (*read) (struct ccw_device*, int); int (*readall)(struct ccw_device*, struct cmbdata *); void (*reset) (struct ccw_device*); + void * (*align) (void *); struct attribute_group *attr_group; }; static struct cmb_operations *cmbops; +struct cmb_data { + void *hw_block; /* Pointer to block updated by hardware */ + void *last_block; /* Last changed block copied from hardware block */ + int size; /* Size of hw_block and last_block */ + unsigned long long last_update; /* when last_block was updated */ +}; + /* our user interface is designed in terms of nanoseconds, * while the hardware measures total times in its own * unit.*/ @@ -226,63 +236,229 @@ struct set_schib_struct { unsigned long address; wait_queue_head_t wait; int ret; + struct kref kref; }; +static void cmf_set_schib_release(struct kref *kref) +{ + struct set_schib_struct *set_data; + + set_data = container_of(kref, struct set_schib_struct, kref); + kfree(set_data); +} + +#define CMF_PENDING 1 + static int set_schib_wait(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) { - struct set_schib_struct s = { - .mme = mme, - .mbfc = mbfc, - .address = address, - .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait), - }; + struct set_schib_struct *set_data; + int ret; spin_lock_irq(cdev->ccwlock); - s.ret = set_schib(cdev, mme, mbfc, address); - if (s.ret != -EBUSY) { - goto out_nowait; + if (!cdev->private->cmb) { + ret = -ENODEV; + goto out; } + set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC); + if (!set_data) { + ret = -ENOMEM; + goto out; + } + init_waitqueue_head(&set_data->wait); + kref_init(&set_data->kref); + set_data->mme = mme; + set_data->mbfc = mbfc; + set_data->address = address; + + ret = set_schib(cdev, mme, mbfc, address); + if (ret != -EBUSY) + goto out_put; if (cdev->private->state != DEV_STATE_ONLINE) { - s.ret = -EBUSY; /* if the device is not online, don't even try again */ - goto out_nowait; + ret = -EBUSY; + goto out_put; } + cdev->private->state = DEV_STATE_CMFCHANGE; - cdev->private->cmb_wait = &s; - s.ret = 1; + set_data->ret = CMF_PENDING; + cdev->private->cmb_wait = set_data; spin_unlock_irq(cdev->ccwlock); - if (wait_event_interruptible(s.wait, s.ret != 1)) { + if (wait_event_interruptible(set_data->wait, + set_data->ret != CMF_PENDING)) { spin_lock_irq(cdev->ccwlock); - if (s.ret == 1) { - s.ret = -ERESTARTSYS; - cdev->private->cmb_wait = 0; + if (set_data->ret == CMF_PENDING) { + set_data->ret = -ERESTARTSYS; if (cdev->private->state == DEV_STATE_CMFCHANGE) cdev->private->state = DEV_STATE_ONLINE; } spin_unlock_irq(cdev->ccwlock); } - return s.ret; - -out_nowait: + spin_lock_irq(cdev->ccwlock); + cdev->private->cmb_wait = NULL; + ret = set_data->ret; +out_put: + kref_put(&set_data->kref, cmf_set_schib_release); +out: spin_unlock_irq(cdev->ccwlock); - return s.ret; + return ret; } void retry_set_schib(struct ccw_device *cdev) { - struct set_schib_struct *s; + struct set_schib_struct *set_data; + + set_data = cdev->private->cmb_wait; + if (!set_data) { + WARN_ON(1); + return; + } + 
kref_get(&set_data->kref); + set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc, + set_data->address); + wake_up(&set_data->wait); + kref_put(&set_data->kref, cmf_set_schib_release); +} + +static int cmf_copy_block(struct ccw_device *cdev) +{ + struct subchannel *sch; + void *reference_buf; + void *hw_block; + struct cmb_data *cmb_data; + + sch = to_subchannel(cdev->dev.parent); + + if (stsch(sch->schid, &sch->schib)) + return -ENODEV; + + if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { + /* Don't copy if a start function is in progress. */ + if ((!sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) && + (sch->schib.scsw.actl & + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && + (!sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) + return -EBUSY; + } + cmb_data = cdev->private->cmb; + hw_block = cmbops->align(cmb_data->hw_block); + if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size)) + /* No need to copy. */ + return 0; + reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC); + if (!reference_buf) + return -ENOMEM; + /* Ensure consistency of block copied from hardware. */ + do { + memcpy(cmb_data->last_block, hw_block, cmb_data->size); + memcpy(reference_buf, hw_block, cmb_data->size); + } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); + cmb_data->last_update = get_clock(); + kfree(reference_buf); + return 0; +} + +struct copy_block_struct { + wait_queue_head_t wait; + int ret; + struct kref kref; +}; + +static void cmf_copy_block_release(struct kref *kref) +{ + struct copy_block_struct *copy_block; + + copy_block = container_of(kref, struct copy_block_struct, kref); + kfree(copy_block); +} + +static int cmf_cmb_copy_wait(struct ccw_device *cdev) +{ + struct copy_block_struct *copy_block; + int ret; + unsigned long flags; + + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + ret = -ENODEV; + goto out; + } + copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC); + if (!copy_block) { + ret = -ENOMEM; + goto out; + } + init_waitqueue_head(©_block->wait); + kref_init(©_block->kref); + + ret = cmf_copy_block(cdev); + if (ret != -EBUSY) + goto out_put; + + if (cdev->private->state != DEV_STATE_ONLINE) { + ret = -EBUSY; + goto out_put; + } + + cdev->private->state = DEV_STATE_CMFUPDATE; + copy_block->ret = CMF_PENDING; + cdev->private->cmb_wait = copy_block; + + spin_unlock_irqrestore(cdev->ccwlock, flags); + if (wait_event_interruptible(copy_block->wait, + copy_block->ret != CMF_PENDING)) { + spin_lock_irqsave(cdev->ccwlock, flags); + if (copy_block->ret == CMF_PENDING) { + copy_block->ret = -ERESTARTSYS; + if (cdev->private->state == DEV_STATE_CMFUPDATE) + cdev->private->state = DEV_STATE_ONLINE; + } + spin_unlock_irqrestore(cdev->ccwlock, flags); + } + spin_lock_irqsave(cdev->ccwlock, flags); + cdev->private->cmb_wait = NULL; + ret = copy_block->ret; +out_put: + kref_put(©_block->kref, cmf_copy_block_release); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; +} + +void cmf_retry_copy_block(struct ccw_device *cdev) +{ + struct copy_block_struct *copy_block; - s = cdev->private->cmb_wait; - cdev->private->cmb_wait = 0; - if (!s) { + copy_block = cdev->private->cmb_wait; + if (!copy_block) { WARN_ON(1); return; } - s->ret = set_schib(cdev, s->mme, s->mbfc, s->address); - wake_up(&s->wait); + kref_get(©_block->kref); + copy_block->ret = cmf_copy_block(cdev); + wake_up(©_block->wait); + kref_put(©_block->kref, cmf_copy_block_release); +} + +static void cmf_generic_reset(struct ccw_device *cdev) +{ + struct cmb_data 
*cmb_data; + + spin_lock_irq(cdev->ccwlock); + cmb_data = cdev->private->cmb; + if (cmb_data) { + memset(cmb_data->last_block, 0, cmb_data->size); + /* + * Need to reset hw block as well to make the hardware start + * from 0 again. + */ + memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); + cmb_data->last_update = 0; + } + cdev->private->cmb_start_time = get_clock(); + spin_unlock_irq(cdev->ccwlock); } /** @@ -343,8 +519,8 @@ struct cmb { /* insert a single device into the cmb_area list * called with cmb_area.lock held from alloc_cmb */ -static inline int -alloc_cmb_single (struct ccw_device *cdev) +static inline int alloc_cmb_single (struct ccw_device *cdev, + struct cmb_data *cmb_data) { struct cmb *cmb; struct ccw_device_private *node; @@ -358,10 +534,12 @@ alloc_cmb_single (struct ccw_device *cdev) /* find first unused cmb in cmb_area.mem. * this is a little tricky: cmb_area.list - * remains sorted by ->cmb pointers */ + * remains sorted by ->cmb->hw_data pointers */ cmb = cmb_area.mem; list_for_each_entry(node, &cmb_area.list, cmb_list) { - if ((struct cmb*)node->cmb > cmb) + struct cmb_data *data; + data = node->cmb; + if ((struct cmb*)data->hw_block > cmb) break; cmb++; } @@ -372,7 +550,8 @@ alloc_cmb_single (struct ccw_device *cdev) /* insert new cmb */ list_add_tail(&cdev->private->cmb_list, &node->cmb_list); - cdev->private->cmb = cmb; + cmb_data->hw_block = cmb; + cdev->private->cmb = cmb_data; ret = 0; out: spin_unlock_irq(cdev->ccwlock); @@ -385,7 +564,19 @@ alloc_cmb (struct ccw_device *cdev) int ret; struct cmb *mem; ssize_t size; + struct cmb_data *cmb_data; + + /* Allocate private cmb_data. */ + cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); + if (!cmb_data) + return -ENOMEM; + cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL); + if (!cmb_data->last_block) { + kfree(cmb_data); + return -ENOMEM; + } + cmb_data->size = sizeof(struct cmb); spin_lock(&cmb_area.lock); if (!cmb_area.mem) { @@ -414,29 +605,36 @@ alloc_cmb (struct ccw_device *cdev) } /* do the actual allocation */ - ret = alloc_cmb_single(cdev); + ret = alloc_cmb_single(cdev, cmb_data); out: spin_unlock(&cmb_area.lock); - + if (ret) { + kfree(cmb_data->last_block); + kfree(cmb_data); + } return ret; } -static void -free_cmb(struct ccw_device *cdev) +static void free_cmb(struct ccw_device *cdev) { struct ccw_device_private *priv; - - priv = cdev->private; + struct cmb_data *cmb_data; spin_lock(&cmb_area.lock); spin_lock_irq(cdev->ccwlock); + priv = cdev->private; + if (list_empty(&priv->cmb_list)) { /* already freed */ goto out; } + cmb_data = priv->cmb; priv->cmb = NULL; + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); list_del_init(&priv->cmb_list); if (list_empty(&cmb_area.list)) { @@ -451,83 +649,97 @@ out: spin_unlock(&cmb_area.lock); } -static int -set_cmb(struct ccw_device *cdev, u32 mme) +static int set_cmb(struct ccw_device *cdev, u32 mme) { u16 offset; + struct cmb_data *cmb_data; + unsigned long flags; - if (!cdev->private->cmb) + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + spin_unlock_irqrestore(cdev->ccwlock, flags); return -EINVAL; - - offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0; + } + cmb_data = cdev->private->cmb; + offset = mme ? 
(struct cmb *)cmb_data->hw_block - cmb_area.mem : 0; + spin_unlock_irqrestore(cdev->ccwlock, flags); return set_schib_wait(cdev, mme, 0, offset); } -static u64 -read_cmb (struct ccw_device *cdev, int index) +static u64 read_cmb (struct ccw_device *cdev, int index) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. with mvc */ - struct cmb cmb; - unsigned long flags; + struct cmb *cmb; u32 val; + int ret; + unsigned long flags; + + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return 0; spin_lock_irqsave(cdev->ccwlock, flags); if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return 0; + ret = 0; + goto out; } - - cmb = *(struct cmb*)cdev->private->cmb; - spin_unlock_irqrestore(cdev->ccwlock, flags); + cmb = ((struct cmb_data *)cdev->private->cmb)->last_block; switch (index) { case cmb_ssch_rsch_count: - return cmb.ssch_rsch_count; + ret = cmb->ssch_rsch_count; + goto out; case cmb_sample_count: - return cmb.sample_count; + ret = cmb->sample_count; + goto out; case cmb_device_connect_time: - val = cmb.device_connect_time; + val = cmb->device_connect_time; break; case cmb_function_pending_time: - val = cmb.function_pending_time; + val = cmb->function_pending_time; break; case cmb_device_disconnect_time: - val = cmb.device_disconnect_time; + val = cmb->device_disconnect_time; break; case cmb_control_unit_queuing_time: - val = cmb.control_unit_queuing_time; + val = cmb->control_unit_queuing_time; break; case cmb_device_active_only_time: - val = cmb.device_active_only_time; + val = cmb->device_active_only_time; break; default: - return 0; + ret = 0; + goto out; } - return time_to_avg_nsec(val, cmb.sample_count); + ret = time_to_avg_nsec(val, cmb->sample_count); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static int -readall_cmb (struct ccw_device *cdev, struct cmbdata *data) +static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. 
with mvc */ - struct cmb cmb; - unsigned long flags; + struct cmb *cmb; + struct cmb_data *cmb_data; u64 time; + unsigned long flags; + int ret; + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return ret; spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return -ENODEV; + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = -ENODEV; + goto out; } - - cmb = *(struct cmb*)cdev->private->cmb; - time = get_clock() - cdev->private->cmb_start_time; - spin_unlock_irqrestore(cdev->ccwlock, flags); + if (cmb_data->last_update == 0) { + ret = -EAGAIN; + goto out; + } + cmb = cmb_data->last_block; + time = cmb_data->last_update - cdev->private->cmb_start_time; memset(data, 0, sizeof(struct cmbdata)); @@ -538,31 +750,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data) data->elapsed_time = (time * 1000) >> 12; /* copy data to new structure */ - data->ssch_rsch_count = cmb.ssch_rsch_count; - data->sample_count = cmb.sample_count; + data->ssch_rsch_count = cmb->ssch_rsch_count; + data->sample_count = cmb->sample_count; /* time fields are converted to nanoseconds while copying */ - data->device_connect_time = time_to_nsec(cmb.device_connect_time); - data->function_pending_time = time_to_nsec(cmb.function_pending_time); - data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); + data->device_connect_time = time_to_nsec(cmb->device_connect_time); + data->function_pending_time = time_to_nsec(cmb->function_pending_time); + data->device_disconnect_time = + time_to_nsec(cmb->device_disconnect_time); data->control_unit_queuing_time - = time_to_nsec(cmb.control_unit_queuing_time); + = time_to_nsec(cmb->control_unit_queuing_time); data->device_active_only_time - = time_to_nsec(cmb.device_active_only_time); + = time_to_nsec(cmb->device_active_only_time); + ret = 0; +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; +} - return 0; +static void reset_cmb(struct ccw_device *cdev) +{ + cmf_generic_reset(cdev); } -static void -reset_cmb(struct ccw_device *cdev) +static void * align_cmb(void *area) { - struct cmb *cmb; - spin_lock_irq(cdev->ccwlock); - cmb = cdev->private->cmb; - if (cmb) - memset (cmb, 0, sizeof (*cmb)); - cdev->private->cmb_start_time = get_clock(); - spin_unlock_irq(cdev->ccwlock); + return area; } static struct attribute_group cmf_attr_group; @@ -574,6 +787,7 @@ static struct cmb_operations cmbops_basic = { .read = read_cmb, .readall = readall_cmb, .reset = reset_cmb, + .align = align_cmb, .attr_group = &cmf_attr_group, }; @@ -610,22 +824,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c) return (struct cmbe*)addr; } -static int -alloc_cmbe (struct ccw_device *cdev) +static int alloc_cmbe (struct ccw_device *cdev) { struct cmbe *cmbe; - cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL); + struct cmb_data *cmb_data; + int ret; + + cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL); if (!cmbe) return -ENOMEM; - + cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); + if (!cmb_data) { + ret = -ENOMEM; + goto out_free; + } + cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL); + if (!cmb_data->last_block) { + ret = -ENOMEM; + goto out_free; + } + cmb_data->size = sizeof(struct cmbe); spin_lock_irq(cdev->ccwlock); if (cdev->private->cmb) { - kfree(cmbe); spin_unlock_irq(cdev->ccwlock); - return -EBUSY; + ret = -EBUSY; + goto out_free; } - - cdev->private->cmb = cmbe; + cmb_data->hw_block = cmbe; + cdev->private->cmb = cmb_data; spin_unlock_irq(cdev->ccwlock); /* 
activate global measurement if this is the first channel */ @@ -636,14 +862,24 @@ alloc_cmbe (struct ccw_device *cdev) spin_unlock(&cmb_area.lock); return 0; +out_free: + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); + kfree(cmbe); + return ret; } -static void -free_cmbe (struct ccw_device *cdev) +static void free_cmbe (struct ccw_device *cdev) { + struct cmb_data *cmb_data; + spin_lock_irq(cdev->ccwlock); - kfree(cdev->private->cmb); + cmb_data = cdev->private->cmb; cdev->private->cmb = NULL; + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); spin_unlock_irq(cdev->ccwlock); /* deactivate global measurement if this is the last channel */ @@ -654,89 +890,105 @@ free_cmbe (struct ccw_device *cdev) spin_unlock(&cmb_area.lock); } -static int -set_cmbe(struct ccw_device *cdev, u32 mme) +static int set_cmbe(struct ccw_device *cdev, u32 mme) { unsigned long mba; + struct cmb_data *cmb_data; + unsigned long flags; - if (!cdev->private->cmb) + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + spin_unlock_irqrestore(cdev->ccwlock, flags); return -EINVAL; - mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0; + } + cmb_data = cdev->private->cmb; + mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0; + spin_unlock_irqrestore(cdev->ccwlock, flags); return set_schib_wait(cdev, mme, 1, mba); } -u64 -read_cmbe (struct ccw_device *cdev, int index) +static u64 read_cmbe (struct ccw_device *cdev, int index) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. with mvc */ - struct cmbe cmb; - unsigned long flags; + struct cmbe *cmb; + struct cmb_data *cmb_data; u32 val; + int ret; + unsigned long flags; - spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) return 0; - } - cmb = *cmbe_align(cdev->private->cmb); - spin_unlock_irqrestore(cdev->ccwlock, flags); + spin_lock_irqsave(cdev->ccwlock, flags); + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = 0; + goto out; + } + cmb = cmb_data->last_block; switch (index) { case cmb_ssch_rsch_count: - return cmb.ssch_rsch_count; + ret = cmb->ssch_rsch_count; + goto out; case cmb_sample_count: - return cmb.sample_count; + ret = cmb->sample_count; + goto out; case cmb_device_connect_time: - val = cmb.device_connect_time; + val = cmb->device_connect_time; break; case cmb_function_pending_time: - val = cmb.function_pending_time; + val = cmb->function_pending_time; break; case cmb_device_disconnect_time: - val = cmb.device_disconnect_time; + val = cmb->device_disconnect_time; break; case cmb_control_unit_queuing_time: - val = cmb.control_unit_queuing_time; + val = cmb->control_unit_queuing_time; break; case cmb_device_active_only_time: - val = cmb.device_active_only_time; + val = cmb->device_active_only_time; break; case cmb_device_busy_time: - val = cmb.device_busy_time; + val = cmb->device_busy_time; break; case cmb_initial_command_response_time: - val = cmb.initial_command_response_time; + val = cmb->initial_command_response_time; break; default: - return 0; + ret = 0; + goto out; } - return time_to_avg_nsec(val, cmb.sample_count); + ret = time_to_avg_nsec(val, cmb->sample_count); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static int -readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) +static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) { - /* yes, we have to put it 
on the stack - * because the cmb must only be accessed - * atomically, e.g. with mvc */ - struct cmbe cmb; - unsigned long flags; + struct cmbe *cmb; + struct cmb_data *cmb_data; u64 time; + unsigned long flags; + int ret; + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return ret; spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return -ENODEV; + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = -ENODEV; + goto out; } - - cmb = *cmbe_align(cdev->private->cmb); - time = get_clock() - cdev->private->cmb_start_time; - spin_unlock_irqrestore(cdev->ccwlock, flags); + if (cmb_data->last_update == 0) { + ret = -EAGAIN; + goto out; + } + time = cmb_data->last_update - cdev->private->cmb_start_time; memset (data, 0, sizeof(struct cmbdata)); @@ -746,35 +998,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) /* conver to nanoseconds */ data->elapsed_time = (time * 1000) >> 12; + cmb = cmb_data->last_block; /* copy data to new structure */ - data->ssch_rsch_count = cmb.ssch_rsch_count; - data->sample_count = cmb.sample_count; + data->ssch_rsch_count = cmb->ssch_rsch_count; + data->sample_count = cmb->sample_count; /* time fields are converted to nanoseconds while copying */ - data->device_connect_time = time_to_nsec(cmb.device_connect_time); - data->function_pending_time = time_to_nsec(cmb.function_pending_time); - data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); + data->device_connect_time = time_to_nsec(cmb->device_connect_time); + data->function_pending_time = time_to_nsec(cmb->function_pending_time); + data->device_disconnect_time = + time_to_nsec(cmb->device_disconnect_time); data->control_unit_queuing_time - = time_to_nsec(cmb.control_unit_queuing_time); + = time_to_nsec(cmb->control_unit_queuing_time); data->device_active_only_time - = time_to_nsec(cmb.device_active_only_time); - data->device_busy_time = time_to_nsec(cmb.device_busy_time); + = time_to_nsec(cmb->device_active_only_time); + data->device_busy_time = time_to_nsec(cmb->device_busy_time); data->initial_command_response_time - = time_to_nsec(cmb.initial_command_response_time); + = time_to_nsec(cmb->initial_command_response_time); - return 0; + ret = 0; +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static void -reset_cmbe(struct ccw_device *cdev) +static void reset_cmbe(struct ccw_device *cdev) { - struct cmbe *cmb; - spin_lock_irq(cdev->ccwlock); - cmb = cmbe_align(cdev->private->cmb); - if (cmb) - memset (cmb, 0, sizeof (*cmb)); - cdev->private->cmb_start_time = get_clock(); - spin_unlock_irq(cdev->ccwlock); + cmf_generic_reset(cdev); +} + +static void * align_cmbe(void *area) +{ + return cmbe_align(area); } static struct attribute_group cmf_attr_group_ext; @@ -786,6 +1041,7 @@ static struct cmb_operations cmbops_extended = { .read = read_cmbe, .readall = readall_cmbe, .reset = reset_cmbe, + .align = align_cmbe, .attr_group = &cmf_attr_group_ext, }; @@ -803,14 +1059,20 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, struct ccw_device *cdev; long interval; unsigned long count; + struct cmb_data *cmb_data; cdev = to_ccwdev(dev); - interval = get_clock() - cdev->private->cmb_start_time; count = cmf_read(cdev, cmb_sample_count); - if (count) + spin_lock_irq(cdev->ccwlock); + cmb_data = cdev->private->cmb; + if (count) { + interval = cmb_data->last_update - + cdev->private->cmb_start_time; + interval = (interval * 1000) >> 12; interval /= count; - else + } 
else interval = -1; + spin_unlock_irq(cdev->ccwlock); return sprintf(buf, "%ld\n", interval); } @@ -823,7 +1085,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char int ret; ret = cmf_readall(to_ccwdev(dev), &data); - if (ret) + if (ret == -EAGAIN || ret == -ENODEV) + /* No data (yet/currently) available to use for calculation. */ + return sprintf(buf, "n/a\n"); + else if (ret) return ret; utilization = data.device_connect_time + @@ -876,7 +1141,7 @@ static struct attribute *cmf_attributes[] = { &dev_attr_avg_device_disconnect_time.attr, &dev_attr_avg_control_unit_queuing_time.attr, &dev_attr_avg_device_active_only_time.attr, - 0, + NULL, }; static struct attribute_group cmf_attr_group = { @@ -896,7 +1161,7 @@ static struct attribute *cmf_attributes_ext[] = { &dev_attr_avg_device_active_only_time.attr, &dev_attr_avg_device_busy_time.attr, &dev_attr_avg_initial_command_response_time.attr, - 0, + NULL, }; static struct attribute_group cmf_attr_group_ext = { @@ -982,6 +1247,13 @@ cmf_readall(struct ccw_device *cdev, struct cmbdata *data) return cmbops->readall(cdev, data); } +/* Reenable cmf when a disconnected device becomes available again. */ +int cmf_reenable(struct ccw_device *cdev) +{ + cmbops->reset(cdev); + return cmbops->set(cdev, 2); +} + static int __init init_cmf(void) { diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 74ea8aac4b7d..7086a74e9871 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -19,9 +19,11 @@ #include "cio_debug.h" #include "ioasm.h" #include "chsc.h" +#include "device.h" int need_rescan = 0; int css_init_done = 0; +static int need_reprobe = 0; static int max_ssid = 0; struct channel_subsystem *css[__MAX_CSSID + 1]; @@ -106,6 +108,24 @@ css_subchannel_release(struct device *dev) extern int css_get_ssd_info(struct subchannel *sch); + +int css_sch_device_register(struct subchannel *sch) +{ + int ret; + + mutex_lock(&sch->reg_mutex); + ret = device_register(&sch->dev); + mutex_unlock(&sch->reg_mutex); + return ret; +} + +void css_sch_device_unregister(struct subchannel *sch) +{ + mutex_lock(&sch->reg_mutex); + device_unregister(&sch->dev); + mutex_unlock(&sch->reg_mutex); +} + static int css_register_subchannel(struct subchannel *sch) { @@ -117,7 +137,7 @@ css_register_subchannel(struct subchannel *sch) sch->dev.release = &css_subchannel_release; /* make it known to the system */ - ret = device_register(&sch->dev); + ret = css_sch_device_register(sch); if (ret) printk (KERN_WARNING "%s: could not register %s\n", __func__, sch->dev.bus_id); @@ -162,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid) return dev ? 
to_subchannel(dev) : NULL; } - -static inline int -css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid) +static inline int css_get_subchannel_status(struct subchannel *sch) { struct schib schib; - int cc; - cc = stsch(schid, &schib); - if (cc) - return CIO_GONE; - if (!schib.pmcw.dnv) + if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) return CIO_GONE; - if (sch && sch->schib.pmcw.dnv && - (schib.pmcw.dev != sch->schib.pmcw.dev)) + if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) return CIO_REVALIDATE; - if (sch && !sch->lpm) + if (!sch->lpm) return CIO_NO_PATH; return CIO_OPER; } - -static int -css_evaluate_subchannel(struct subchannel_id schid, int slow) + +static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) { int event, ret, disc; - struct subchannel *sch; unsigned long flags; + enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; - sch = get_subchannel_by_schid(schid); - disc = sch ? device_is_disconnected(sch) : 0; + spin_lock_irqsave(&sch->lock, flags); + disc = device_is_disconnected(sch); if (disc && slow) { - if (sch) - put_device(&sch->dev); - return 0; /* Already processed. */ + /* Disconnected devices are evaluated directly only.*/ + spin_unlock_irqrestore(&sch->lock, flags); + return 0; } - /* - * We've got a machine check, so running I/O won't get an interrupt. - * Kill any pending timers. - */ - if (sch) - device_kill_pending_timer(sch); + /* No interrupt after machine check - kill pending timers. */ + device_kill_pending_timer(sch); if (!disc && !slow) { - if (sch) - put_device(&sch->dev); - return -EAGAIN; /* Will be done on the slow path. */ + /* Non-disconnected devices are evaluated on the slow path. */ + spin_unlock_irqrestore(&sch->lock, flags); + return -EAGAIN; } - event = css_get_subchannel_status(sch, schid); + event = css_get_subchannel_status(sch); CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", - schid.ssid, schid.sch_no, event, - sch?(disc?"disconnected":"normal"):"unknown", - slow?"slow":"fast"); + sch->schid.ssid, sch->schid.sch_no, event, + disc ? "disconnected" : "normal", + slow ? "slow" : "fast"); + /* Analyze subchannel status. */ + action = NONE; switch (event) { case CIO_NO_PATH: - case CIO_GONE: - if (!sch) { - /* Never used this subchannel. Ignore. */ - ret = 0; + if (disc) { + /* Check if paths have become available. */ + action = REPROBE; break; } - if (disc && (event == CIO_NO_PATH)) { - /* - * Uargh, hack again. Because we don't get a machine - * check on configure on, our path bookkeeping can - * be out of date here (it's fine while we only do - * logical varying or get chsc machine checks). We - * need to force reprobing or we might miss devices - * coming operational again. It won't do harm in real - * no path situations. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); + /* fall through */ + case CIO_GONE: + /* Prevent unwanted effects when opening lock. */ + cio_disable_subchannel(sch); + device_set_disconnected(sch); + /* Ask driver what to do with device. */ + action = UNREGISTER; + if (sch->driver && sch->driver->notify) { spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; - break; - } - if (sch->driver && sch->driver->notify && - sch->driver->notify(&sch->dev, event)) { - cio_disable_subchannel(sch); - device_set_disconnected(sch); - ret = 0; - break; + ret = sch->driver->notify(&sch->dev, event); + spin_lock_irqsave(&sch->lock, flags); + if (ret) + action = NONE; } - /* - * Unregister subchannel. 
- * The device will be killed automatically. - */ - cio_disable_subchannel(sch); - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); - ret = 0; break; case CIO_REVALIDATE: - /* - * Revalidation machine check. Sick. - * We don't notify the driver since we have to throw the device - * away in any case. - */ - if (!disc) { - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); - ret = css_probe_device(schid); - } else { - /* - * We can't immediately deregister the disconnected - * device since it might block. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; - } + /* Device will be removed, so no notify necessary. */ + if (disc) + /* Reprobe because immediate unregister might block. */ + action = REPROBE; + else + action = UNREGISTER_PROBE; break; case CIO_OPER: - if (disc) { - spin_lock_irqsave(&sch->lock, flags); + if (disc) /* Get device operational again. */ - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - } - ret = sch ? 0 : css_probe_device(schid); + action = REPROBE; + break; + } + /* Perform action. */ + ret = 0; + switch (action) { + case UNREGISTER: + case UNREGISTER_PROBE: + /* Unregister device (will use subchannel lock). */ + spin_unlock_irqrestore(&sch->lock, flags); + css_sch_device_unregister(sch); + spin_lock_irqsave(&sch->lock, flags); + + /* Reset intparm to zeroes. */ + sch->schib.pmcw.intparm = 0; + cio_modify(sch); + + /* Probe if necessary. */ + if (action == UNREGISTER_PROBE) + ret = css_probe_device(sch->schid); + break; + case REPROBE: + device_trigger_reprobe(sch); break; default: - BUG(); - ret = 0; + break; } + spin_unlock_irqrestore(&sch->lock, flags); + + return ret; +} + +static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) +{ + struct schib schib; + + if (!slow) { + /* Will be done on the slow path. */ + return -EAGAIN; + } + if (stsch(schid, &schib) || !schib.pmcw.dnv) { + /* Unusable - ignore. */ + return 0; + } + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, " + "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER); + + return css_probe_device(schid); +} + +static int css_evaluate_subchannel(struct subchannel_id schid, int slow) +{ + struct subchannel *sch; + int ret; + + sch = get_subchannel_by_schid(schid); + if (sch) { + ret = css_evaluate_known_subchannel(sch, slow); + put_device(&sch->dev); + } else + ret = css_evaluate_new_subchannel(schid, slow); + return ret; } @@ -339,6 +364,67 @@ typedef void (*workfunc)(void *); DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL); struct workqueue_struct *slow_path_wq; +/* Reprobe subchannel if unregistered. */ +static int reprobe_subchannel(struct subchannel_id schid, void *data) +{ + struct subchannel *sch; + int ret; + + CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n", + schid.ssid, schid.sch_no); + if (need_reprobe) + return -EAGAIN; + + sch = get_subchannel_by_schid(schid); + if (sch) { + /* Already known. */ + put_device(&sch->dev); + return 0; + } + + ret = css_probe_device(schid); + switch (ret) { + case 0: + break; + case -ENXIO: + case -ENOMEM: + /* These should abort looping */ + break; + default: + ret = 0; + } + + return ret; +} + +/* Work function used to reprobe all unregistered subchannels. 
*/ +static void reprobe_all(void *data) +{ + int ret; + + CIO_MSG_EVENT(2, "reprobe start\n"); + + need_reprobe = 0; + /* Make sure initial subchannel scan is done. */ + wait_event(ccw_device_init_wq, + atomic_read(&ccw_device_init_count) == 0); + ret = for_each_subchannel(reprobe_subchannel, NULL); + + CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, + need_reprobe); +} + +DECLARE_WORK(css_reprobe_work, reprobe_all, NULL); + +/* Schedule reprobing of all unregistered subchannels. */ +void css_schedule_reprobe(void) +{ + need_reprobe = 1; + queue_work(ccw_device_work, &css_reprobe_work); +} + +EXPORT_SYMBOL_GPL(css_schedule_reprobe); + /* * Rescan for new devices. FIXME: This is slow. * This function is called when we have lost CRWs due to overflows and we have @@ -542,9 +628,13 @@ init_channel_subsystem (void) ret = device_register(&css[i]->device); if (ret) goto out_free; - if (css_characteristics_avail && css_chsc_characteristics.secm) - device_create_file(&css[i]->device, - &dev_attr_cm_enable); + if (css_characteristics_avail && + css_chsc_characteristics.secm) { + ret = device_create_file(&css[i]->device, + &dev_attr_cm_enable); + if (ret) + goto out_device; + } } css_init_done = 1; @@ -552,6 +642,8 @@ init_channel_subsystem (void) for_each_subchannel(__init_channel_subsystem, NULL); return 0; +out_device: + device_unregister(&css[i]->device); out_free: kfree(css[i]); out_unregister: diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e210f89a2449..8aabb4adeb5f 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -100,7 +100,7 @@ struct ccw_device_private { struct qdio_irq *qdio_data; struct irb irb; /* device status */ struct senseid senseid; /* SenseID info */ - struct pgid pgid; /* path group ID */ + struct pgid pgid[8]; /* path group IDs per chpid*/ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ struct work_struct kick_work; wait_queue_head_t wait_q; @@ -136,6 +136,8 @@ extern struct bus_type css_bus_type; extern struct css_driver io_subchannel_driver; extern int css_probe_device(struct subchannel_id); +extern int css_sch_device_register(struct subchannel *); +extern void css_sch_device_unregister(struct subchannel *); extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 8e3053c2a451..688945662c15 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -8,7 +8,6 @@ * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> @@ -53,55 +52,83 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) return 1; } -/* - * Hotplugging interface for ccw devices. - * Heavily modeled on pci and usb hotplug. - */ -static int -ccw_uevent (struct device *dev, char **envp, int num_envp, - char *buffer, int buffer_size) +/* Store modalias string delimited by prefix/suffix string into buffer with + * specified size. Return length of resulting string (excluding trailing '\0') + * even if string doesn't fit buffer (snprintf semantics). 
*/ +static int snprint_alias(char *buf, size_t size, const char *prefix, + struct ccw_device_id *id, const char *suffix) { - struct ccw_device *cdev = to_ccwdev(dev); - int i = 0; - int length = 0; + int len; - if (!cdev) - return -ENODEV; + len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, + id->cu_model); + if (len > size) + return len; + buf += len; + size -= len; + + if (id->dev_type != 0) + len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type, + id->dev_model, suffix); + else + len += snprintf(buf, size, "dtdm%s", suffix); - /* what we want to pass to /sbin/hotplug */ + return len; +} - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", - cdev->id.cu_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - ++length; - buffer += length; +/* Set up environment variables for ccw device uevent. Return 0 on success, + * non-zero otherwise. */ +static int ccw_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_device_id *id = &(cdev->id); + int i = 0; + int len; + /* CU_TYPE= */ + len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; + if (len > buffer_size || i >= num_envp) + return -ENOMEM; envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", - cdev->id.cu_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) + buffer += len; + buffer_size -= len; + + /* CU_MODEL= */ + len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; - ++length; - buffer += length; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; /* The next two can be zero, that's ok for us */ - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", - cdev->id.dev_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) + /* DEV_TYPE= */ + len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; - ++length; - buffer += length; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; + /* DEV_MODEL= */ + len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", + (unsigned char) id->dev_model) + 1; + if (len > buffer_size || i >= num_envp) + return -ENOMEM; envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", - cdev->id.dev_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) + buffer += len; + buffer_size -= len; + + /* MODALIAS= */ + len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; - envp[i] = 0; + envp[i] = NULL; return 0; } @@ -133,8 +160,8 @@ struct css_driver io_subchannel_driver = { struct workqueue_struct *ccw_device_work; struct workqueue_struct *ccw_device_notify_work; -static wait_queue_head_t ccw_device_init_wq; -static atomic_t ccw_device_init_count; +wait_queue_head_t ccw_device_init_wq; +atomic_t ccw_device_init_count; static int __init init_ccw_bus_type (void) @@ -252,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); - int ret; + int len; - ret = sprintf(buf, "ccw:t%04Xm%02X", - id->cu_type, id->cu_model); - if (id->dev_type != 0) - ret += sprintf(buf + ret, "dt%04Xdm%02X\n", - 
id->dev_type, id->dev_model); - else - ret += sprintf(buf + ret, "dtdm\n"); - return ret; + len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; + + return len > PAGE_SIZE ? PAGE_SIZE : len; } static ssize_t @@ -281,7 +303,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) * 'throw away device'. */ sch = to_subchannel(cdev->dev.parent); - device_unregister(&sch->dev); + css_sch_device_unregister(sch); /* Reset intparm to zeroes. */ sch->schib.pmcw.intparm = 0; cio_modify(sch); @@ -557,12 +579,11 @@ get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid, struct ccw_device *sibling) { struct device *dev; - struct match_data data = { - .devno = devno, - .ssid = ssid, - .sibling = sibling, - }; + struct match_data data; + data.devno = devno; + data.ssid = ssid; + data.sibling = sibling; dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); return dev ? to_ccwdev(dev) : NULL; @@ -626,7 +647,7 @@ ccw_device_do_unreg_rereg(void *data) other_sch->schib.pmcw.intparm = 0; cio_modify(other_sch); } - device_unregister(&other_sch->dev); + css_sch_device_unregister(other_sch); } } /* Update ssd info here. */ @@ -710,7 +731,7 @@ ccw_device_call_sch_unregister(void *data) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - device_unregister(&sch->dev); + css_sch_device_unregister(sch); /* Reset intparm to zeroes. */ sch->schib.pmcw.intparm = 0; cio_modify(sch); @@ -836,10 +857,8 @@ io_subchannel_probe (struct subchannel *sch) return -ENOMEM; } atomic_set(&cdev->private->onoff, 0); - cdev->dev = (struct device) { - .parent = &sch->dev, - .release = ccw_device_release, - }; + cdev->dev.parent = &sch->dev; + cdev->dev.release = ccw_device_release; INIT_LIST_HEAD(&cdev->private->kick_work.entry); /* Do first half of device_register. */ device_initialize(&cdev->dev); @@ -978,9 +997,7 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) int rc; /* Initialize the ccw_device structure. */ - cdev->dev = (struct device) { - .parent = &sch->dev, - }; + cdev->dev.parent= &sch->dev; rc = io_subchannel_recog(cdev, sch); if (rc) return rc; @@ -1058,7 +1075,7 @@ get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) __ccwdev_check_busid); put_driver(drv); - return dev ? to_ccwdev(dev) : 0; + return dev ? to_ccwdev(dev) : NULL; } /************************** device driver handling ************************/ @@ -1083,7 +1100,7 @@ ccw_device_probe (struct device *dev) ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; if (ret) { - cdev->drv = 0; + cdev->drv = NULL; return ret; } @@ -1114,7 +1131,7 @@ ccw_device_remove (struct device *dev) ret, cdev->dev.bus_id); } ccw_device_set_timeout(cdev, 0); - cdev->drv = 0; + cdev->drv = NULL; return 0; } diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 11587ebb7289..00be9a5b4acd 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -1,6 +1,10 @@ #ifndef S390_DEVICE_H #define S390_DEVICE_H +#include <asm/ccwdev.h> +#include <asm/atomic.h> +#include <linux/wait.h> + /* * states of the device statemachine */ @@ -23,6 +27,7 @@ enum dev_state { DEV_STATE_DISCONNECTED, DEV_STATE_DISCONNECTED_SENSE_ID, DEV_STATE_CMFCHANGE, + DEV_STATE_CMFUPDATE, /* last element! 
*/ NR_DEV_STATES }; @@ -67,6 +72,8 @@ dev_fsm_final_state(struct ccw_device *cdev) extern struct workqueue_struct *ccw_device_work; extern struct workqueue_struct *ccw_device_notify_work; +extern wait_queue_head_t ccw_device_init_wq; +extern atomic_t ccw_device_init_count; void io_subchannel_recog_done(struct ccw_device *cdev); @@ -112,5 +119,8 @@ int ccw_device_stlck(struct ccw_device *); void ccw_device_set_timeout(struct ccw_device *, int); extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); +/* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); +void cmf_retry_copy_block(struct ccw_device *); +int cmf_reenable(struct ccw_device *); #endif diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 49ec562d7f60..dace46fc32e8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -9,7 +9,6 @@ */ #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/string.h> @@ -153,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) if (cdev->private->iretry) { cdev->private->iretry--; ret = cio_halt(sch); - return (ret == 0) ? -EBUSY : ret; + if (ret != -EBUSY) + return (ret == 0) ? -EBUSY : ret; } /* halt io unsuccessful. */ cdev->private->iretry = 255; /* 255 clear retries. */ @@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) */ old_lpm = sch->lpm; stsch(sch->schid, &sch->schib); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; /* Check since device may again have become not operational. */ if (!sch->schib.pmcw.dnv) state = DEV_STATE_NOT_OPER; @@ -267,12 +264,11 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) notify = 1; } /* fill out sense information */ - cdev->id = (struct ccw_device_id) { - .cu_type = cdev->private->senseid.cu_type, - .cu_model = cdev->private->senseid.cu_model, - .dev_type = cdev->private->senseid.dev_type, - .dev_model = cdev->private->senseid.dev_model, - }; + memset(&cdev->id, 0, sizeof(cdev->id)); + cdev->id.cu_type = cdev->private->senseid.cu_type; + cdev->id.cu_model = cdev->private->senseid.cu_model; + cdev->id.dev_type = cdev->private->senseid.dev_type; + cdev->id.dev_model = cdev->private->senseid.dev_model; if (notify) { cdev->private->state = DEV_STATE_OFFLINE; if (same_dev) { @@ -336,8 +332,11 @@ ccw_device_oper_notify(void *data) if (!ret) /* Driver doesn't want device back. */ ccw_device_do_unreg_rereg((void *)cdev); - else + else { + /* Reenable channel measurements, if needed. */ + cmf_reenable(cdev); wake_up(&cdev->private->wait_q); + } } /* @@ -376,6 +375,56 @@ ccw_device_done(struct ccw_device *cdev, int state) put_device (&cdev->dev); } +static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) +{ + char *c1; + char *c2; + + c1 = (char *)p1; + c2 = (char *)p2; + + return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1); +} + +static void __ccw_device_get_common_pgid(struct ccw_device *cdev) +{ + int i; + int last; + + last = 0; + for (i = 0; i < 8; i++) { + if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET) + /* No PGID yet */ + continue; + if (cdev->private->pgid[last].inf.ps.state1 == + SNID_STATE1_RESET) { + /* First non-zero PGID */ + last = i; + continue; + } + if (cmp_pgid(&cdev->private->pgid[i], + &cdev->private->pgid[last]) == 0) + /* Non-conflicting PGIDs */ + continue; + + /* PGID mismatch, can't pathgroup. 
*/ + CIO_MSG_EVENT(0, "SNID - pgid mismatch for device " + "0.%x.%04x, can't pathgroup\n", + cdev->private->ssid, cdev->private->devno); + cdev->private->options.pgroup = 0; + return; + } + if (cdev->private->pgid[last].inf.ps.state1 == + SNID_STATE1_RESET) + /* No previous pgid found */ + memcpy(&cdev->private->pgid[0], &css[0]->global_pgid, + sizeof(struct pgid)); + else + /* Use existing pgid */ + memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last], + sizeof(struct pgid)); +} + /* * Function called from device_pgid.c after sense path ground has completed. */ @@ -386,24 +435,26 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err) sch = to_subchannel(cdev->dev.parent); switch (err) { - case 0: - /* Start Path Group verification. */ - sch->vpm = 0; /* Start with no path groups set. */ - cdev->private->state = DEV_STATE_VERIFY; - ccw_device_verify_start(cdev); + case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */ + cdev->private->options.pgroup = 0; + break; + case 0: /* success */ + case -EACCES: /* partial success, some paths not operational */ + /* Check if all pgids are equal or 0. */ + __ccw_device_get_common_pgid(cdev); break; case -ETIME: /* Sense path group id stopped by timeout. */ case -EUSERS: /* device is reserved for someone else. */ ccw_device_done(cdev, DEV_STATE_BOXED); - break; - case -EOPNOTSUPP: /* path grouping not supported, just set online. */ - cdev->private->options.pgroup = 0; - ccw_device_done(cdev, DEV_STATE_ONLINE); - break; + return; default: ccw_device_done(cdev, DEV_STATE_NOT_OPER); - break; + return; } + /* Start Path Group verification. */ + cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; + ccw_device_verify_start(cdev); } /* @@ -502,7 +553,19 @@ ccw_device_nopath_notify(void *data) void ccw_device_verify_done(struct ccw_device *cdev, int err) { - cdev->private->flags.doverify = 0; + struct subchannel *sch; + + sch = to_subchannel(cdev->dev.parent); + /* Update schib - pom may have changed. */ + stsch(sch->schid, &sch->schib); + /* Update lpm with verified path mask. */ + sch->lpm = sch->vpm; + /* Repeat path verification? */ + if (cdev->private->flags.doverify) { + cdev->private->flags.doverify = 0; + ccw_device_verify_start(cdev); + return; + } switch (err) { case -EOPNOTSUPP: /* path grouping not supported, just set online. */ cdev->private->options.pgroup = 0; @@ -511,12 +574,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) /* Deliver fake irb to device driver, if needed. */ if (cdev->private->flags.fake_irb) { memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw = (struct scsw) { - .cc = 1, - .fctl = SCSW_FCTL_START_FUNC, - .actl = SCSW_ACTL_START_PEND, - .stctl = SCSW_STCTL_STATUS_PEND, - }; + cdev->private->irb.scsw.cc = 1; + cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; + cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; + cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, @@ -560,8 +621,10 @@ ccw_device_online(struct ccw_device *cdev) } /* Do we want to do path grouping? */ if (!cdev->private->options.pgroup) { - /* No, set state online immediately. */ - ccw_device_done(cdev, DEV_STATE_ONLINE); + /* Start initial path verification. */ + cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; + ccw_device_verify_start(cdev); return 0; } /* Do a SensePGID first. 
*/ @@ -703,8 +766,6 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) { struct subchannel *sch; - if (!cdev->private->options.pgroup) - return; if (cdev->private->state == DEV_STATE_W4SENSE) { cdev->private->flags.doverify = 1; return; @@ -717,6 +778,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) stsch(sch->schid, &sch->schib); if (sch->schib.scsw.actl != 0 || + (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered @@ -728,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) } /* Device is idle, we can do the path verification. */ cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; ccw_device_verify_start(cdev); } @@ -861,6 +924,8 @@ ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; /* Accumulate status. We don't do basic sense. */ ccw_device_accumulate_irb(cdev, irb); + /* Remember to clear irb to avoid residuals. */ + memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Try to start delayed device verification. */ ccw_device_online_verify(cdev, 0); /* Note: Don't call handler for cio initiated clear! */ @@ -988,11 +1053,10 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event) } static void -ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) +ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event) { - /* When the I/O has terminated, we have to start verification. */ - if (cdev->private->options.pgroup) - cdev->private->flags.doverify = 1; + /* Start verification after current task finished. */ + cdev->private->flags.doverify = 1; } static void @@ -1057,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch) * The pim, pam, pom values may not be accurate, but they are the best * we have before performing device selection :/ */ - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; /* Re-set some bits in the pmcw that were lost. 
*/ sch->schib.pmcw.isc = 3; sch->schib.pmcw.csense = 1; @@ -1093,6 +1154,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event) dev_fsm_event(cdev, dev_event); } +static void ccw_device_update_cmfblock(struct ccw_device *cdev, + enum dev_event dev_event) +{ + cmf_retry_copy_block(cdev); + cdev->private->state = DEV_STATE_ONLINE; + dev_fsm_event(cdev, dev_event); +} static void ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) @@ -1177,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_delay_verify, }, [DEV_STATE_ONLINE] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, @@ -1220,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, - [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify, + [DEV_EVENT_VERIFY] = ccw_device_delay_verify, }, [DEV_STATE_QUIESCE] = { [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, @@ -1233,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_nop, [DEV_EVENT_INTERRUPT] = ccw_device_start_id, [DEV_EVENT_TIMEOUT] = ccw_device_bug, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_start_id, }, [DEV_STATE_DISCONNECTED_SENSE_ID] = { [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, @@ -1247,6 +1315,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate, [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate, }, + [DEV_STATE_CMFUPDATE] = { + [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock, + [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock, + [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, + [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, + }, }; /* diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index e60b2d8103b8..1398367b5f68 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -10,7 +10,6 @@ */ #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> #include <asm/ccwdev.h> @@ -43,18 +42,15 @@ diag210(struct diag210 * addr) spin_lock_irqsave(&diag210_lock, flags); diag210_tmp = *addr; - asm volatile ( - " lhi %0,-1\n" - " sam31\n" - " diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" - "1: sam64\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" - : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" ); + asm volatile( + " lhi %0,-1\n" + " sam31\n" + " diag %1,0,0x210\n" + "0: ipm %0\n" + " srl %0,28\n" + "1: sam64\n" + EX_TABLE(0b,1b) + : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory"); *addr = diag210_tmp; spin_unlock_irqrestore(&diag210_lock, flags); @@ -67,17 +63,14 @@ diag210(struct diag210 * addr) { int ccode; - asm volatile ( - " lhi %0,-1\n" - " diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" + asm volatile( + " lhi %0,-1\n" + " diag %1,0,0x210\n" + "0: ipm %0\n" + " srl %0,28\n" "1:\n" - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" - : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" ); + EX_TABLE(0b,1b) + : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory"); return ccode; } diff --git a/drivers/s390/cio/device_ops.c 
b/drivers/s390/cio/device_ops.c index 795abb5a65ba..93a897eebfff 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -6,7 +6,6 @@ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> @@ -78,7 +77,8 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, return -ENODEV; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; - if (cdev->private->state == DEV_STATE_VERIFY) { + if (cdev->private->state == DEV_STATE_VERIFY || + cdev->private->state == DEV_STATE_CLEAR_VERIFY) { /* Remember to fake irb when finished. */ if (!cdev->private->flags.fake_irb) { cdev->private->flags.fake_irb = 1; @@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ret = cio_set_options (sch, flags); if (ret) return ret; + /* Adjust requested path mask to excluded varied off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } ret = cio_start_key (sch, cpa, lpm, key); if (ret == 0) cdev->private->intparm = intparm; @@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev) if (!sch) return 0; else - return sch->vpm; + return sch->lpm; } static void @@ -263,6 +269,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) /* Abuse intparm for error reporting. */ if (IS_ERR(irb)) cdev->private->intparm = -EIO; + else if (irb->scsw.cc == 1) + /* Retry for deferred condition code. */ + cdev->private->intparm = -EAGAIN; else if ((irb->scsw.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || (irb->scsw.cstat != 0)) { @@ -270,7 +279,8 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) * We didn't get channel end / device end. Check if path * verification has been started; we can retry after it has * finished. We also retry unit checks except for command reject - * or intervention required. + * or intervention required. Also check for long busy + * conditions. */ if (cdev->private->flags.doverify || cdev->private->state == DEV_STATE_VERIFY) @@ -279,6 +289,10 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) !(irb->ecw[0] & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ))) cdev->private->intparm = -EAGAIN; + else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) && + (irb->scsw.dstat & DEV_STAT_DEV_END) && + (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP)) + cdev->private->intparm = -EAGAIN; else cdev->private->intparm = -EIO; @@ -296,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _ sch = to_subchannel(cdev->dev.parent); do { ret = cio_start (sch, ccw, lpm); - if ((ret == -EBUSY) || (ret == -EACCES)) { + if (ret == -EBUSY) { /* Try again later. */ spin_unlock_irq(&sch->lock); msleep(10); @@ -425,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; + /* Adjust requested path mask to excluded varied off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } + rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (!rcd_ccw) return -ENOMEM; diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 85b1020a1fcc..8ca2d078848c 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -9,7 +9,6 @@ * Path Group ID functions. 
*/ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -25,6 +24,21 @@ #include "ioasm.h" /* + * Helper function called from interrupt context to decide whether an + * operation should be tried again. + */ +static int __ccw_device_should_retry(struct scsw *scsw) +{ + /* CC is only valid if start function bit is set. */ + if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) + return 1; + /* No more activity. For sense and set PGID we stubbornly try again. */ + if (!scsw->actl) + return 1; + return 0; +} + +/* * Start Sense Path Group ID helper function. Used in ccw_device_recog * and ccw_device_sense_pgid. */ @@ -34,12 +48,17 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) struct subchannel *sch; struct ccw1 *ccw; int ret; + int i; sch = to_subchannel(cdev->dev.parent); + /* Return if we already checked on all paths. */ + if (cdev->private->imask == 0) + return (sch->lpm == 0) ? -ENODEV : -EACCES; + i = 8 - ffs(cdev->private->imask); + /* Setup sense path group id channel program. */ ccw = cdev->private->iccws; ccw->cmd_code = CCW_CMD_SENSE_PGID; - ccw->cda = (__u32) __pa (&cdev->private->pgid); ccw->count = sizeof (struct pgid); ccw->flags = CCW_FLAG_SLI; @@ -49,6 +68,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) ret = -ENODEV; while (cdev->private->imask != 0) { /* Try every path multiple times. */ + ccw->cda = (__u32) __pa (&cdev->private->pgid[i]); if (cdev->private->iretry > 0) { cdev->private->iretry--; ret = cio_start (sch, cdev->private->iccws, @@ -65,7 +85,9 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) } cdev->private->imask >>= 1; cdev->private->iretry = 5; + i++; } + return ret; } @@ -77,7 +99,7 @@ ccw_device_sense_pgid_start(struct ccw_device *cdev) cdev->private->state = DEV_STATE_SENSE_PGID; cdev->private->imask = 0x80; cdev->private->iretry = 5; - memset (&cdev->private->pgid, 0, sizeof (struct pgid)); + memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid)); ret = __ccw_device_sense_pgid_start(cdev); if (ret && ret != -EBUSY) ccw_device_sense_pgid_done(cdev, ret); @@ -92,6 +114,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) { struct subchannel *sch; struct irb *irb; + int i; sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; @@ -125,7 +148,8 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) sch->schid.sch_no, sch->orb.lpm); return -EACCES; } - if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { + i = 8 - ffs(cdev->private->imask); + if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " "is reserved by someone else\n", cdev->private->devno, sch->schid.ssid, @@ -146,10 +170,10 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) int ret; irb = (struct irb *) __LC_IRB; - /* Retry sense pgid for cc=1. */ + if (irb->scsw.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) { + if (__ccw_device_should_retry(&irb->scsw)) { ret = __ccw_device_sense_pgid_start(cdev); if (ret && ret != -EBUSY) ccw_device_sense_pgid_done(cdev, ret); @@ -163,12 +187,6 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) memset(&cdev->private->irb, 0, sizeof(struct irb)); switch (ret) { /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ - case 0: /* Sense Path Group ID successful. 
*/ - if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) - memcpy(&cdev->private->pgid, &css[0]->global_pgid, - sizeof(struct pgid)); - ccw_device_sense_pgid_done(cdev, 0); - break; case -EOPNOTSUPP: /* Sense Path Group ID not supported */ ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP); break; @@ -177,13 +195,15 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) break; case -EACCES: /* channel is not operational. */ sch->lpm &= ~cdev->private->imask; + /* Fall through. */ + case 0: /* Sense Path Group ID successful. */ cdev->private->imask >>= 1; cdev->private->iretry = 5; /* Fall through. */ case -EAGAIN: /* Try again. */ ret = __ccw_device_sense_pgid_start(cdev); if (ret != 0 && ret != -EBUSY) - ccw_device_sense_pgid_done(cdev, -ENODEV); + ccw_device_sense_pgid_done(cdev, ret); break; case -EUSERS: /* device is reserved for someone else. */ ccw_device_sense_pgid_done(cdev, -EUSERS); @@ -204,20 +224,20 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) sch = to_subchannel(cdev->dev.parent); /* Setup sense path group id channel program. */ - cdev->private->pgid.inf.fc = func; + cdev->private->pgid[0].inf.fc = func; ccw = cdev->private->iccws; if (!cdev->private->flags.pgid_single) { - cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH; + cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH; ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; ccw->cda = 0; ccw->count = 0; ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; ccw++; } else - cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH; + cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH; ccw->cmd_code = CCW_CMD_SET_PGID; - ccw->cda = (__u32) __pa (&cdev->private->pgid); + ccw->cda = (__u32) __pa (&cdev->private->pgid[0]); ccw->count = sizeof (struct pgid); ccw->flags = CCW_FLAG_SLI; @@ -225,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Try multiple times. */ - ret = -ENODEV; + ret = -EACCES; if (cdev->private->iretry > 0) { cdev->private->iretry--; ret = cio_start (sch, cdev->private->iccws, cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if ((ret != -EACCES) && (ret != -ENODEV)) + /* We expect an interrupt in case of success or busy + * indication. */ + if ((ret == 0) || (ret == -EBUSY)) return ret; } - /* PGID command failed on this path. Switch it off. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; + /* PGID command failed on this path. */ CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not operational'\n", cdev->private->devno, sch->schid.ssid, @@ -245,6 +264,47 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) } /* + * Helper function to send a nop ccw down a path. + */ +static int __ccw_device_do_nop(struct ccw_device *cdev) +{ + struct subchannel *sch; + struct ccw1 *ccw; + int ret; + + sch = to_subchannel(cdev->dev.parent); + + /* Setup nop channel program. */ + ccw = cdev->private->iccws; + ccw->cmd_code = CCW_CMD_NOOP; + ccw->cda = 0; + ccw->count = 0; + ccw->flags = CCW_FLAG_SLI; + + /* Reset device status. */ + memset(&cdev->private->irb, 0, sizeof(struct irb)); + + /* Try multiple times. */ + ret = -EACCES; + if (cdev->private->iretry > 0) { + cdev->private->iretry--; + ret = cio_start (sch, cdev->private->iccws, + cdev->private->imask); + /* We expect an interrupt in case of success or busy + * indication. */ + if ((ret == 0) || (ret == -EBUSY)) + return ret; + } + /* nop command failed on this path. 
*/ + CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " + "0.%x.%04x, lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); + return ret; +} + + +/* * Called from interrupt context to check if a valid answer * to Set Path Group ID was received. */ @@ -283,28 +343,59 @@ __ccw_device_check_pgid(struct ccw_device *cdev) return 0; } +/* + * Called from interrupt context to check the path status after a nop has + * been send. + */ +static int __ccw_device_check_nop(struct ccw_device *cdev) +{ + struct subchannel *sch; + struct irb *irb; + + sch = to_subchannel(cdev->dev.parent); + irb = &cdev->private->irb; + if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) + return -ETIME; + if (irb->scsw.cc == 3) { + CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," + " lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); + return -EACCES; + } + return 0; +} + static void __ccw_device_verify_start(struct ccw_device *cdev) { struct subchannel *sch; - __u8 imask, func; + __u8 func; int ret; sch = to_subchannel(cdev->dev.parent); - while (sch->vpm != sch->lpm) { - /* Find first unequal bit in vpm vs. lpm */ - for (imask = 0x80; imask != 0; imask >>= 1) - if ((sch->vpm & imask) != (sch->lpm & imask)) - break; - cdev->private->imask = imask; - func = (sch->vpm & imask) ? - SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; - ret = __ccw_device_do_pgid(cdev, func); + /* Repeat for all paths. */ + for (; cdev->private->imask; cdev->private->imask >>= 1, + cdev->private->iretry = 5) { + if ((cdev->private->imask & sch->schib.pmcw.pam) == 0) + /* Path not available, try next. */ + continue; + if (cdev->private->options.pgroup) { + if (sch->opm & cdev->private->imask) + func = SPID_FUNC_ESTABLISH; + else + func = SPID_FUNC_RESIGN; + ret = __ccw_device_do_pgid(cdev, func); + } else + ret = __ccw_device_do_nop(cdev); + /* We expect an interrupt in case of success or busy + * indication. */ if (ret == 0 || ret == -EBUSY) return; - cdev->private->iretry = 5; + /* Permanent path failure, try next. */ } - ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); + /* Done with all paths. */ + ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV); } /* @@ -318,26 +409,29 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) int ret; irb = (struct irb *) __LC_IRB; - /* Retry set pgid for cc=1. */ + if (irb->scsw.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) + if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_verify_start(cdev); return; } if (ccw_device_accumulate_and_sense(cdev, irb) != 0) return; sch = to_subchannel(cdev->dev.parent); - ret = __ccw_device_check_pgid(cdev); + if (cdev->private->options.pgroup) + ret = __ccw_device_check_pgid(cdev); + else + ret = __ccw_device_check_nop(cdev); memset(&cdev->private->irb, 0, sizeof(struct irb)); + switch (ret) { /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ case 0: - /* Establish or Resign Path Group done. Update vpm. */ - if ((sch->lpm & cdev->private->imask) != 0) - sch->vpm |= cdev->private->imask; - else - sch->vpm &= ~cdev->private->imask; + /* Path verification ccw finished successfully, update lpm. */ + sch->vpm |= sch->opm & cdev->private->imask; + /* Go on with next path. 
*/ + cdev->private->imask >>= 1; cdev->private->iretry = 5; __ccw_device_verify_start(cdev); break; @@ -346,11 +440,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) * One of those strange devices which claim to be able * to do multipathing but not for Set Path Group ID. */ - if (cdev->private->flags.pgid_single) { - ccw_device_verify_done(cdev, -EOPNOTSUPP); - break; - } - cdev->private->flags.pgid_single = 1; + if (cdev->private->flags.pgid_single) + cdev->private->options.pgroup = 0; + else + cdev->private->flags.pgid_single = 1; + /* Retry */ + sch->vpm = 0; + cdev->private->imask = 0x80; + cdev->private->iretry = 5; /* fall through. */ case -EAGAIN: /* Try again. */ __ccw_device_verify_start(cdev); @@ -359,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_verify_done(cdev, -ETIME); break; case -EACCES: /* channel is not operational. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; + cdev->private->imask >>= 1; cdev->private->iretry = 5; __ccw_device_verify_start(cdev); break; @@ -373,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev) struct subchannel *sch = to_subchannel(cdev->dev.parent); cdev->private->flags.pgid_single = 0; + cdev->private->imask = 0x80; cdev->private->iretry = 5; - /* - * Update sch->lpm with current values to catch paths becoming - * available again. - */ + + /* Start with empty vpm. */ + sch->vpm = 0; + + /* Get current pam. */ if (stsch(sch->schid, &sch->schib)) { ccw_device_verify_done(cdev, -ENODEV); return; } - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; __ccw_device_verify_start(cdev); } @@ -419,10 +513,10 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) int ret; irb = (struct irb *) __LC_IRB; - /* Retry set pgid for cc=1. */ + if (irb->scsw.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) + if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_disband_start(cdev); return; } @@ -434,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) switch (ret) { /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ case 0: /* disband successful. */ - sch->vpm = 0; ccw_device_disband_done(cdev, ret); break; case -EOPNOTSUPP: diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 6c762b43f921..caf148d5caad 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -9,7 +9,6 @@ * Status accumulation and basic sense functions. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -68,8 +67,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; - if (cdev->private->options.pgroup) - cdev->private->flags.doverify = 1; + cdev->private->flags.doverify = 1; } /* @@ -181,7 +179,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; /* Copy path verification required flag. */ cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; - if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) + if (irb->esw.esw0.erw.pvrf) cdev->private->flags.doverify = 1; /* Copy concurrent sense bit. */ cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; @@ -355,7 +353,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) } /* Check if path verification is required. 
*/ if (ccw_device_accumulate_esw_valid(irb) && - irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) + irb->esw.esw0.erw.pvrf) cdev->private->flags.doverify = 1; } diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 95a9462f9a91..ad6d82940069 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -25,106 +25,74 @@ struct tpi_info { static inline int stsch(struct subchannel_id schid, volatile struct schib *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " stsch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid), "a" (addr), "m" (*addr) - : "cc", "1" ); + asm volatile( + " stsch 0(%2)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } static inline int stsch_err(struct subchannel_id schid, volatile struct schib *addr) { - int ccode; + register struct subchannel_id reg1 asm ("1") = schid; + int ccode = -EIO; - __asm__ __volatile__( - " lhi %0,%3\n" - " lr 1,%1\n" - " stsch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" + asm volatile( + " stsch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" "1:\n" -#ifdef CONFIG_64BIT - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" -#else - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" -#endif - : "=&d" (ccode) - : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) - : "cc", "1" ); + EX_TABLE(0b,1b) + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } static inline int msch(struct subchannel_id schid, volatile struct schib *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " msch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid), "a" (addr), "m" (*addr) - : "cc", "1" ); + asm volatile( + " msch 0(%2)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } static inline int msch_err(struct subchannel_id schid, volatile struct schib *addr) { - int ccode; + register struct subchannel_id reg1 asm ("1") = schid; + int ccode = -EIO; - __asm__ __volatile__( - " lhi %0,%3\n" - " lr 1,%1\n" - " msch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" + asm volatile( + " msch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" "1:\n" -#ifdef CONFIG_64BIT - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" -#else - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" -#endif - : "=&d" (ccode) - : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) - : "cc", "1" ); + EX_TABLE(0b,1b) + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } static inline int tsch(struct subchannel_id schid, volatile struct irb *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " tsch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid), "a" (addr), "m" (*addr) - : "cc", "1" ); + asm volatile( + " tsch 0(%2)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } @@ -132,89 +100,77 @@ static inline int tpi( volatile struct tpi_info *addr) { int ccode; - __asm__ __volatile__( - " tpi 0(%1)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "a" (addr), "m" (*addr) - : "cc", "1" ); + asm volatile( + " tpi 0(%1)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc"); 
return ccode; } static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " ssch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid), "a" (addr), "m" (*addr) - : "cc", "1" ); + asm volatile( + " ssch 0(%2)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } static inline int rsch(struct subchannel_id schid) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " rsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid) - : "cc", "1" ); + asm volatile( + " rsch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } static inline int csch(struct subchannel_id schid) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " csch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid) - : "cc", "1" ); + asm volatile( + " csch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } static inline int hsch(struct subchannel_id schid) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " hsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid) - : "cc", "1" ); + asm volatile( + " hsch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } static inline int xsch(struct subchannel_id schid) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " .insn rre,0xb2760000,%1,0\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (schid) - : "cc", "1" ); + asm volatile( + " .insn rre,0xb2760000,%1,0\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } @@ -223,41 +179,27 @@ static inline int chsc(void *chsc_area) typedef struct { char _[4096]; } addr_type; int cc; - __asm__ __volatile__ ( - ".insn rre,0xb25f0000,%2,0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" + asm volatile( + " .insn rre,0xb25f0000,%2,0\n" + " ipm %0\n" + " srl %0,28\n" : "=d" (cc), "=m" (*(addr_type *) chsc_area) : "d" (chsc_area), "m" (*(addr_type *) chsc_area) - : "cc" ); - + : "cc"); return cc; } -static inline int iac( void) -{ - int ccode; - - __asm__ __volatile__( - " iac 1\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) : : "cc", "1" ); - return ccode; -} - static inline int rchp(int chpid) { + register unsigned int reg1 asm ("1") = chpid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " rchp\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (chpid) - : "cc", "1" ); + asm volatile( + " lr 1,%1\n" + " rchp\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 96f519281d92..cde822d8b5c8 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -30,7 +30,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -116,7 +115,7 @@ qdio_min(int a,int b) static inline __u64 qdio_get_micros(void) { - return (get_clock() >> 10); /* time>>12 is microseconds */ + return (get_clock() >> 12); /* time>>12 is microseconds */ } /* @@ -1130,7 +1129,7 @@ out: #ifdef QDIO_USE_PROCESSING_STATE if (last_position>=0) - set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); + set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count); #endif /* QDIO_USE_PROCESSING_STATE */ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); @@ -2736,7 +2735,7 @@ qdio_free(struct ccw_device *cdev) QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); - cdev->private->qdio_data = 0; + cdev->private->qdio_data = NULL; up(&irq_ptr->setting_up_sema); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index ceb3ab31ee08..49bb9e371c32 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -191,49 +191,49 @@ enum qdio_irq_states { #if QDIO_VERBOSE_LEVEL>8 #define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_STUPID(x...) +#define QDIO_PRINT_STUPID(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>7 #define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ALL(x...) +#define QDIO_PRINT_ALL(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>6 #define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_INFO(x...) +#define QDIO_PRINT_INFO(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>5 #define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_WARN(x...) +#define QDIO_PRINT_WARN(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>4 #define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ERR(x...) +#define QDIO_PRINT_ERR(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>3 #define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_CRIT(x...) +#define QDIO_PRINT_CRIT(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>2 #define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ALERT(x...) +#define QDIO_PRINT_ALERT(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>1 #define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_EMERG(x...) +#define QDIO_PRINT_EMERG(x...) 
do { } while (0) #endif #define HEXDUMP16(importance,header,ptr) \ @@ -274,12 +274,11 @@ do_sqbs(unsigned long sch, unsigned char state, int queue, register unsigned long _sch asm ("1") = sch; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; - asm volatile ( - " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" - : "+d" (_ccq), "+d" (_queuestart) - : "d" ((unsigned long)state), "d" (_sch) - : "memory", "cc" - ); + asm volatile( + " .insn rsy,0xeb000000008A,%1,0,0(%2)" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_sch) + : "memory", "cc"); *count = _ccq & 0xff; *start = _queuestart & 0xff; @@ -299,12 +298,11 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue, unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _state = 0; - asm volatile ( - " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" - : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) - : "d" (_sch) - : "memory", "cc" - ); + asm volatile( + " .insn rrf,0xB99c0000,%1,%2,0,0" + : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) + : "d" (_sch) + : "memory", "cc" ); *count = _ccq & 0xff; *start = _queuestart & 0xff; *state = _state & 0xff; @@ -319,69 +317,35 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue, static inline int do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) { + register unsigned long reg0 asm ("0") = 2; + register struct subchannel_id reg1 asm ("1") = schid; + register unsigned long reg2 asm ("2") = mask1; + register unsigned long reg3 asm ("3") = mask2; int cc; -#ifndef CONFIG_64BIT - asm volatile ( - "lhi 0,2 \n\t" - "lr 1,%1 \n\t" - "lr 2,%2 \n\t" - "lr 3,%3 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" : "=d" (cc) - : "d" (schid), "d" (mask1), "d" (mask2) - : "cc", "0", "1", "2", "3" - ); -#else /* CONFIG_64BIT */ - asm volatile ( - "lghi 0,2 \n\t" - "llgfr 1,%1 \n\t" - "llgfr 2,%2 \n\t" - "llgfr 3,%3 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (schid), "d" (mask1), "d" (mask2) - : "cc", "0", "1", "2", "3" - ); -#endif /* CONFIG_64BIT */ + : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc"); return cc; } static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) { + register unsigned long reg0 asm ("0") = 1; + register struct subchannel_id reg1 asm ("1") = schid; + register unsigned long reg2 asm ("2") = mask; int cc; -#ifndef CONFIG_64BIT - asm volatile ( - "lhi 0,1 \n\t" - "lr 1,%1 \n\t" - "lr 2,%2 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (schid), "d" (mask) - : "cc", "0", "1", "2", "memory" - ); -#else /* CONFIG_64BIT */ - asm volatile ( - "lghi 0,1 \n\t" - "llgfr 1,%1 \n\t" - "llgfr 2,%2 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" : "=d" (cc) - : "d" (schid), "d" (mask) - : "cc", "0", "1", "2", "memory" - ); -#endif /* CONFIG_64BIT */ - + : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory"); return cc; } @@ -389,93 +353,35 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, unsigned int fc) { + register unsigned long __fc asm("0") = fc; + register unsigned long __schid asm("1") = schid; + register unsigned long __mask asm("2") = mask; int cc; - __u32 busy_bit; - -#ifndef CONFIG_64BIT - asm volatile ( - "lhi 0,0 \n\t" - "lr 1,%2 \n\t" - "lr 2,%3 \n\t" - "siga 0 \n\t" - "0:" - "ipm %0 \n\t" - "srl %0,28 \n\t" - "srl 0,31 \n\t" - "lr 
%1,0 \n\t" - "1: \n\t" - ".section .fixup,\"ax\"\n\t" - "2: \n\t" - "lhi %0,%4 \n\t" - "bras 1,3f \n\t" - ".long 1b \n\t" - "3: \n\t" - "l 1,0(1) \n\t" - "br 1 \n\t" - ".previous \n\t" - ".section __ex_table,\"a\"\n\t" - ".align 4 \n\t" - ".long 0b,2b \n\t" - ".previous \n\t" - : "=d" (cc), "=d" (busy_bit) - : "d" (schid), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "0", "1", "2", "memory" - ); -#else /* CONFIG_64BIT */ - asm volatile ( - "llgfr 0,%5 \n\t" - "lgr 1,%2 \n\t" - "llgfr 2,%3 \n\t" - "siga 0 \n\t" - "0:" - "ipm %0 \n\t" - "srl %0,28 \n\t" - "srl 0,31 \n\t" - "llgfr %1,0 \n\t" - "1: \n\t" - ".section .fixup,\"ax\"\n\t" - "lghi %0,%4 \n\t" - "jg 1b \n\t" - ".previous\n\t" - ".section __ex_table,\"a\"\n\t" - ".align 8 \n\t" - ".quad 0b,1b \n\t" - ".previous \n\t" - : "=d" (cc), "=d" (busy_bit) - : "d" (schid), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) - : "cc", "0", "1", "2", "memory" - ); -#endif /* CONFIG_64BIT */ - - (*bb) = busy_bit; + + asm volatile( + " siga 0\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) + : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) + : "cc", "memory"); + (*bb) = ((unsigned int) __fc) >> 31; return cc; } static inline unsigned long do_clear_global_summary(void) { - - unsigned long time; - -#ifndef CONFIG_64BIT - asm volatile ( - "lhi 1,3 \n\t" - ".insn rre,0xb2650000,2,0 \n\t" - "lr %0,3 \n\t" - : "=d" (time) : : "cc", "1", "2", "3" - ); -#else /* CONFIG_64BIT */ - asm volatile ( - "lghi 1,3 \n\t" - ".insn rre,0xb2650000,2,0 \n\t" - "lgr %0,3 \n\t" - : "=d" (time) : : "cc", "1", "2", "3" - ); -#endif /* CONFIG_64BIT */ - - return time; + register unsigned long __fn asm("1") = 3; + register unsigned long __tmp asm("2"); + register unsigned long __time asm("3"); + + asm volatile( + " .insn rre,0xb2650000,2,0" + : "+d" (__fn), "=d" (__tmp), "=d" (__time)); + return __time; } /* |