Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/Makefile            |   4
-rw-r--r-- | drivers/s390/cio/blacklist.c         |  14
-rw-r--r-- | drivers/s390/cio/ccwgroup.c          |  20
-rw-r--r-- | drivers/s390/cio/cio.h               |   1
-rw-r--r-- | drivers/s390/cio/css.c               |  20
-rw-r--r-- | drivers/s390/cio/device.c            |  17
-rw-r--r-- | drivers/s390/cio/device_ops.c        |  25
-rw-r--r-- | drivers/s390/cio/qdio.h              |  34
-rw-r--r-- | drivers/s390/cio/qdio_debug.c        |   5
-rw-r--r-- | drivers/s390/cio/qdio_main.c         | 161
-rw-r--r-- | drivers/s390/cio/qdio_setup.c        |   8
-rw-r--r-- | drivers/s390/cio/qdio_thinint.c      |  41
-rw-r--r-- | drivers/s390/cio/vfio_ccw_cp.h       |   1
-rw-r--r-- | drivers/s390/cio/vfio_ccw_drv.c      |  58
-rw-r--r-- | drivers/s390/cio/vfio_ccw_fsm.c      |  60
-rw-r--r-- | drivers/s390/cio/vfio_ccw_ops.c      |  10
-rw-r--r-- | drivers/s390/cio/vfio_ccw_private.h  |  18
-rw-r--r-- | drivers/s390/cio/vfio_ccw_trace.c    |  14
-rw-r--r-- | drivers/s390/cio/vfio_ccw_trace.h    |  80
19 files changed, 389 insertions, 202 deletions
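A recurring pattern in the blacklist.c and css.c hunks below is the switch of procfs entries from struct file_operations to the dedicated struct proc_ops interface. The following is a minimal sketch of that registration pattern only; the entry name "demo_entry" and the demo_* callbacks are hypothetical and not taken from the driver.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");		/* minimal seq_file payload */
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

/* Previously this would have been a struct file_operations with
 * .open/.read/.llseek/.release; proc_ops uses proc_* members instead.
 */
static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_proc_init(void)
{
	/* 0444: world-readable informational entry */
	return proc_create("demo_entry", 0444, NULL, &demo_proc_ops) ? 0 : -ENOMEM;
}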
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index f6a8db04177c..23eae4188876 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -5,7 +5,7 @@ # The following is required for define_trace.h to find ./trace.h CFLAGS_trace.o := -I$(src) -CFLAGS_vfio_ccw_fsm.o := -I$(src) +CFLAGS_vfio_ccw_trace.o := -I$(src) obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o @@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \ - vfio_ccw_async.o + vfio_ccw_async.o vfio_ccw_trace.o obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2a3f874a21d5..da642e811f7f 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -398,12 +398,12 @@ cio_ignore_proc_open(struct inode *inode, struct file *file) sizeof(struct ccwdev_iter)); } -static const struct file_operations cio_ignore_proc_fops = { - .open = cio_ignore_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, - .write = cio_ignore_write, +static const struct proc_ops cio_ignore_proc_ops = { + .proc_open = cio_ignore_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release_private, + .proc_write = cio_ignore_write, }; static int @@ -412,7 +412,7 @@ cio_ignore_proc_init (void) struct proc_dir_entry *entry; entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, - &cio_ignore_proc_fops); + &cio_ignore_proc_ops); if (!entry) return -ENOENT; return 0; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index c522e9313c50..b42a93736668 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, goto error; } /* Check for trailing stuff. */ - if (i == num_devices && strlen(buf) > 0) { + if (i == num_devices && buf && strlen(buf) > 0) { rc = -EINVAL; goto error; } @@ -581,11 +581,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_register); -static int __ccwgroup_match_all(struct device *dev, const void *data) -{ - return 1; -} - /** * ccwgroup_driver_unregister() - deregister a ccw group driver * @cdriver: driver to be deregistered @@ -597,8 +592,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) struct device *dev; /* We don't want ccwgroup devices to live longer than their driver. 
*/ - while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, - __ccwgroup_match_all))) { + while ((dev = driver_find_next_device(&cdriver->driver, NULL))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); ccwgroup_ungroup(gdev); @@ -608,13 +602,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_unregister); -static int __ccwgroupdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - /** * get_ccwgroupdev_by_busid() - obtain device from a bus id * @gdrv: driver the device is owned by @@ -631,8 +618,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv, { struct device *dev; - dev = driver_find_device(&gdrv->driver, NULL, bus_id, - __ccwgroupdev_check_busid); + dev = driver_find_device_by_name(&gdrv->driver, bus_id); return dev ? to_ccwgroupdev(dev) : NULL; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index ba7d2480613b..dcdaba689b20 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; char *driver_override; /* Driver name to force a match */ } __attribute__ ((aligned(8))); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 22c55816100b..94edbb33d0d1 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, * belong to a subchannel need to fit 31 bit width (e.g. ccw). */ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); - sch->dev.dma_mask = &sch->dev.coherent_dma_mask; + /* + * But we don't have such restrictions imposed on the stuff that + * is handled by the streaming API. + */ + sch->dma_mask = DMA_BIT_MASK(64); + sch->dev.dma_mask = &sch->dma_mask; return sch; err: @@ -1367,18 +1372,17 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, return ret ? 
ret : count; } -static const struct file_operations cio_settle_proc_fops = { - .open = nonseekable_open, - .write = cio_settle_write, - .llseek = no_llseek, +static const struct proc_ops cio_settle_proc_ops = { + .proc_open = nonseekable_open, + .proc_write = cio_settle_write, + .proc_lseek = no_llseek, }; static int __init cio_settle_init(void) { struct proc_dir_entry *entry; - entry = proc_create("cio_settle", S_IWUSR, NULL, - &cio_settle_proc_fops); + entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops); if (!entry) return -ENOMEM; return 0; @@ -1388,6 +1392,8 @@ device_initcall(cio_settle_init); int sch_is_pseudo_sch(struct subchannel *sch) { + if (!sch->dev.parent) + return 0; return sch == to_css(sch->dev.parent)->pseudo_subchannel; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index c421899be20f..0c6245fc7706 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) if (!cdev->private) goto err_priv; cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; - cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; + cdev->dev.dma_mask = sch->dev.dma_mask; dma_pool = cio_gp_dma_create(&cdev->dev, 1); if (!dma_pool) goto err_dma_pool; @@ -1695,18 +1695,6 @@ int ccw_device_force_console(struct ccw_device *cdev) EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif -/* - * get ccw_device matching the busid, but only if owned by cdrv - */ -static int -__ccwdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - - /** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by @@ -1723,8 +1711,7 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, { struct device *dev; - dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, - __ccwdev_check_busid); + dev = driver_find_device_by_name(&cdrv->driver, bus_id); return dev ? to_ccwdev(dev) : NULL; } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d722458c5928..ccecf6b9504e 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -124,9 +124,7 @@ EXPORT_SYMBOL(ccw_device_is_multipath); /** * ccw_device_clear() - terminate I/O request processing * @cdev: target ccw device - * @intparm: interruption parameter; value is only used if no I/O is - * outstanding, otherwise the intparm associated with the I/O request - * is returned + * @intparm: interruption parameter to be returned upon conclusion of csch * * ccw_device_clear() calls csch on @cdev's subchannel. * Returns: @@ -179,6 +177,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * completed during the time specified by @expires. If a timeout occurs, the * channel program is terminated via xsch, hsch or csch, and the device's * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -256,6 +257,9 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, * Start a S/390 channel program. 
When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -287,6 +291,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -322,6 +329,9 @@ int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, * completed during the time specified by @expires. If a timeout occurs, the * channel program is terminated via xsch, hsch or csch, and the device's * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -343,11 +353,12 @@ int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, /** * ccw_device_halt() - halt I/O request processing * @cdev: target ccw device - * @intparm: interruption parameter; value is only used if no I/O is - * outstanding, otherwise the intparm associated with the I/O request - * is returned + * @intparm: interruption parameter to be returned upon conclusion of hsch * * ccw_device_halt() calls hsch on @cdev's subchannel. + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_clear(). * Returns: * %0 on success, * -%ENODEV on device not operational, @@ -624,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. 
*/ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a06944399865..ff74eb5fce50 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -82,6 +82,7 @@ enum qdio_irq_states { #define QDIO_SIGA_WRITE 0x00 #define QDIO_SIGA_READ 0x01 #define QDIO_SIGA_SYNC 0x02 +#define QDIO_SIGA_WRITEM 0x03 #define QDIO_SIGA_WRITEQ 0x04 #define QDIO_SIGA_QEBSM_FLAG 0x80 @@ -181,11 +182,9 @@ enum qdio_queue_irq_states { }; struct qdio_input_q { - /* input buffer acknowledgement flag */ - int polling; /* first ACK'ed buffer */ int ack_start; - /* how much sbals are acknowledged with qebsm */ + /* how many SBALs are acknowledged */ int ack_count; /* last time of noticing incoming data */ u64 timestamp; @@ -206,8 +205,6 @@ struct qdio_output_q { struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; - /* used SBALs before tasklet schedule */ - int scan_threshold; }; /* @@ -254,9 +251,6 @@ struct qdio_q { /* input or output queue */ int is_input_q; - /* list of thinint input queues */ - struct list_head entry; - /* upper-layer program handler */ qdio_handler_t (*handler); @@ -274,6 +268,7 @@ struct qdio_irq { struct qib qib; u32 *dsci; /* address of device state change indicator */ struct ccw_device *cdev; + struct list_head entry; /* list of thinint devices */ struct dentry *debugfs_dev; struct dentry *debugfs_perf; @@ -295,6 +290,7 @@ struct qdio_irq { struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); + unsigned int scan_threshold; /* used SBALs before tasklet schedule */ int perf_stat_enabled; struct qdr *qdr; @@ -318,13 +314,15 @@ struct qdio_irq { #define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) -#define qperf_inc(__q, __attr) \ +#define QDIO_PERF_STAT_INC(__irq, __attr) \ ({ \ - struct qdio_irq *qdev = (__q)->irq_ptr; \ + struct qdio_irq *qdev = __irq; \ if (qdev->perf_stat_enabled) \ (qdev->perf_stat.__attr)++; \ }) +#define qperf_inc(__q, __attr) QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr) + static inline void account_sbals_error(struct qdio_q *q, int count) { q->q_stats.nr_sbal_error += count; @@ -356,14 +354,10 @@ static inline int multicast_outbound(struct qdio_q *q) for (i = 0; i < irq_ptr->nr_output_qs && \ ({ q = irq_ptr->output_qs[i]; 1; }); i++) -#define prev_buf(bufnr) \ - ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) -#define next_buf(bufnr) \ - ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) -#define add_buf(bufnr, inc) \ - ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) -#define sub_buf(bufnr, dec) \ - ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) QDIO_BUFNR((bufnr) + (inc)) +#define next_buf(bufnr) add_buf(bufnr, 1) +#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec)) +#define prev_buf(bufnr) sub_buf(bufnr, 1) #define queue_irqs_enabled(q) \ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) @@ -376,8 +370,8 @@ extern u64 last_ai_time; void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); -void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); -void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_add_device(struct qdio_irq *irq_ptr); +void tiqdio_remove_device(struct qdio_irq *irq_ptr); void tiqdio_inbound_processing(unsigned long q); int tiqdio_allocate_memory(void); void tiqdio_free_memory(void); diff --git 
a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 35410e6eda2e..9c0370b27426 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -124,9 +124,8 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "nr_used: %d ftc: %d\n", atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { - seq_printf(m, "polling: %d ack start: %d ack count: %d\n", - q->u.in.polling, q->u.in.ack_start, - q->u.in.ack_count); + seq_printf(m, "ack start: %d ack count: %d\n", + q->u.in.ack_start, q->u.in.ack_count); seq_printf(m, "DSCI: %x IRQs disabled: %u\n", *(u8 *)q->irq_ptr->dsci, test_bit(QDIO_QUEUE_IRQS_DISABLED, diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4142c85e77d8..3475317c42e5 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -131,7 +131,7 @@ again: case 96: /* not all buffers processed */ qperf_inc(q, eqbs_partial); - DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", tmp_count); return count - tmp_count; case 97: @@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q) return qdio_siga_sync(q, q->mask, 0); } -static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, - unsigned long aob) +static int qdio_siga_output(struct qdio_q *q, unsigned int count, + unsigned int *busy_bit, unsigned long aob) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; int retries = 0, cc; - unsigned long laob = 0; - if (aob) { - fc = QDIO_SIGA_WRITEQ; - laob = aob; + if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) { + if (count > 1) + fc = QDIO_SIGA_WRITEM; + else if (aob) + fc = QDIO_SIGA_WRITEQ; } if (is_qebsm(q)) { @@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, fc |= QDIO_SIGA_QEBSM_FLAG; } again: - cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); + cc = do_siga_output(schid, q->mask, busy_bit, fc, aob); /* hipersocket busy condition */ if (unlikely(*busy_bit)) { @@ -392,19 +393,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, static inline void qdio_stop_polling(struct qdio_q *q) { - if (!q->u.in.polling) + if (!q->u.in.ack_count) return; - q->u.in.polling = 0; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ - if (is_qebsm(q)) { - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = 0; - } else - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 0; } static inline void account_sbals(struct qdio_q *q, unsigned int count) @@ -423,9 +420,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count) static void process_buffer_error(struct qdio_q *q, unsigned int start, int count) { - unsigned char state = (q->is_input_q) ? 
SLSB_P_INPUT_NOT_INIT : - SLSB_P_OUTPUT_NOT_INIT; - q->qdio_error = QDIO_ERROR_SLSB_STATE; /* special handling for no target buffer empty */ @@ -433,7 +427,7 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start, q->sbal[start]->element[15].sflags == 0x10) { qperf_inc(q, target_full); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); - goto set; + return; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); @@ -442,13 +436,6 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start, DBF_ERROR("F14:%2x F15:%2x", q->sbal[start]->element[14].sflags, q->sbal[start]->element[15].sflags); - -set: - /* - * Interrupts may be avoided as long as the error is present - * so change the buffer state immediately to avoid starvation. - */ - set_buf_states(q, start, state, count); } static inline void inbound_primed(struct qdio_q *q, unsigned int start, @@ -460,8 +447,7 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, /* for QEBSM the ACK was already set by EQBS */ if (is_qebsm(q)) { - if (!q->u.in.polling) { - q->u.in.polling = 1; + if (!q->u.in.ack_count) { q->u.in.ack_count = count; q->u.in.ack_start = start; return; @@ -480,12 +466,12 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, * or by the next inbound run. */ new = add_buf(start, count - 1); - if (q->u.in.polling) { + if (q->u.in.ack_count) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } else { - q->u.in.polling = 1; + q->u.in.ack_count = 1; set_buf_state(q, new, SLSB_P_INPUT_ACK); } @@ -530,6 +516,11 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) return count; case SLSB_P_INPUT_ERROR: process_buffer_error(q, start, count); + /* + * Interrupts may be avoided as long as the error is present + * so change the buffer state immediately to avoid starvation. 
+ */ + set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) @@ -647,8 +638,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count) qperf_inc(q, outbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", start, count); - if (q->u.out.use_cq) - qdio_handle_aobs(q, start, count); } q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, @@ -774,13 +763,17 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) count = get_outbound_buffer_frontier(q, start); - if (count) + if (count) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); + if (q->u.out.use_cq) + qdio_handle_aobs(q, start, count); + } return count; } -static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, + unsigned long aob) { int retries = 0, cc; unsigned int busy_bit; @@ -792,7 +785,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) retry: qperf_inc(q, siga_write); - cc = qdio_siga_output(q, &busy_bit, aob); + cc = qdio_siga_output(q, count, &busy_bit, aob); switch (cc) { case 0: break; @@ -879,7 +872,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) struct qdio_q *out; int i; - if (!pci_out_supported(irq)) + if (!pci_out_supported(irq) || !irq->scan_threshold) return; for_each_output_queue(irq, out, i) @@ -962,7 +955,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) /* skip if polling is enabled or already in work */ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); + QDIO_PERF_STAT_INC(irq_ptr, int_discarded); continue; } q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, @@ -972,7 +965,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) } } - if (!pci_out_supported(irq_ptr)) + if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold) return; for_each_output_queue(irq_ptr, q, i) { @@ -1161,7 +1154,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) */ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - tiqdio_remove_input_queues(irq_ptr); + tiqdio_remove_device(irq_ptr); qdio_shutdown_queues(cdev); qdio_shutdown_debug_entries(irq_ptr); @@ -1283,6 +1276,7 @@ int qdio_allocate(struct qdio_initialize *init_data) init_data->no_output_qs)) goto out_rel; + INIT_LIST_HEAD(&irq_ptr->entry); init_data->cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; @@ -1427,7 +1421,7 @@ int qdio_activate(struct ccw_device *cdev) } if (is_thinint_irq(irq_ptr)) - tiqdio_add_input_queues(irq_ptr); + tiqdio_add_device(irq_ptr); /* wait for subchannel to become active */ msleep(5); @@ -1480,13 +1474,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, qperf_inc(q, inbound_call); - if (!q->u.in.polling) + if (!q->u.in.ack_count) goto set; /* protect against stop polling setting an ACK for an emptied slsb */ if (count == QDIO_MAX_BUFFERS_PER_Q) { /* overwriting everything, just delete polling status */ - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { @@ -1496,15 +1489,14 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, diff = sub_buf(diff, q->u.in.ack_start); q->u.in.ack_count -= diff; if (q->u.in.ack_count <= 0) { - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } 
q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); + } else { + /* the only ACK will be deleted */ + q->u.in.ack_count = 0; } - else - /* the only ACK will be deleted, so stop polling */ - q->u.in.polling = 0; } set: @@ -1525,8 +1517,9 @@ set: * @count: how many buffers are filled */ static int handle_outbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) + unsigned int bufnr, unsigned int count) { + const unsigned int scan_threshold = q->irq_ptr->scan_threshold; unsigned char state = 0; int used, rc = 0; @@ -1547,13 +1540,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, if (queue_type(q) == QDIO_IQDIO_QFMT) { unsigned long phys_aob = 0; - /* One SIGA-W per buffer required for unicast HSI */ - WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); - - if (q->u.out.use_cq) + if (q->u.out.use_cq && count == 1) phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); - rc = qdio_kick_outbound_q(q, phys_aob); + rc = qdio_kick_outbound_q(q, count, phys_aob); } else if (need_siga_sync(q)) { rc = qdio_siga_sync_q(q); } else if (count < QDIO_MAX_BUFFERS_PER_Q && @@ -1562,11 +1552,15 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* The previous buffer is not processed yet, tack on. */ qperf_inc(q, fast_requeue); } else { - rc = qdio_kick_outbound_q(q, 0); + rc = qdio_kick_outbound_q(q, count, 0); } + /* Let drivers implement their own completion scanning: */ + if (!scan_threshold) + return rc; + /* in case of SIGA errors we must process the error immediately */ - if (used >= q->u.out.scan_threshold || rc) + if (used >= scan_threshold || rc) qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ @@ -1655,6 +1649,44 @@ rescan: } EXPORT_SYMBOL(qdio_start_irq); +static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, + unsigned int *error) +{ + unsigned int start = q->first_to_check; + int count; + + count = q->is_input_q ? qdio_inbound_q_moved(q, start) : + qdio_outbound_q_moved(q, start); + if (count == 0) + return 0; + + *bufnr = start; + *error = q->qdio_error; + + /* for the next time */ + q->first_to_check = add_buf(start, count); + q->qdio_error = 0; + + return count; +} + +int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, + unsigned int *bufnr, unsigned int *error) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + + if (!irq_ptr) + return -ENODEV; + q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; + + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + + return __qdio_inspect_queue(q, bufnr, error); +} +EXPORT_SYMBOL_GPL(qdio_inspect_queue); + /** * qdio_get_next_buffers - process input buffers * @cdev: associated ccw_device for the qdio subchannel @@ -1672,13 +1704,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, { struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; - unsigned int start; - int count; if (!irq_ptr) return -ENODEV; q = irq_ptr->input_qs[nr]; - start = q->first_to_check; /* * Cannot rely on automatic sync after interrupt since queues may @@ -1689,25 +1718,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, qdio_check_outbound_pci_queues(irq_ptr); - count = qdio_inbound_q_moved(q, start); - if (count == 0) - return 0; - - start = add_buf(start, count); - q->first_to_check = start; - /* Note: upper-layer MUST stop processing immediately here ... 
*/ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return -EIO; - *bufnr = q->first_to_kick; - *error = q->qdio_error; - - /* for the next time */ - q->first_to_kick = add_buf(q->first_to_kick, count); - q->qdio_error = 0; - - return count; + return __qdio_inspect_queue(q, bufnr, error); } EXPORT_SYMBOL(qdio_get_next_buffers); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index d4101cecdc8d..3ab8e80d7bbc 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr, irq_ptr->qib.pfmt = qib_param_field_format; if (qib_param_field) memcpy(irq_ptr->qib.parm, qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); + sizeof(irq_ptr->qib.parm)); if (!input_slib_elements) goto output; @@ -150,7 +150,6 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) return -ENOMEM; } irq_ptr_qs[i] = q; - INIT_LIST_HEAD(&q->entry); } return 0; } @@ -179,7 +178,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q->mask = 1 << (31 - i); q->nr = i; q->handler = handler; - INIT_LIST_HEAD(&q->entry); } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, @@ -248,7 +246,6 @@ static void setup_queues(struct qdio_irq *irq_ptr, output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; q->is_input_q = 0; - q->u.out.scan_threshold = qdio_init->scan_threshold; setup_storage_lists(q, irq_ptr, output_sbal_array, i); output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; @@ -474,6 +471,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->nr_input_qs = init_data->no_input_qs; irq_ptr->nr_output_qs = init_data->no_output_qs; irq_ptr->cdev = init_data->cdev; + irq_ptr->scan_threshold = init_data->scan_threshold; ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data); @@ -538,7 +536,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, int qdio_enable_async_operation(struct qdio_output_q *outq) { outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *), - GFP_ATOMIC); + GFP_KERNEL); if (!outq->aobs) { outq->use_cq = 0; return -ENOMEM; diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 93ee067c10ca..7c4e4ec08a12 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -39,14 +39,6 @@ struct indicator_t { static LIST_HEAD(tiq_list); static DEFINE_MUTEX(tiq_list_lock); -/* Adapter interrupt definitions */ -static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating); - -static struct airq_struct tiqdio_airq = { - .handler = tiqdio_thinint_handler, - .isc = QDIO_AIRQ_ISC, -}; - static struct indicator_t *q_indicators; u64 last_ai_time; @@ -74,26 +66,20 @@ static void put_indicator(u32 *addr) atomic_dec(&ind->count); } -void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) +void tiqdio_add_device(struct qdio_irq *irq_ptr) { mutex_lock(&tiq_list_lock); - list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); + list_add_rcu(&irq_ptr->entry, &tiq_list); mutex_unlock(&tiq_list_lock); } -void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +void tiqdio_remove_device(struct qdio_irq *irq_ptr) { - struct qdio_q *q; - - q = irq_ptr->input_qs[0]; - if (!q) - return; - mutex_lock(&tiq_list_lock); - list_del_rcu(&q->entry); + list_del_rcu(&irq_ptr->entry); mutex_unlock(&tiq_list_lock); synchronize_rcu(); - INIT_LIST_HEAD(&q->entry); + INIT_LIST_HEAD(&irq_ptr->entry); } static inline int has_multiple_inq_on_dsci(struct qdio_irq 
*irq_ptr) @@ -154,7 +140,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) /* skip if polling is enabled or already in work */ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); + QDIO_PERF_STAT_INC(irq, int_discarded); continue; } @@ -182,7 +168,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) { u32 si_used = clear_shared_ind(); - struct qdio_q *q; + struct qdio_irq *irq; last_ai_time = S390_lowcore.int_clock; inc_irq_stat(IRQIO_QAI); @@ -190,12 +176,8 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) /* protect tiq_list entries, only changed in activate or shutdown */ rcu_read_lock(); - /* check for work on all inbound thinint queues */ - list_for_each_entry_rcu(q, &tiq_list, entry) { - struct qdio_irq *irq; - + list_for_each_entry_rcu(irq, &tiq_list, entry) { /* only process queues from changed sets */ - irq = q->irq_ptr; if (unlikely(references_shared_dsci(irq))) { if (!si_used) continue; @@ -204,11 +186,16 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) tiqdio_call_inq_handlers(irq); - qperf_inc(q, adapter_int); + QDIO_PERF_STAT_INC(irq, adapter_int); } rcu_read_unlock(); } +static struct airq_struct tiqdio_airq = { + .handler = tiqdio_thinint_handler, + .isc = QDIO_AIRQ_ISC, +}; + static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h index 7cdc38049033..ba31240ce965 100644 --- a/drivers/s390/cio/vfio_ccw_cp.h +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -15,6 +15,7 @@ #include <asm/scsw.h> #include "orb.h" +#include "vfio_ccw_trace.h" /* * Max length for ccw chain. 
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 9208c0e56c33..e401a3d0aa57 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -27,6 +27,9 @@ struct workqueue_struct *vfio_ccw_work_q; static struct kmem_cache *vfio_ccw_io_region; static struct kmem_cache *vfio_ccw_cmd_region; +debug_info_t *vfio_ccw_debug_msg_id; +debug_info_t *vfio_ccw_debug_trace_id; + /* * Helpers */ @@ -164,6 +167,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_disable; + VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", + sch->schid.cssid, sch->schid.ssid, + sch->schid.sch_no); return 0; out_disable: @@ -194,6 +200,9 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) kfree(private->cp.guest_cp); kfree(private); + VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n", + sch->schid.cssid, sch->schid.ssid, + sch->schid.sch_no); return 0; } @@ -263,27 +272,64 @@ static struct css_driver vfio_ccw_sch_driver = { .sch_event = vfio_ccw_sch_event, }; +static int __init vfio_ccw_debug_init(void) +{ + vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1, + 11 * sizeof(long)); + if (!vfio_ccw_debug_msg_id) + goto out_unregister; + debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view); + debug_set_level(vfio_ccw_debug_msg_id, 2); + vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16); + if (!vfio_ccw_debug_trace_id) + goto out_unregister; + debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view); + debug_set_level(vfio_ccw_debug_trace_id, 2); + return 0; + +out_unregister: + debug_unregister(vfio_ccw_debug_msg_id); + debug_unregister(vfio_ccw_debug_trace_id); + return -1; +} + +static void vfio_ccw_debug_exit(void) +{ + debug_unregister(vfio_ccw_debug_msg_id); + debug_unregister(vfio_ccw_debug_trace_id); +} + static int __init vfio_ccw_sch_init(void) { - int ret = -ENOMEM; + int ret; + + ret = vfio_ccw_debug_init(); + if (ret) + return ret; vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); - if (!vfio_ccw_work_q) - return -ENOMEM; + if (!vfio_ccw_work_q) { + ret = -ENOMEM; + goto out_err; + } vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", sizeof(struct ccw_io_region), 0, SLAB_ACCOUNT, 0, sizeof(struct ccw_io_region), NULL); - if (!vfio_ccw_io_region) + if (!vfio_ccw_io_region) { + ret = -ENOMEM; goto out_err; + } vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region", sizeof(struct ccw_cmd_region), 0, SLAB_ACCOUNT, 0, sizeof(struct ccw_cmd_region), NULL); - if (!vfio_ccw_cmd_region) + if (!vfio_ccw_cmd_region) { + ret = -ENOMEM; goto out_err; + } isc_register(VFIO_CCW_ISC); ret = css_driver_register(&vfio_ccw_sch_driver); @@ -298,6 +344,7 @@ out_err: kmem_cache_destroy(vfio_ccw_cmd_region); kmem_cache_destroy(vfio_ccw_io_region); destroy_workqueue(vfio_ccw_work_q); + vfio_ccw_debug_exit(); return ret; } @@ -308,6 +355,7 @@ static void __exit vfio_ccw_sch_exit(void) kmem_cache_destroy(vfio_ccw_io_region); kmem_cache_destroy(vfio_ccw_cmd_region); destroy_workqueue(vfio_ccw_work_q); + vfio_ccw_debug_exit(); } module_init(vfio_ccw_sch_init); module_exit(vfio_ccw_sch_exit); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 49d9d3da0282..23e61aa638e4 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -15,9 +15,6 @@ #include "ioasm.h" #include "vfio_ccw_private.h" -#define CREATE_TRACE_POINTS -#include "vfio_ccw_trace.h" - static int fsm_io_helper(struct 
vfio_ccw_private *private) { struct subchannel *sch; @@ -37,9 +34,14 @@ static int fsm_io_helper(struct vfio_ccw_private *private) goto out; } + VFIO_CCW_TRACE_EVENT(5, "stIO"); + VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev)); + /* Issue "Start Subchannel" */ ccode = ssch(sch->schid, orb); + VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -86,9 +88,14 @@ static int fsm_do_halt(struct vfio_ccw_private *private) spin_lock_irqsave(sch->lock, flags); + VFIO_CCW_TRACE_EVENT(2, "haltIO"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* Issue "Halt Subchannel" */ ccode = hsch(sch->schid); + VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -122,9 +129,14 @@ static int fsm_do_clear(struct vfio_ccw_private *private) spin_lock_irqsave(sch->lock, flags); + VFIO_CCW_TRACE_EVENT(2, "clearIO"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* Issue "Clear Subchannel" */ ccode = csch(sch->schid); + VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -149,6 +161,9 @@ static void fsm_notoper(struct vfio_ccw_private *private, { struct subchannel *sch = private->sch; + VFIO_CCW_TRACE_EVENT(2, "notoper"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* * TODO: * Probably we should send the machine check to the guest. @@ -229,6 +244,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, struct ccw_io_region *io_region = private->io_region; struct mdev_device *mdev = private->mdev; char *errstr = "request"; + struct subchannel_id schid = get_schid(private); private->state = VFIO_CCW_STATE_CP_PROCESSING; memcpy(scsw, io_region->scsw_area, sizeof(*scsw)); @@ -239,18 +255,32 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Don't try to build a cp if transport mode is specified. */ if (orb->tm.b) { io_region->ret_code = -EOPNOTSUPP; + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): transport mode\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); errstr = "transport mode"; goto err_out; } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): cp_init=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp init"; goto err_out; } io_region->ret_code = cp_prefetch(&private->cp); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): cp_prefetch=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp prefetch"; cp_free(&private->cp); goto err_out; @@ -259,24 +289,37 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Start channel program and wait for I/O interrupt. 
*/ io_region->ret_code = fsm_io_helper(private); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): fsm_io_helper=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp fsm_io_helper"; cp_free(&private->cp); goto err_out; } return; } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): halt on io_region\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); /* halt is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): clear on io_region\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); /* clear is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } err_out: - trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), - io_region->ret_code, errstr); + trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid, + io_region->ret_code, errstr); } /* @@ -298,6 +341,10 @@ static void fsm_async_request(struct vfio_ccw_private *private, /* should not happen? */ cmd_region->ret_code = -EINVAL; } + + trace_vfio_ccw_fsm_async_request(get_schid(private), + cmd_region->command, + cmd_region->ret_code); } /* @@ -308,6 +355,9 @@ static void fsm_irq(struct vfio_ccw_private *private, { struct irb *irb = this_cpu_ptr(&cio_irb); + VFIO_CCW_TRACE_EVENT(6, "IRQ"); + VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev)); + memcpy(&private->irb, irb, sizeof(*irb)); queue_work(vfio_ccw_work_q, &private->io_work); diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 5eb61116ca6f..f0d71ab77c50 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -124,6 +124,11 @@ static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev) private->mdev = mdev; private->state = VFIO_CCW_STATE_IDLE; + VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n", + mdev_uuid(mdev), private->sch->schid.cssid, + private->sch->schid.ssid, + private->sch->schid.sch_no); + return 0; } @@ -132,6 +137,11 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); + VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n", + mdev_uuid(mdev), private->sch->schid.cssid, + private->sch->schid.ssid, + private->sch->schid.sch_no); + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { if (!vfio_ccw_sch_quiesce(private->sch)) diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index f1092c3dc1b1..9b9bb4982972 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -17,6 +17,7 @@ #include <linux/eventfd.h> #include <linux/workqueue.h> #include <linux/vfio_ccw.h> +#include <asm/debug.h> #include "css.h" #include "vfio_ccw_cp.h" @@ -134,9 +135,26 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS]; static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private, int event) { + trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event); vfio_ccw_jumptable[private->state][event](private, event); } extern struct workqueue_struct *vfio_ccw_work_q; + +/* s390 debug feature, similar to base cio */ +extern debug_info_t *vfio_ccw_debug_msg_id; +extern debug_info_t *vfio_ccw_debug_trace_id; + +#define VFIO_CCW_TRACE_EVENT(imp, txt) \ + 
debug_text_event(vfio_ccw_debug_trace_id, imp, txt) + +#define VFIO_CCW_MSG_EVENT(imp, args...) \ + debug_sprintf_event(vfio_ccw_debug_msg_id, imp, ##args) + +static inline void VFIO_CCW_HEX_EVENT(int level, void *data, int length) +{ + debug_event(vfio_ccw_debug_trace_id, level, data, length); +} + #endif diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c new file mode 100644 index 000000000000..8c671d2519f6 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_trace.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tracepoint definitions for vfio_ccw + * + * Copyright IBM Corp. 2019 + * Author(s): Eric Farman <farman@linux.ibm.com> + */ + +#define CREATE_TRACE_POINTS +#include "vfio_ccw_trace.h" + +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request); +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event); +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request); diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h index b1da53ddec1f..f5d31887d413 100644 --- a/drivers/s390/cio/vfio_ccw_trace.h +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Tracepoints for vfio_ccw driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Tracepoints for vfio_ccw driver * * Copyright IBM Corp. 2018 * @@ -7,6 +7,8 @@ * Halil Pasic <pasic@linux.vnet.ibm.com> */ +#include "cio.h" + #undef TRACE_SYSTEM #define TRACE_SYSTEM vfio_ccw @@ -15,28 +17,88 @@ #include <linux/tracepoint.h> -TRACE_EVENT(vfio_ccw_io_fctl, +TRACE_EVENT(vfio_ccw_fsm_async_request, + TP_PROTO(struct subchannel_id schid, + int command, + int errno), + TP_ARGS(schid, command, errno), + + TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, sch_no) + __field(int, command) + __field(int, errno) + ), + + TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->sch_no = schid.sch_no; + __entry->command = command; + __entry->errno = errno; + ), + + TP_printk("schid=%x.%x.%04x command=0x%x errno=%d", + __entry->cssid, + __entry->ssid, + __entry->sch_no, + __entry->command, + __entry->errno) +); + +TRACE_EVENT(vfio_ccw_fsm_event, + TP_PROTO(struct subchannel_id schid, int state, int event), + TP_ARGS(schid, state, event), + + TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, schno) + __field(int, state) + __field(int, event) + ), + + TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->schno = schid.sch_no; + __entry->state = state; + __entry->event = event; + ), + + TP_printk("schid=%x.%x.%04x state=%d event=%d", + __entry->cssid, __entry->ssid, __entry->schno, + __entry->state, + __entry->event) +); + +TRACE_EVENT(vfio_ccw_fsm_io_request, TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr), TP_ARGS(fctl, schid, errno, errstr), TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, sch_no) __field(int, fctl) - __field_struct(struct subchannel_id, schid) __field(int, errno) __field(char*, errstr) ), TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->sch_no = schid.sch_no; __entry->fctl = fctl; - __entry->schid = schid; __entry->errno = errno; __entry->errstr = errstr; ), - TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s", - __entry->schid.cssid, - __entry->schid.ssid, - __entry->schid.sch_no, + TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s", + __entry->cssid, + __entry->ssid, + __entry->sch_no, __entry->fctl, __entry->errno, __entry->errstr) |
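The qdio_main.c hunks above add an exported qdio_inspect_queue() helper so that an upper-layer driver which registers with scan_threshold == 0 can scan its queues for completions itself instead of relying on the qdio tasklet. A rough usage sketch under that assumption follows; the demo_* function and the surrounding device handling are invented for illustration.

#include <linux/printk.h>
#include <asm/ccwdev.h>
#include <asm/qdio.h>

static void demo_poll_input(struct ccw_device *cdev)
{
	unsigned int bufnr = 0, error = 0;
	int count;

	/* Returns how many buffers completed since the last call
	 * (starting at *bufnr), 0 if nothing moved, or a negative
	 * errno such as -ENODEV.
	 */
	count = qdio_inspect_queue(cdev, 0, true, &bufnr, &error);
	if (count <= 0)
		return;

	if (error)
		pr_warn("demo: input queue 0 in error state %u\n", error);

	/* Hand buffers bufnr .. bufnr + count - 1 (modulo 128) to the
	 * upper layer, then return them to the device via do_QDIO().
	 */
}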