author    Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>	2017-03-17 04:17:40 +0100
committer Cornelia Huck <cornelia.huck@de.ibm.com>	2017-03-31 12:55:11 +0200
commit    bbe37e4cb89702aa78e0f44618c5af7f9aaa33f6 (patch)
tree      46269ef030a61c19f629701e53678e0311b2d2c7 /drivers/s390/cio
parent    e5f84dbaea59b4f712dac428c337528b70e1c533 (diff)
vfio: ccw: introduce a finite state machine
The current implementation doesn't check if the subchannel is in a proper
device state when handling an event. Let's introduce a finite state machine
to manage the state/event change.

Signed-off-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
Message-Id: <20170317031743.40128-14-bjsdjshi@linux.vnet.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile            |   2
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c      | 116
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c      | 207
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c      |  28
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h  |  41
5 files changed, 287 insertions(+), 107 deletions(-)
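
Before the diff itself, a minimal, self-contained sketch of the state/event
jumptable pattern this patch introduces may help. The names and the trimmed-down
state set below are illustrative only and do not appear in the kernel sources;
the real table lives in vfio_ccw_fsm.c further down.

#include <stdio.h>

/* Simplified states/events, loosely mirroring the patch's enums. */
enum state { ST_NOT_OPER, ST_IDLE, ST_BUSY, NR_STATES };
enum event { EV_IO_REQ, EV_INTERRUPT, NR_EVENTS };

struct device { enum state state; };

typedef void (fsm_func_t)(struct device *, enum event);

static void act_io_start(struct device *d, enum event e)
{
	d->state = ST_BUSY;            /* accept the request */
	printf("I/O started\n");
}

static void act_io_busy(struct device *d, enum event e)
{
	printf("rejected: device busy\n");
}

static void act_irq(struct device *d, enum event e)
{
	d->state = ST_IDLE;            /* I/O completed */
	printf("interrupt handled\n");
}

static void act_nop(struct device *d, enum event e) { }

/* state x event -> action, analogous to vfio_ccw_jumptable[][] */
static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
	[ST_NOT_OPER] = { [EV_IO_REQ] = act_nop,      [EV_INTERRUPT] = act_nop },
	[ST_IDLE]     = { [EV_IO_REQ] = act_io_start, [EV_INTERRUPT] = act_irq },
	[ST_BUSY]     = { [EV_IO_REQ] = act_io_busy,  [EV_INTERRUPT] = act_irq },
};

static void fsm_event(struct device *d, enum event e)
{
	jumptable[d->state][e](d, e);
}

int main(void)
{
	struct device dev = { .state = ST_IDLE };

	fsm_event(&dev, EV_IO_REQ);     /* I/O started    */
	fsm_event(&dev, EV_IO_REQ);     /* rejected: busy */
	fsm_event(&dev, EV_INTERRUPT);  /* back to idle   */
	return 0;
}

Every event handler thus goes through one table lookup keyed by the current
state, which is what lets the driver reject requests that arrive in the wrong
state instead of acting on them blindly.
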
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index b0586b223502..bdf47526038a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -18,5 +18,5 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 3e0a40ec8ade..e90dd43d2a55 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -60,54 +60,12 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-
out_unlock:
+ private->state = VFIO_CCW_STATE_NOT_OPER;
spin_unlock_irq(sch->lock);
return ret;
}
-static int vfio_ccw_sch_io_helper(struct vfio_ccw_private *private)
-{
- struct subchannel *sch;
- union orb *orb;
- int ccode;
- __u8 lpm;
-
- sch = private->sch;
-
- orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
-
- /* Issue "Start Subchannel" */
- ccode = ssch(sch->schid, orb);
-
- switch (ccode) {
- case 0:
- /*
- * Initialize device status information
- */
- sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- return 0;
- case 1: /* Status pending */
- case 2: /* Busy */
- return -EBUSY;
- case 3: /* Device/path not operational */
- {
- lpm = orb->cmd.lpm;
- if (lpm != 0)
- sch->lpm &= ~lpm;
- else
- sch->lpm = 0;
-
- if (cio_update_schib(sch))
- return -ENODEV;
-
- return sch->lpm ? -EACCES : -ENODEV;
- }
- default:
- return ccode;
- }
-}
-
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
@@ -126,47 +84,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
-}
-
-/* Deal with the ccw command request from the userspace. */
-int vfio_ccw_sch_cmd_request(struct vfio_ccw_private *private)
-{
- struct mdev_device *mdev = private->mdev;
- union orb *orb;
- union scsw *scsw = &private->scsw;
- struct ccw_io_region *io_region = &private->io_region;
- int ret;
-
- memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
-
- if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
- orb = (union orb *)io_region->orb_area;
-
- ret = cp_init(&private->cp, mdev_dev(mdev), orb);
- if (ret)
- return ret;
-
- ret = cp_prefetch(&private->cp);
- if (ret) {
- cp_free(&private->cp);
- return ret;
- }
-
- /* Start channel program and wait for I/O interrupt. */
- ret = vfio_ccw_sch_io_helper(private);
- if (!ret)
- cp_free(&private->cp);
- } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- /* XXX: Handle halt. */
- ret = -EOPNOTSUPP;
- } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
- /* XXX: Handle clear. */
- ret = -EOPNOTSUPP;
- } else {
- ret = -EOPNOTSUPP;
- }
- return ret;
+ if (private->mdev)
+ private->state = VFIO_CCW_STATE_IDLE;
}
/*
@@ -223,20 +143,9 @@ static struct attribute_group vfio_subchannel_attr_group = {
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- struct irb *irb;
inc_irq_stat(IRQIO_CIO);
-
- if (!private)
- return;
-
- irb = this_cpu_ptr(&cio_irb);
- memcpy(&private->irb, irb, sizeof(*irb));
-
- queue_work(vfio_ccw_work_q, &private->io_work);
-
- if (private->completion)
- complete(private->completion);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
static int vfio_ccw_sch_probe(struct subchannel *sch)
@@ -258,6 +167,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
spin_lock_irq(sch->lock);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
spin_unlock_irq(sch->lock);
@@ -274,6 +184,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
atomic_set(&private->avail, 1);
+ private->state = VFIO_CCW_STATE_STANDBY;
return 0;
@@ -321,6 +232,7 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch)
*/
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
unsigned long flags;
spin_lock_irqsave(sch->lock, flags);
@@ -331,16 +243,16 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
goto out_unlock;
if (cio_update_schib(sch)) {
- /* Not operational. */
- css_sched_sch_todo(sch, SCH_TODO_UNREG);
-
- /*
- * TODO:
- * Probably we should send the machine check to the guest.
- */
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
goto out_unlock;
}
+ private = dev_get_drvdata(&sch->dev);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
+ VFIO_CCW_STATE_STANDBY;
+ }
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
new file mode 100644
index 000000000000..55b6cc55758e
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -0,0 +1,207 @@
+/*
+ * Finite state machine for vfio-ccw device handling
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "ioasm.h"
+#include "vfio_ccw_private.h"
+
+static int fsm_io_helper(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ union orb *orb;
+ int ccode;
+ __u8 lpm;
+ unsigned long flags;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+ private->state = VFIO_CCW_STATE_BUSY;
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+
+ /* Issue "Start Subchannel" */
+ ccode = ssch(sch->schid, orb);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+ return 0;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ return -EBUSY;
+ case 3: /* Device/path not operational */
+ {
+ lpm = orb->cmd.lpm;
+ if (lpm != 0)
+ sch->lpm &= ~lpm;
+ else
+ sch->lpm = 0;
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ return sch->lpm ? -EACCES : -ENODEV;
+ }
+ default:
+ return ccode;
+ }
+}
+
+static void fsm_notoper(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ /*
+ * TODO:
+ * Probably we should send the machine check to the guest.
+ */
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+}
+
+/*
+ * No operation action.
+ */
+static void fsm_nop(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+}
+
+static void fsm_io_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
+ private->io_region.ret_code = -EIO;
+}
+
+static void fsm_io_busy(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region.ret_code = -EBUSY;
+}
+
+static void fsm_disabled_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ /*
+ * An interrupt in a disabled state means a previous disable was not
+ * successful - should not happen, but we try to disable again.
+ */
+ cio_disable_subchannel(sch);
+}
+
+/*
+ * Deal with the ccw command request from the userspace.
+ */
+static void fsm_io_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ union orb *orb;
+ union scsw *scsw = &private->scsw;
+ struct ccw_io_region *io_region = &private->io_region;
+ struct mdev_device *mdev = private->mdev;
+
+ private->state = VFIO_CCW_STATE_BOXED;
+
+ memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
+
+ if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
+ orb = (union orb *)io_region->orb_area;
+
+ io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
+ orb);
+ if (io_region->ret_code)
+ goto err_out;
+
+ io_region->ret_code = cp_prefetch(&private->cp);
+ if (io_region->ret_code) {
+ cp_free(&private->cp);
+ goto err_out;
+ }
+
+ /* Start channel program and wait for I/O interrupt. */
+ io_region->ret_code = fsm_io_helper(private);
+ if (io_region->ret_code) {
+ cp_free(&private->cp);
+ goto err_out;
+ }
+ return;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ /* XXX: Handle halt. */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+ /* XXX: Handle clear. */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+err_out:
+ private->state = VFIO_CCW_STATE_IDLE;
+}
+
+/*
+ * Got an interrupt for a normal io (state busy).
+ */
+static void fsm_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct irb *irb;
+
+ if (!private)
+ return;
+
+ irb = this_cpu_ptr(&cio_irb);
+ memcpy(&private->irb, irb, sizeof(*irb));
+
+ queue_work(vfio_ccw_work_q, &private->io_work);
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+/*
+ * Device statemachine
+ */
+fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
+ [VFIO_CCW_STATE_NOT_OPER] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ },
+ [VFIO_CCW_STATE_STANDBY] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_IDLE] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_BOXED] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_BUSY] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+};
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index d754d3d90574..b2e615404034 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -35,7 +35,11 @@ static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
if (ret)
return ret;
- return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (!ret)
+ private->state = VFIO_CCW_STATE_IDLE;
+
+ return ret;
}
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
@@ -112,10 +116,14 @@ static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -ENODEV;
+
if (atomic_dec_if_positive(&private->avail) < 0)
return -EPERM;
private->mdev = mdev;
+ private->state = VFIO_CCW_STATE_IDLE;
return 0;
}
@@ -126,10 +134,20 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
dev_get_drvdata(mdev_parent_dev(mdev));
int ret;
+ if (!private)
+ goto out;
+
+ if ((private->state == VFIO_CCW_STATE_NOT_OPER) ||
+ (private->state == VFIO_CCW_STATE_STANDBY))
+ goto out;
+
ret = vfio_ccw_mdev_reset(mdev);
if (ret)
return ret;
+ private->state = VFIO_CCW_STATE_STANDBY;
+
+out:
private->mdev = NULL;
atomic_inc(&private->avail);
@@ -193,14 +211,18 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
private = dev_get_drvdata(mdev_parent_dev(mdev));
if (!private)
return -ENODEV;
+ if (private->state != VFIO_CCW_STATE_IDLE)
+ return -EACCES;
region = &private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT;
- region->ret_code = vfio_ccw_sch_cmd_request(private);
- if (region->ret_code != 0)
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ if (region->ret_code != 0) {
+ private->state = VFIO_CCW_STATE_IDLE;
return region->ret_code;
+ }
return count;
}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 2503797fb153..fc0f01c16ef9 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -21,6 +21,7 @@
/**
* struct vfio_ccw_private
* @sch: pointer to the subchannel
+ * @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
* @mdev: pointer to the mediated device
@@ -34,6 +35,7 @@
*/
struct vfio_ccw_private {
struct subchannel *sch;
+ int state;
struct completion *completion;
atomic_t avail;
struct mdev_device *mdev;
@@ -52,6 +54,43 @@ extern int vfio_ccw_mdev_reg(struct subchannel *sch);
extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
-extern int vfio_ccw_sch_cmd_request(struct vfio_ccw_private *private);
+
+/*
+ * States of the device statemachine.
+ */
+enum vfio_ccw_state {
+ VFIO_CCW_STATE_NOT_OPER,
+ VFIO_CCW_STATE_STANDBY,
+ VFIO_CCW_STATE_IDLE,
+ VFIO_CCW_STATE_BOXED,
+ VFIO_CCW_STATE_BUSY,
+ /* last element! */
+ NR_VFIO_CCW_STATES
+};
+
+/*
+ * Asynchronous events of the device statemachine.
+ */
+enum vfio_ccw_event {
+ VFIO_CCW_EVENT_NOT_OPER,
+ VFIO_CCW_EVENT_IO_REQ,
+ VFIO_CCW_EVENT_INTERRUPT,
+ /* last element! */
+ NR_VFIO_CCW_EVENTS
+};
+
+/*
+ * Action called through jumptable.
+ */
+typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
+extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
+
+static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
+ int event)
+{
+ vfio_ccw_jumptable[private->state][event](private, event);
+}
+
+extern struct workqueue_struct *vfio_ccw_work_q;
#endif
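
For quick reference, the jumptable added in vfio_ccw_fsm.c resolves to the
following state/event matrix (row and column names shortened from the
VFIO_CCW_STATE_* and VFIO_CCW_EVENT_* enums):

  State \ Event   NOT_OPER       IO_REQ           INTERRUPT
  NOT_OPER        fsm_nop        fsm_io_error     fsm_disabled_irq
  STANDBY         fsm_notoper    fsm_io_error     fsm_irq
  IDLE            fsm_notoper    fsm_io_request   fsm_irq
  BOXED           fsm_notoper    fsm_io_busy      fsm_irq
  BUSY            fsm_notoper    fsm_io_busy      fsm_irq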