author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 17:22:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 17:22:52 -0700
commit     1756ac3d3c41341297ea25b818b7fce505bb2a9a (patch)
tree       96382220afbb82fd5c576c4c08b3c3e13282851f
parent     98edb6ca4174f17a64890a02f44c211c8b44fb3c (diff)
parent     0643e4c6e4fd67778fa886a89e6ec2320e0ff4d3 (diff)
Merge branch 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (27 commits)
  drivers/char: Eliminate use after free
  virtio: console: Accept console size along with resize control message
  virtio: console: Store each console's size in the console structure
  virtio: console: Resize console port 0 on config intr only if multiport is off
  virtio: console: Add support for nonblocking write()s
  virtio: console: Rename wait_is_over() to will_read_block()
  virtio: console: Don't always create a port 0 if using multiport
  virtio: console: Use a control message to add ports
  virtio: console: Move code around for future patches
  virtio: console: Remove config work handler
  virtio: console: Don't call hvc_remove() on unplugging console ports
  virtio: console: Return -EPIPE to hvc_console if we lost the connection
  virtio: console: Let host know of port or device add failures
  virtio: console: Add a __send_control_msg() that can send messages without a valid port
  virtio: Revert "virtio: disable multiport console support."
  virtio: add_buf_gfp
  trans_virtio: use virtqueue_xxx wrappers
  virtio-rng: use virtqueue_xxx wrappers
  virtio_ring: remove a level of indirection
  virtio_net: use virtqueue_xxx wrappers
  ...

Fix up conflicts in drivers/net/virtio_net.c due to new virtqueue_xxx wrappers changes conflicting with some other cleanups.
-rw-r--r--  drivers/block/virtio_blk.c           |  46
-rw-r--r--  drivers/char/hw_random/virtio-rng.c  |   6
-rw-r--r--  drivers/char/virtio_console.c        | 700
-rw-r--r--  drivers/net/virtio_net.c             |  46
-rw-r--r--  drivers/virtio/virtio_balloon.c      |  17
-rw-r--r--  drivers/virtio/virtio_ring.c         |  44
-rw-r--r--  include/linux/virtio.h               |  55
-rw-r--r--  include/linux/virtio_blk.h           |   5
-rw-r--r--  include/linux/virtio_console.h       |  25
-rw-r--r--  net/9p/trans_virtio.c                |   6
10 files changed, 541 insertions, 409 deletions
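
The dominant change across this merge is mechanical: drivers stop going through the struct virtqueue_ops indirection (vq->vq_ops->add_buf() and friends) and call the exported virtqueue_*() helpers directly. Below is a minimal, illustrative sketch of that pattern; the function and variable names are invented for the example, and only the virtqueue_add_buf()/virtqueue_kick()/virtqueue_get_buf() calls come from this merge.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Queue one buffer and busy-wait for the host to consume it (sketch only). */
static int example_send_and_reclaim(struct virtqueue *vq, void *buf,
				    unsigned int len)
{
	struct scatterlist sg;
	unsigned int used_len;

	sg_init_one(&sg, buf, len);

	/* Pre-merge this was vq->vq_ops->add_buf(vq, &sg, 1, 0, buf). */
	if (virtqueue_add_buf(vq, &sg, 1, 0, buf) < 0)
		return -ENOSPC;

	virtqueue_kick(vq);			/* was vq->vq_ops->kick(vq) */

	/* was vq->vq_ops->get_buf(vq, &used_len) */
	while (!virtqueue_get_buf(vq, &used_len))
		cpu_relax();

	return 0;
}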
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2138a7ae050c..83fa09a836ca 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
unsigned long flags;
spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
+ if (blk_special_request(vbr->req))
+ vbr->req->errors = (error != 0);
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
case REQ_TYPE_LINUX_BLOCK:
if (req->cmd[0] == REQ_LB_OP_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
}
if (issued)
- vblk->vq->vq_ops->kick(vblk->vq);
+ virtqueue_kick(vblk->vq);
}
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd[0] = REQ_LB_OP_FLUSH;
}
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct request *req;
+ struct bio *bio;
+
+ bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ bio_put(bio);
+ return PTR_ERR(req);
+ }
+
+ req->cmd_type = REQ_TYPE_SPECIAL;
+ return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ if (cmd == 0x56424944) { /* 'VBID' */
+ void __user *usr_data = (void __user *)data;
+ char id_str[VIRTIO_BLK_ID_BYTES];
+ int err;
+
+ err = virtblk_get_id(disk, id_str);
+ if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+ return err;
+ }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
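
The hunk above also teaches virtio_blk a 'VBID' ioctl (magic 0x56424944) that issues a VIRTIO_BLK_T_GET_ID request and copies the device's serial string back to userspace. A hedged userspace sketch of exercising it follows; the /dev/vda node name is an assumption, and VIRTBLK_GET_ID here is only a local define mirroring the magic number checked in virtblk_ioctl().

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VIRTIO_BLK_ID_BYTES 20		/* matches include/linux/virtio_blk.h below */
#define VIRTBLK_GET_ID 0x56424944	/* 'VBID', as checked in virtblk_ioctl() */

int main(void)
{
	char id[VIRTIO_BLK_ID_BYTES + 1] = { 0 };
	int fd = open("/dev/vda", O_RDONLY);	/* assumed virtio disk node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIRTBLK_GET_ID, id) == 0)
		printf("serial: %s\n", id);
	close(fd);
	return 0;
}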
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a793efd..75f1cbd61c17 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vq, &data_avail))
return;
complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c2287a..8c99bf1b5e9f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -33,35 +33,6 @@
#include <linux/workqueue.h>
#include "hvc_console.h"
-/* Moved here from .h file in order to disable MULTIPORT. */
-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-
-struct virtio_console_multiport_conf {
- struct virtio_console_config config;
- /* max. number of ports this device can hold */
- __u32 max_nr_ports;
- /* number of ports added so far */
- __u32 nr_ports;
-} __attribute__((packed));
-
-/*
- * A message that's passed between the Host and the Guest for a
- * particular port.
- */
-struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
-};
-
-/* Some events for control messages */
-#define VIRTIO_CONSOLE_PORT_READY 0
-#define VIRTIO_CONSOLE_CONSOLE_PORT 1
-#define VIRTIO_CONSOLE_RESIZE 2
-#define VIRTIO_CONSOLE_PORT_OPEN 3
-#define VIRTIO_CONSOLE_PORT_NAME 4
-#define VIRTIO_CONSOLE_PORT_REMOVE 5
-
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -107,6 +78,9 @@ struct console {
/* The hvc device associated with this console port */
struct hvc_struct *hvc;
+ /* The size of the console */
+ struct winsize ws;
+
/*
* This number identifies the number that we used to register
* with hvc in hvc_instantiate() and hvc_alloc(); this is the
@@ -139,7 +113,6 @@ struct ports_device {
* notification
*/
struct work_struct control_work;
- struct work_struct config_work;
struct list_head ports;
@@ -150,7 +123,7 @@ struct ports_device {
spinlock_t cvq_lock;
/* The current config space is stored here */
- struct virtio_console_multiport_conf config;
+ struct virtio_console_config config;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -189,6 +162,9 @@ struct port {
*/
spinlock_t inbuf_lock;
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
/* The IO vqs for this port */
struct virtqueue *in_vq, *out_vq;
@@ -214,6 +190,8 @@ struct port {
/* The 'id' to identify the port with the Host */
u32 id;
+ bool outvq_full;
+
/* Is the host device open */
bool host_connected;
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port)
unsigned int len;
vq = port->in_vq;
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
- vq->vq_ops->kick(vq);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
return ret;
}
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port)
if (port->inbuf)
buf = port->inbuf;
else
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
ret = 0;
while (buf) {
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port)
ret++;
free_buf(buf);
}
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
}
port->inbuf = NULL;
if (ret)
@@ -403,57 +381,96 @@ out:
return ret;
}
-static ssize_t send_control_msg(struct port *port, unsigned int event,
- unsigned int value)
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
- if (!use_multiport(port->portdev))
+ if (!use_multiport(portdev))
return 0;
- cpkt.id = port->id;
+ cpkt.id = port_id;
cpkt.event = event;
cpkt.value = value;
- vq = port->portdev->c_ovq;
+ vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
- vq->vq_ops->kick(vq);
- while (!vq->vq_ops->get_buf(vq, &len))
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
return 0;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ return __send_control_msg(port->portdev, port->id, event, value);
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
{
struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
+ unsigned long flags;
unsigned int len;
out_vq = port->out_vq;
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
sg_init_one(sg, in_buf, in_count);
- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
/* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
+ virtqueue_kick(out_vq);
if (ret < 0) {
in_count = 0;
- goto fail;
+ goto done;
}
- /* Wait till the host acknowledges it pushed out the data we sent. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for ports in blocking mode or for data
+ * from the hvc_console; the tty operations are performed with
+ * spinlocks held so we can't sleep here.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
-fail:
- /* We're expected to return the amount of data we wrote */
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
return in_count;
}
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
}
/* The condition that must be true for polling to end */
-static bool wait_is_over(struct port *port)
+static bool will_read_block(struct port *port)
+{
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
{
- return port_has_data(port) || !port->host_connected;
+ bool ret;
+
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return -EAGAIN;
ret = wait_event_interruptible(port->waitqueue,
- wait_is_over(port));
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
struct port *port;
char *buf;
ssize_t ret;
+ bool nonblock;
port = filp->private_data;
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+
count = min((size_t)(32 * 1024), count);
buf = kmalloc(count, GFP_KERNEL);
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
goto free_buf;
}
- ret = send_buf(port, buf, count);
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
free_buf:
kfree(buf);
+out:
return ret;
}
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
ret = 0;
if (port->inbuf)
ret |= POLLIN | POLLRDNORM;
- if (port->host_connected)
+ if (!will_write_block(port))
ret |= POLLOUT;
if (!port->host_connected)
ret |= POLLHUP;
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp)
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
return 0;
}
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp)
port->guest_connected = true;
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * There might be a chance that we missed reclaiming a few
+ * buffers in the window of the port getting previously closed
+ * and opening now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count)
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
- return send_buf(port, (void *)buf, count);
+ return send_buf(port, (void *)buf, count, false);
}
/*
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count)
{
struct port *port;
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count)
static void resize_console(struct port *port)
{
struct virtio_device *vdev;
- struct winsize ws;
/* The port could have been hot-unplugged */
- if (!port)
+ if (!port || !is_console_port(port))
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(port->cons.hvc, ws);
- }
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
@@ -804,6 +867,13 @@ int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"host_connected: %d\n", port->host_connected);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = {
.read = debugfs_read,
};
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
/* Remove all port-specific data. */
static int remove_port(struct port *port)
{
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
+#if 0
+ /*
+ * hvc_remove() not called as removing one hvc port
+ * results in other hvc ports getting frozen.
+ *
+ * Once this is resolved in hvc, this functionality
+ * will be enabled. Till that is done, the -EPIPE
+ * return from get_chars() above will help
+ * hvc_console.c to clean up on ports we remove here.
+ */
hvc_remove(port->cons.hvc);
+#endif
}
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port)
/* Remove unused data this port might have received. */
discard_port_data(port);
+ reclaim_consumed_buffers(port);
+
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
kfree(port->name);
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
port = find_port_by_id(portdev, cpkt->id);
- if (!port) {
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev,
}
switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ remove_port(port);
+ break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
break;
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev,
* have to notify the host first.
*/
break;
- case VIRTIO_CONSOLE_RESIZE:
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
if (!is_console_port(port))
break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
port->cons.hvc->irq_requested = 1;
resize_console(port);
break;
+ }
case VIRTIO_CONSOLE_PORT_OPEN:
port->host_connected = cpkt->value;
wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev,
kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
}
break;
- case VIRTIO_CONSOLE_PORT_REMOVE:
- /*
- * Hot unplug the port. We don't decrement nr_ports
- * since we don't want to deal with extra complexities
- * of using the lowest-available port id: We can just
- * pick up the nr_ports number as the id and not have
- * userspace send it to us. This helps us in two
- * ways:
- *
- * - We don't need to have a 'port_id' field in the
- * config space when a port is hot-added. This is a
- * good thing as we might queue up multiple hotplug
- * requests issued in our workqueue.
- *
- * - Another way to deal with this would have been to
- * use a bitmap of the active ports and select the
- * lowest non-active port from that map. That
- * bloats the already tight config space and we
- * would end up artificially limiting the
- * max. number of ports to sizeof(bitmap). Right
- * now we can support 2^32 ports (as the port id is
- * stored in a u32 type).
- *
- */
- remove_port(port);
- break;
}
}
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work)
vq = portdev->c_ivq;
spin_lock(&portdev->cvq_lock);
- while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->cvq_lock);
buf->len = len;
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev)
struct ports_device *portdev;
portdev = vdev->priv;
- if (use_multiport(portdev)) {
- /* Handle port hot-add */
- schedule_work(&portdev->config_work);
- }
- /*
- * We'll use this way of resizing only for legacy support.
- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use
- * control messages to indicate console size changes so that
- * it can be done per-port
- */
- resize_console(find_port_by_id(portdev, 0));
-}
-
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
-{
- struct port_buffer *buf;
- unsigned int nr_added_bufs;
- int ret;
-
- nr_added_bufs = 0;
- do {
- buf = alloc_buf(PAGE_SIZE);
- if (!buf)
- break;
-
- spin_lock_irq(lock);
- ret = add_inbuf(vq, buf);
- if (ret < 0) {
- spin_unlock_irq(lock);
- free_buf(buf);
- break;
- }
- nr_added_bufs++;
- spin_unlock_irq(lock);
- } while (ret > 0);
-
- return nr_added_bufs;
-}
-
-static int add_port(struct ports_device *portdev, u32 id)
-{
- char debugfs_name[16];
- struct port *port;
- struct port_buffer *buf;
- dev_t devt;
- unsigned int nr_added_bufs;
- int err;
-
- port = kmalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto fail;
- }
-
- port->portdev = portdev;
- port->id = id;
-
- port->name = NULL;
- port->inbuf = NULL;
- port->cons.hvc = NULL;
-
- port->host_connected = port->guest_connected = false;
-
- port->in_vq = portdev->in_vqs[port->id];
- port->out_vq = portdev->out_vqs[port->id];
-
- cdev_init(&port->cdev, &port_fops);
-
- devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
- if (err < 0) {
- dev_err(&port->portdev->vdev->dev,
- "Error %d adding cdev for port %u\n", err, id);
- goto free_port;
- }
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
- devt, port, "vport%up%u",
- port->portdev->drv_index, id);
- if (IS_ERR(port->dev)) {
- err = PTR_ERR(port->dev);
- dev_err(&port->portdev->vdev->dev,
- "Error %d creating device for port %u\n",
- err, id);
- goto free_cdev;
- }
-
- spin_lock_init(&port->inbuf_lock);
- init_waitqueue_head(&port->waitqueue);
-
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
- dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
- goto free_device;
- }
-
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
- err = init_port_console(port);
- if (err)
- goto free_inbufs;
- }
-
- spin_lock_irq(&portdev->ports_lock);
- list_add_tail(&port->list, &port->portdev->ports);
- spin_unlock_irq(&portdev->ports_lock);
-
- /*
- * Tell the Host we're set so that it can send us various
- * configuration parameters for this port (eg, port name,
- * caching, whether this is a console port, etc.)
- */
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
-
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_ops);
- }
- return 0;
-
-free_inbufs:
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
- free_buf(buf);
-free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
-free_cdev:
- cdev_del(&port->cdev);
-free_port:
- kfree(port);
-fail:
- return err;
-}
-/*
- * The workhandler for config-space updates.
- *
- * This is called when ports are hot-added.
- */
-static void config_work_handler(struct work_struct *work)
-{
- struct virtio_console_multiport_conf virtconconf;
- struct ports_device *portdev;
- struct virtio_device *vdev;
- int err;
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
- portdev = container_of(work, struct ports_device, config_work);
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
- vdev = portdev->vdev;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &virtconconf.nr_ports,
- sizeof(virtconconf.nr_ports));
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
- if (portdev->config.nr_ports == virtconconf.nr_ports) {
/*
- * Port 0 got hot-added. Since we already did all the
- * other initialisation for it, just tell the Host
- * that the port is ready if we find the port. In
- * case the port was hot-removed earlier, we call
- * add_port to add the port.
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+ * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
*/
- struct port *port;
-
- port = find_port_by_id(portdev, 0);
- if (!port)
- add_port(portdev, 0);
- else
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- return;
- }
- if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports specified (%u) than allowed (%u)",
- portdev->config.nr_ports + 1,
- portdev->config.max_nr_ports);
- return;
- }
- if (virtconconf.nr_ports < portdev->config.nr_ports)
- return;
-
- /* Hot-add ports */
- while (virtconconf.nr_ports - portdev->config.nr_ports) {
- err = add_port(portdev, portdev->config.nr_ports);
- if (err)
- break;
- portdev->config.nr_ports++;
+ resize_console(port);
}
}
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = {
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
- u32 i;
int err;
bool multiport;
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.nr_ports = 1;
portdev->config.max_nr_ports = 1;
-#if 0 /* Multiport is not quite ready yet --RR */
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &portdev->config.nr_ports,
- sizeof(portdev->config.nr_ports));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- max_nr_ports),
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
- if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports (%u) specified than allowed (%u). Will init %u ports.",
- portdev->config.nr_ports,
- portdev->config.max_nr_ports,
- portdev->config.max_nr_ports);
-
- portdev->config.nr_ports = portdev->config.max_nr_ports;
- }
}
/* Let the Host know we support multiple ports.*/
vdev->config->finalize_features(vdev);
-#endif
err = init_vqs(portdev);
if (err < 0) {
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- INIT_WORK(&portdev->config_work, &config_work_handler);
nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto free_vqs;
}
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on older host.
+ */
+ add_port(portdev, 0);
}
- for (i = 0; i < portdev->config.nr_ports; i++)
- add_port(portdev, i);
-
- /* Start using the new console output. */
- early_put_chars = NULL;
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
free_vqs:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
vdev->config->del_vqs(vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev)
portdev = vdev->priv;
cancel_work_sync(&portdev->control_work);
- cancel_work_sync(&portdev->config_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
remove_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
free_buf(buf);
- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
free_buf(buf);
vdev->config->del_vqs(vdev);
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
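
Taken together, the console changes above give port chardevs real O_NONBLOCK semantics: write() returns -EAGAIN while the out_vq is full, and poll() only reports POLLOUT once will_write_block() would be false. A hedged userspace sketch of driving a port that way follows; the /dev/vport0p1 name follows the vport%up%u pattern from add_port(), but which ports exist depends entirely on the host configuration.

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello from the guest\n";
	struct pollfd pfd;
	int fd = open("/dev/vport0p1", O_RDWR | O_NONBLOCK);	/* assumed port node */

	if (fd < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLOUT;

	/* Block in poll() rather than in write(); POLLOUT means the out_vq has room. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT))
		write(fd, msg, strlen(msg));	/* may still see EAGAIN if raced */

	close(fd);
	return 0;
}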
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0a85d038796..78eb3190b9b1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -122,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */
- svq->vq_ops->disable_cb(svq);
+ virtqueue_disable_cb(svq);
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
@@ -210,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
return -EINVAL;
}
- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers);
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
if (err < 0)
dev_kfree_skb(skb);
@@ -385,7 +385,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
/* chain first in list head */
first->private = (unsigned long)list;
- err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
first);
if (err < 0)
give_pages(vi, first);
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
if (err < 0)
give_pages(vi, page);
@@ -433,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
+ virtqueue_kick(vi->rvq);
return !oom;
}
@@ -442,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) {
- rvq->vq_ops->disable_cb(rvq);
+ virtqueue_disable_cb(rvq);
__napi_schedule(&vi->napi);
}
}
@@ -471,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
again:
while (received < budget &&
- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
receive_buf(vi->dev, buf, len);
--vi->num;
received++;
@@ -485,9 +485,9 @@ again:
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+ if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
napi_schedule_prep(napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(napi);
goto again;
}
@@ -501,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
@@ -554,7 +554,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
- return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+ return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
0, skb);
}
@@ -574,14 +574,14 @@ again:
if (unlikely(capacity < 0)) {
netif_stop_queue(dev);
dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ virtqueue_disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
return NETDEV_TX_BUSY;
}
- vi->svq->vq_ops->kick(vi->svq);
+ virtqueue_kick(vi->svq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -591,12 +591,12 @@ again:
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
netif_start_queue(dev);
- vi->svq->vq_ops->disable_cb(vi->svq);
+ virtqueue_disable_cb(vi->svq);
}
}
}
@@ -641,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
* now. virtnet_poll wants re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(&vi->napi);
}
return 0;
@@ -678,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
- vi->cvq->vq_ops->kick(vi->cvq);
+ virtqueue_kick(vi->cvq);
/*
* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -1003,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf)
break;
dev_kfree_skb(buf);
}
while (1) {
- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf)
break;
if (vi->mergeable_rx_bufs || vi->big_packets)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfec7c29486d..0f1da45ba47d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (vb)
complete(&vb->acked);
}
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (!vb)
return;
vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
vq = vb->stats_vq;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
- &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
BUG();
- vb->stats_vq->vq_ops->kick(vb->stats_vq);
+ virtqueue_kick(vb->stats_vq);
}
vb->thread = kthread_run(balloon, vb, "vballoon");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 0f90634bcb85..1ca88908723b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -110,13 +110,14 @@ struct vring_virtqueue
static int vring_add_indirect(struct vring_virtqueue *vq,
struct scatterlist sg[],
unsigned int out,
- unsigned int in)
+ unsigned int in,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
int i;
- desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return vq->vring.num;
@@ -155,11 +156,12 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-static int vring_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data)
+int virtqueue_add_buf_gfp(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, head, uninitialized_var(prev);
@@ -171,7 +173,7 @@ static int vring_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
- head = vring_add_indirect(vq, sg, out, in);
+ head = vring_add_indirect(vq, sg, out, in, gfp);
if (head != vq->vring.num)
goto add_head;
}
@@ -232,8 +234,9 @@ add_head:
return vq->num_free ? vq->vring.num : 0;
return vq->num_free;
}
+EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
-static void vring_kick(struct virtqueue *_vq)
+void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
START_USE(vq);
@@ -253,6 +256,7 @@ static void vring_kick(struct virtqueue *_vq)
END_USE(vq);
}
+EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
@@ -284,7 +288,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->last_used_idx != vq->vring.used->idx;
}
-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
@@ -325,15 +329,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
END_USE(vq);
return ret;
}
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
-static void vring_disable_cb(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
-static bool vring_enable_cb(struct virtqueue *_vq)
+bool virtqueue_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -351,8 +357,9 @@ static bool vring_enable_cb(struct virtqueue *_vq)
END_USE(vq);
return true;
}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
-static void *vring_detach_unused_buf(struct virtqueue *_vq)
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
@@ -375,6 +382,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq)
END_USE(vq);
return NULL;
}
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
@@ -396,15 +404,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-static struct virtqueue_ops vring_vq_ops = {
- .add_buf = vring_add_buf,
- .get_buf = vring_get_buf,
- .kick = vring_kick,
- .disable_cb = vring_disable_cb,
- .enable_cb = vring_enable_cb,
- .detach_unused_buf = vring_detach_unused_buf,
-};
-
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -429,7 +428,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
- vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->broken = false;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 40d1709bdbf4..aff5b4f74041 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/gfp.h>
/**
* virtqueue - a queue to register buffers for sending or receiving.
@@ -14,7 +15,6 @@
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
- * @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
struct virtqueue {
@@ -22,60 +22,71 @@ struct virtqueue {
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
- struct virtqueue_ops *vq_ops;
void *priv;
};
/**
- * virtqueue_ops - operations for virtqueue abstraction layer
- * @add_buf: expose buffer to other end
+ * operations for virtqueue
+ * virtqueue_add_buf: expose buffer to other end
* vq: the struct virtqueue we're talking about.
* sg: the description of the buffer(s).
* out_num: the number of sg readable by other side
* in_num: the number of sg which are writable (after readable ones)
* data: the token identifying the buffer.
+ * gfp: how to do memory allocations (if necessary).
* Returns remaining capacity of queue (sg segments) or a negative error.
- * @kick: update after add_buf
+ * virtqueue_kick: update after add_buf
* vq: the struct virtqueue
* After one or more add_buf calls, invoke this to kick the other side.
- * @get_buf: get the next used buffer
+ * virtqueue_get_buf: get the next used buffer
* vq: the struct virtqueue we're talking about.
* len: the length written into the buffer
* Returns NULL or the "data" token handed to add_buf.
- * @disable_cb: disable callbacks
+ * virtqueue_disable_cb: disable callbacks
* vq: the struct virtqueue we're talking about.
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
- * @enable_cb: restart callbacks after disable_cb.
+ * virtqueue_enable_cb: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
- * @detach_unused_buf: detach first unused buffer
+ * virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
- * of @disable_cb.
+ * of virtqueue_disable_cb.
*
* All operations can be called in any context.
*/
-struct virtqueue_ops {
- int (*add_buf)(struct virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out_num,
- unsigned int in_num,
- void *data);
- void (*kick)(struct virtqueue *vq);
+int virtqueue_add_buf_gfp(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp);
- void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data)
+{
+ return virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, GFP_ATOMIC);
+}
- void (*disable_cb)(struct virtqueue *vq);
- bool (*enable_cb)(struct virtqueue *vq);
- void *(*detach_unused_buf)(struct virtqueue *vq);
-};
+void virtqueue_kick(struct virtqueue *vq);
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+bool virtqueue_enable_cb(struct virtqueue *vq);
+
+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
/**
* virtio_device - representation of a device using virtio
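
The reworked include/linux/virtio.h above is also where the new allocation-flag variant lands: virtqueue_add_buf_gfp() takes an explicit gfp_t, while virtqueue_add_buf() is a static inline keeping the old GFP_ATOMIC behaviour. A minimal sketch of choosing between them follows; the example_queue_buf() wrapper and its can_sleep flag are illustrative, not from the patch.

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/virtio.h>

/* Queue a single output buffer, letting process-context callers sleep. */
static int example_queue_buf(struct virtqueue *vq, void *buf, unsigned int len,
			     bool can_sleep)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);

	if (can_sleep)
		/* Indirect-descriptor allocation may sleep with GFP_KERNEL. */
		return virtqueue_add_buf_gfp(vq, &sg, 1, 0, buf, GFP_KERNEL);

	/* Same as the pre-merge behaviour: always GFP_ATOMIC. */
	return virtqueue_add_buf(vq, &sg, 1, 0, buf);
}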
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index e52029e98919..167720d695ed 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -17,6 +17,8 @@
#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
+#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
+
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
__u64 capacity;
@@ -67,6 +69,9 @@ struct virtio_blk_config {
/* Cache flush command */
#define VIRTIO_BLK_T_FLUSH 4
+/* Get device ID command */
+#define VIRTIO_BLK_T_GET_ID 8
+
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index 92228a8fbcbc..a85064db8f94 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -12,14 +12,39 @@
/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+
+#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
struct virtio_console_config {
/* colums of the screens */
__u16 cols;
/* rows of the screens */
__u16 rows;
+ /* max. number of ports this device can hold */
+ __u32 max_nr_ports;
} __attribute__((packed));
+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+ __u32 id; /* Port number */
+ __u16 event; /* The kind of control event (see below) */
+ __u16 value; /* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_DEVICE_READY 0
+#define VIRTIO_CONSOLE_PORT_ADD 1
+#define VIRTIO_CONSOLE_PORT_REMOVE 2
+#define VIRTIO_CONSOLE_PORT_READY 3
+#define VIRTIO_CONSOLE_CONSOLE_PORT 4
+#define VIRTIO_CONSOLE_RESIZE 5
+#define VIRTIO_CONSOLE_PORT_OPEN 6
+#define VIRTIO_CONSOLE_PORT_NAME 7
+
#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
#endif /* __KERNEL__ */
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ecc1618..dcfbe99ff81c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+ while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req->status = REQ_STATUS_SENT;
- if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+ if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: virtio rpc add_buf returned failure");
return -EIO;
}
- chan->vq->vq_ops->kick(chan->vq);
+ virtqueue_kick(chan->vq);
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
return 0;