Diffstat (limited to 'drivers/virtio')
-rw-r--r--   drivers/virtio/Makefile                                                        |   1
-rw-r--r--   drivers/virtio/virtio.c                                                        | 111
-rw-r--r--   drivers/virtio/virtio_balloon.c                                                |  57
-rw-r--r--   drivers/virtio/virtio_mmio.c                                                   |  18
-rw-r--r--   drivers/virtio/virtio_pci_common.c (renamed from drivers/virtio/virtio_pci.c)  | 437
-rw-r--r--   drivers/virtio/virtio_pci_common.h                                             | 135
-rw-r--r--   drivers/virtio/virtio_pci_legacy.c                                             | 306
-rw-r--r--   drivers/virtio/virtio_ring.c                                                   | 109
8 files changed, 726 insertions, 448 deletions
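
The thread running through these patches is the move from the old fixed 32-bit feature bitmap (set_bit()/test_bit() on dev->features[]) to a plain u64, plus a fallible finalize_features() that can verify the new VIRTIO_CONFIG_S_FEATURES_OK handshake. Below is a compilable userspace sketch of the negotiation logic from virtio_dev_probe(); struct drv, negotiate() and the feature tables are simplified stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_F_VERSION_1 32	/* virtio 1.0 feature bit, as in the uapi header */

/* Simplified stand-in for struct virtio_driver's feature tables. */
struct drv {
	const unsigned int *feature_table;
	unsigned int feature_table_size;
	const unsigned int *feature_table_legacy;
	unsigned int feature_table_size_legacy;
};

/* Mirrors the negotiation in virtio_dev_probe(): fold the driver's
 * table(s) into a u64 mask, then intersect with the device features,
 * using the legacy table when the device does not offer VERSION_1.
 * (The kernel additionally preserves transport feature bits; omitted.) */
static uint64_t negotiate(uint64_t device_features, const struct drv *d)
{
	uint64_t drv_f = 0, drv_f_legacy;
	unsigned int i;

	for (i = 0; i < d->feature_table_size; i++)
		drv_f |= 1ULL << d->feature_table[i];

	if (d->feature_table_legacy) {
		drv_f_legacy = 0;
		for (i = 0; i < d->feature_table_size_legacy; i++)
			drv_f_legacy |= 1ULL << d->feature_table_legacy[i];
	} else {
		/* No separate legacy table: same bits for both. */
		drv_f_legacy = drv_f;
	}

	if (device_features & (1ULL << VIRTIO_F_VERSION_1))
		return drv_f & device_features;
	return drv_f_legacy & device_features;
}

int main(void)
{
	static const unsigned int modern[] = { 0, 5, 33 };
	static const unsigned int legacy[] = { 0, 5 };
	const struct drv d = { modern, 3, legacy, 2 };

	/* Device offers bits 0, 5 and VERSION_1: the modern table is used. */
	printf("%#llx\n", (unsigned long long)
	       negotiate((1ULL << 0) | (1ULL << 5) | (1ULL << 32), &d));
	return 0;
}

A driver that ships a feature_table_legacy keeps working against pre-1.0 devices while still being able to advertise bits above 31 (such as the hypothetical bit 33 above) to modern ones.
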
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 9076635697bb..bf5104b56894 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o +virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index df598dd8c5c8..b9f70dfc4751 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -3,6 +3,7 @@ #include <linux/virtio_config.h> #include <linux/module.h> #include <linux/idr.h> +#include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. */ static DEFINE_IDA(virtio_index_ida); @@ -49,9 +50,9 @@ static ssize_t features_show(struct device *_d, /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ - for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) + for (i = 0; i < sizeof(dev->features)*8; i++) len += sprintf(buf+len, "%c", - test_bit(i, dev->features) ? '1' : '0'); + __virtio_test_bit(dev, i) ? '1' : '0'); len += sprintf(buf+len, "\n"); return len; } @@ -113,6 +114,13 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; + + if (drv->feature_table_legacy) { + for (i = 0; i < drv->feature_table_size_legacy; i++) + if (drv->feature_table_legacy[i] == fbit) + return; + } + BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); @@ -154,12 +162,35 @@ static void virtio_config_enable(struct virtio_device *dev) spin_unlock_irq(&dev->config_lock); } +static int virtio_finalize_features(struct virtio_device *dev) +{ + int ret = dev->config->finalize_features(dev); + unsigned status; + + if (ret) + return ret; + + if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) + return 0; + + add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + status = dev->config->get_status(dev); + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { + dev_err(&dev->dev, "virtio: device refuses features: %x\n", + status); + return -ENODEV; + } + return 0; +} + static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); - u32 device_features; + u64 device_features; + u64 driver_features; + u64 driver_features_legacy; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); @@ -167,34 +198,55 @@ static int virtio_dev_probe(struct device *_d) /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); - /* Features supported by both device and driver into dev->features. */ - memset(dev->features, 0, sizeof(dev->features)); + /* Figure out what features the driver supports. 
*/ + driver_features = 0; for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; - BUG_ON(f >= 32); - if (device_features & (1 << f)) - set_bit(f, dev->features); + BUG_ON(f >= 64); + driver_features |= (1ULL << f); + } + + /* Some drivers have a separate feature table for virtio v1.0 */ + if (drv->feature_table_legacy) { + driver_features_legacy = 0; + for (i = 0; i < drv->feature_table_size_legacy; i++) { + unsigned int f = drv->feature_table_legacy[i]; + BUG_ON(f >= 64); + driver_features_legacy |= (1ULL << f); + } + } else { + driver_features_legacy = driver_features; } + if (device_features & (1ULL << VIRTIO_F_VERSION_1)) + dev->features = driver_features & device_features; + else + dev->features = driver_features_legacy & device_features; + /* Transport features always preserved to pass to finalize_features. */ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) - if (device_features & (1 << i)) - set_bit(i, dev->features); + if (device_features & (1ULL << i)) + __virtio_set_bit(dev, i); - dev->config->finalize_features(dev); + err = virtio_finalize_features(dev); + if (err) + goto err; err = drv->probe(dev); if (err) - add_status(dev, VIRTIO_CONFIG_S_FAILED); - else { - add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - if (drv->scan) - drv->scan(dev); + goto err; - virtio_config_enable(dev); - } + add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + if (drv->scan) + drv->scan(dev); + + virtio_config_enable(dev); + return 0; +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; + } static int virtio_dev_remove(struct device *_d) @@ -223,6 +275,12 @@ static struct bus_type virtio_bus = { .remove = virtio_dev_remove, }; +bool virtio_device_is_legacy_only(struct virtio_device_id id) +{ + return id.device == VIRTIO_ID_BALLOON; +} +EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only); + int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. */ @@ -303,6 +361,7 @@ EXPORT_SYMBOL_GPL(virtio_device_freeze); int virtio_device_restore(struct virtio_device *dev) { struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + int ret; /* We always start by resetting the device, in case a previous * driver messed it up. */ @@ -322,14 +381,14 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); - dev->config->finalize_features(dev); + ret = virtio_finalize_features(dev); + if (ret) + goto err; if (drv->restore) { - int ret = drv->restore(dev); - if (ret) { - add_status(dev, VIRTIO_CONFIG_S_FAILED); - return ret; - } + ret = drv->restore(dev); + if (ret) + goto err; } /* Finally, tell the device we're all set */ @@ -338,6 +397,10 @@ int virtio_device_restore(struct virtio_device *dev) virtio_config_enable(dev); return 0; + +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return ret; } EXPORT_SYMBOL_GPL(virtio_device_restore); #endif diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c9703d4d6f67..50c5f42d7a9f 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/balloon_compaction.h> +#include <linux/oom.h> /* * Balloon device works in 4K page units. 
So each page is pointed to by @@ -36,6 +37,12 @@ */ #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 +#define OOM_VBALLOON_DEFAULT_PAGES 256 +#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 + +static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES; +module_param(oom_pages, int, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); struct virtio_balloon { @@ -71,6 +78,9 @@ struct virtio_balloon /* Memory statistics */ int need_stats_update; struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; + + /* To register callback in oom notifier call chain */ + struct notifier_block nb; }; static struct virtio_device_id id_table[] = { @@ -168,8 +178,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) } } -static void leak_balloon(struct virtio_balloon *vb, size_t num) +static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) { + unsigned num_freed_pages; struct page *page; struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; @@ -186,6 +197,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; } + num_freed_pages = vb->num_pfns; /* * Note that if * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); @@ -195,6 +207,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) tell_host(vb, vb->deflate_vq); mutex_unlock(&vb->balloon_lock); release_pages_by_pfn(vb->pfns, vb->num_pfns); + return num_freed_pages; } static inline void update_stat(struct virtio_balloon *vb, int idx, @@ -287,6 +300,38 @@ static void update_balloon_size(struct virtio_balloon *vb) &actual); } +/* + * virtballoon_oom_notify - release pages when system is under severe + * memory pressure (called from out_of_memory()) + * @self : notifier block struct + * @dummy: not used + * @parm : returned - number of freed pages + * + * The balancing of memory by use of the virtio balloon should not cause + * the termination of processes while there are pages in the balloon. + * If virtio balloon manages to release some memory, it will make the + * system return and retry the allocation that forced the OOM killer + * to run. 
+ */ +static int virtballoon_oom_notify(struct notifier_block *self, + unsigned long dummy, void *parm) +{ + struct virtio_balloon *vb; + unsigned long *freed; + unsigned num_freed_pages; + + vb = container_of(self, struct virtio_balloon, nb); + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) + return NOTIFY_OK; + + freed = parm; + num_freed_pages = leak_balloon(vb, oom_pages); + update_balloon_size(vb); + *freed += num_freed_pages; + + return NOTIFY_OK; +} + static int balloon(void *_vballoon) { struct virtio_balloon *vb = _vballoon; @@ -443,6 +488,12 @@ static int virtballoon_probe(struct virtio_device *vdev) if (err) goto out_free_vb; + vb->nb.notifier_call = virtballoon_oom_notify; + vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY; + err = register_oom_notifier(&vb->nb); + if (err < 0) + goto out_oom_notify; + vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); @@ -452,6 +503,8 @@ static int virtballoon_probe(struct virtio_device *vdev) return 0; out_del_vqs: + unregister_oom_notifier(&vb->nb); +out_oom_notify: vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); @@ -476,6 +529,7 @@ static void virtballoon_remove(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; + unregister_oom_notifier(&vb->nb); kthread_stop(vb->thread); remove_common(vb); kfree(vb); @@ -515,6 +569,7 @@ static int virtballoon_restore(struct virtio_device *vdev) static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, + VIRTIO_BALLOON_F_DEFLATE_ON_OOM, }; static struct virtio_driver virtio_balloon_driver = { diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index ef9a1650bb80..00d115b22bd8 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -142,7 +142,7 @@ struct virtio_mmio_vq_info { /* Configuration interface */ -static u32 vm_get_features(struct virtio_device *vdev) +static u64 vm_get_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); @@ -152,19 +152,20 @@ static u32 vm_get_features(struct virtio_device *vdev) return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); } -static void vm_finalize_features(struct virtio_device *vdev) +static int vm_finalize_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - int i; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { - writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); - writel(vdev->features[i], - vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); - } + /* Make sure we don't have any features > 32 bits! 
*/ + BUG_ON((u32)vdev->features != vdev->features); + + writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); + writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); + + return 0; } static void vm_get(struct virtio_device *vdev, unsigned offset, @@ -639,7 +640,6 @@ static struct platform_driver virtio_mmio_driver = { .remove = virtio_mmio_remove, .driver = { .name = "virtio-mmio", - .owner = THIS_MODULE, .of_match_table = virtio_mmio_match, }, }; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci_common.c index d34ebfa604f3..2ef9529809d8 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci_common.c @@ -1,175 +1,26 @@ /* - * Virtio PCI driver + * Virtio PCI driver - common functionality for all device versions * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ -#include <linux/module.h> -#include <linux/list.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/virtio.h> -#include <linux/virtio_config.h> -#include <linux/virtio_ring.h> -#include <linux/virtio_pci.h> -#include <linux/highmem.h> -#include <linux/spinlock.h> - -MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); -MODULE_DESCRIPTION("virtio-pci"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1"); - -/* Our device structure */ -struct virtio_pci_device -{ - struct virtio_device vdev; - struct pci_dev *pci_dev; - - /* the IO mapping for the PCI config space */ - void __iomem *ioaddr; - - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - /* MSI-X support */ - int msix_enabled; - int intx_enabled; - struct msix_entry *msix_entries; - cpumask_var_t *msix_affinity_masks; - /* Name strings for interrupts. This size should be enough, - * and I'm too lazy to allocate each name separately. */ - char (*msix_names)[256]; - /* Number of available vectors */ - unsigned msix_vectors; - /* Vectors allocated, excluding per-vq vectors if any */ - unsigned msix_used_vectors; - - /* Whether we have vector per vq */ - bool per_vq_vectors; -}; - -/* Constants for MSI-X */ -/* Use first vector for configuration changes, second and the rest for - * virtqueues Thus, we need at least 2 vectors for MSI. */ -enum { - VP_MSIX_CONFIG_VECTOR = 0, - VP_MSIX_VQ_VECTOR = 1, -}; - -struct virtio_pci_vq_info -{ - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the number of entries in the queue */ - int num; - - /* the virtual address of the ring queue */ - void *queue; - - /* the list node for the virtqueues list */ - struct list_head node; - - /* MSI-X vector (or none) */ - unsigned msix_vector; -}; - -/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. 
*/ -static const struct pci_device_id virtio_pci_id_table[] = { - { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); - -/* Convert a generic virtio device to our structure */ -static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) -{ - return container_of(vdev, struct virtio_pci_device, vdev); -} - -/* virtio config->get_features() implementation */ -static u32 vp_get_features(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* When someone needs more than 32 feature bits, we'll need to - * steal a bit to indicate that the rest are somewhere else. */ - return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); -} - -/* virtio config->finalize_features() implementation */ -static void vp_finalize_features(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* Give virtio_ring a chance to accept features. */ - vring_transport_features(vdev); - - /* We only support 32 feature bits. */ - BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); - iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); -} - -/* virtio config->get() implementation */ -static void vp_get(struct virtio_device *vdev, unsigned offset, - void *buf, unsigned len) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + - VIRTIO_PCI_CONFIG(vp_dev) + offset; - u8 *ptr = buf; - int i; - - for (i = 0; i < len; i++) - ptr[i] = ioread8(ioaddr + i); -} - -/* the config->set() implementation. it's symmetric to the config->get() - * implementation */ -static void vp_set(struct virtio_device *vdev, unsigned offset, - const void *buf, unsigned len) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + - VIRTIO_PCI_CONFIG(vp_dev) + offset; - const u8 *ptr = buf; - int i; - - for (i = 0; i < len; i++) - iowrite8(ptr[i], ioaddr + i); -} - -/* config->{get,set}_status() implementations */ -static u8 vp_get_status(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); -} - -static void vp_set_status(struct virtio_device *vdev, u8 status) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - /* We should never be setting status to 0. */ - BUG_ON(status == 0); - iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); -} +#include "virtio_pci_common.h" /* wait for pending irq handlers */ -static void vp_synchronize_vectors(struct virtio_device *vdev) +void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; @@ -181,26 +32,12 @@ static void vp_synchronize_vectors(struct virtio_device *vdev) synchronize_irq(vp_dev->msix_entries[i].vector); } -static void vp_reset(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - /* 0 status means a reset. */ - iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); - /* Flush out the status write, and flush in device writes, - * including MSi-X interrupts, if any. */ - ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); - /* Flush pending VQ/configuration callbacks. 
*/ - vp_synchronize_vectors(vdev); -} - /* the notify function used when creating a virt queue */ -static bool vp_notify(struct virtqueue *vq) +bool vp_notify(struct virtqueue *vq) { - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - /* we write the queue's selector into the notification register to * signal the other end */ - iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); + iowrite16(vq->index, (void __iomem *)vq->priv); return true; } @@ -244,7 +81,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ - isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + isr = ioread8(vp_dev->isr); /* It's definitely not us if the ISR was not high */ if (!isr) @@ -276,10 +113,7 @@ static void vp_free_vectors(struct virtio_device *vdev) if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ - iowrite16(VIRTIO_MSI_NO_VECTOR, - vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - /* Flush the write out to device */ - ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; @@ -343,9 +177,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, goto error; ++vp_dev->msix_used_vectors; - iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + v = vp_dev->config_vector(vp_dev, v); /* Verify we had enough resources to assign the vector */ - v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; @@ -381,66 +214,25 @@ static int vp_request_intx(struct virtio_device *vdev) return err; } -static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, - u16 msix_vec) +static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info; + struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); struct virtqueue *vq; - unsigned long flags, size; - u16 num; - int err; - - /* Select the queue we're interested in */ - iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - - /* Check if queue is either not available or already active. 
*/ - num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); - if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) - return ERR_PTR(-ENOENT); + unsigned long flags; - /* allocate and fill out our structure the represents an active - * queue */ - info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); + /* fill out our structure that represents an active queue */ if (!info) return ERR_PTR(-ENOMEM); - info->num = num; - info->msix_vector = msix_vec; - - size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); - info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); - if (info->queue == NULL) { - err = -ENOMEM; + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); + if (IS_ERR(vq)) goto out_info; - } - /* activate the queue */ - iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - - /* create the vring */ - vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, - true, info->queue, vp_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto out_activate_queue; - } - - vq->priv = info; info->vq = vq; - - if (msix_vec != VIRTIO_MSI_NO_VECTOR) { - iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - if (msix_vec == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto out_assign; - } - } - if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); @@ -449,56 +241,37 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, INIT_LIST_HEAD(&info->node); } + vp_dev->vqs[index] = info; return vq; -out_assign: - vring_del_virtqueue(vq); -out_activate_queue: - iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - free_pages_exact(info->queue, size); out_info: kfree(info); - return ERR_PTR(err); + return vq; } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vq->priv; - unsigned long flags, size; + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); - iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - - if (vp_dev->msix_enabled) { - iowrite16(VIRTIO_MSI_NO_VECTOR, - vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - /* Flush the write out to device */ - ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); - } - - vring_del_virtqueue(vq); - - /* Select and deactivate the queue */ - iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - - size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); - free_pages_exact(info->queue, size); + vp_dev->del_vq(info); kfree(info); } /* the config->del_vqs() implementation */ -static void vp_del_vqs(struct virtio_device *vdev) +void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - info = vq->priv; + info = vp_dev->vqs[vq->index]; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, @@ -508,6 +281,7 @@ static void vp_del_vqs(struct virtio_device *vdev) vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); + kfree(vp_dev->vqs); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, @@ -521,11 +295,15 @@ static int 
vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, u16 msix_vec; int i, err, nvectors, allocated_vectors; + vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; + if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) - goto error_request; + goto error_find; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ @@ -540,7 +318,7 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) - goto error_request; + goto error_find; } vp_dev->per_vq_vectors = per_vq_vectors; @@ -555,7 +333,7 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; @@ -582,16 +360,14 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, error_find: vp_del_vqs(vdev); - -error_request: return err; } /* the config->find_vqs() implementation */ -static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) { int err; @@ -609,7 +385,7 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, false, false); } -static const char *vp_bus_name(struct virtio_device *vdev) +const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -621,11 +397,11 @@ static const char *vp_bus_name(struct virtio_device *vdev) * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ -static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) +int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = vq->priv; + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; struct cpumask *mask; unsigned int irq; @@ -645,21 +421,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } -static const struct virtio_config_ops virtio_pci_config_ops = { - .get = vp_get, - .set = vp_set, - .get_status = vp_get_status, - .set_status = vp_set_status, - .reset = vp_reset, - .find_vqs = vp_find_vqs, - .del_vqs = vp_del_vqs, - .get_features = vp_get_features, - .finalize_features = vp_finalize_features, - .bus_name = vp_bus_name, - .set_vq_affinity = vp_set_vq_affinity, -}; - -static void virtio_pci_release_dev(struct device *_d) +void virtio_pci_release_dev(struct device *_d) { /* * No need for a release method as we allocate/free @@ -668,94 +430,6 @@ static void virtio_pci_release_dev(struct device *_d) */ } -/* the PCI probing function */ -static int virtio_pci_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id) -{ - struct virtio_pci_device *vp_dev; - int err; - - /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. 
*/ - if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) - return -ENODEV; - - if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { - printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", - VIRTIO_PCI_ABI_VERSION, pci_dev->revision); - return -ENODEV; - } - - /* allocate our structure and fill it out */ - vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); - if (vp_dev == NULL) - return -ENOMEM; - - vp_dev->vdev.dev.parent = &pci_dev->dev; - vp_dev->vdev.dev.release = virtio_pci_release_dev; - vp_dev->vdev.config = &virtio_pci_config_ops; - vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); - - /* Disable MSI/MSIX to bring device to a known good state. */ - pci_msi_off(pci_dev); - - /* enable the device */ - err = pci_enable_device(pci_dev); - if (err) - goto out; - - err = pci_request_regions(pci_dev, "virtio-pci"); - if (err) - goto out_enable_device; - - vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); - if (vp_dev->ioaddr == NULL) { - err = -ENOMEM; - goto out_req_regions; - } - - pci_set_drvdata(pci_dev, vp_dev); - pci_set_master(pci_dev); - - /* we use the subsystem vendor/device id as the virtio vendor/device - * id. this allows us to use the same PCI vendor/device id for all - * virtio devices and to identify the particular virtio driver by - * the subsystem ids */ - vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; - vp_dev->vdev.id.device = pci_dev->subsystem_device; - - /* finally register the virtio device */ - err = register_virtio_device(&vp_dev->vdev); - if (err) - goto out_set_drvdata; - - return 0; - -out_set_drvdata: - pci_iounmap(pci_dev, vp_dev->ioaddr); -out_req_regions: - pci_release_regions(pci_dev); -out_enable_device: - pci_disable_device(pci_dev); -out: - kfree(vp_dev); - return err; -} - -static void virtio_pci_remove(struct pci_dev *pci_dev) -{ - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - - unregister_virtio_device(&vp_dev->vdev); - - vp_del_vqs(&vp_dev->vdev); - pci_iounmap(pci_dev, vp_dev->ioaddr); - pci_release_regions(pci_dev); - pci_disable_device(pci_dev); - kfree(vp_dev); -} - #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { @@ -789,6 +463,26 @@ static const struct dev_pm_ops virtio_pci_pm_ops = { }; #endif + +/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. 
*/ +static const struct pci_device_id virtio_pci_id_table[] = { + { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); + +static int virtio_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + return virtio_pci_legacy_probe(pci_dev, id); +} + +static void virtio_pci_remove(struct pci_dev *pci_dev) +{ + virtio_pci_legacy_remove(pci_dev); +} + static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, @@ -800,3 +494,8 @@ static struct pci_driver virtio_pci_driver = { }; module_pci_driver(virtio_pci_driver); + +MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); +MODULE_DESCRIPTION("virtio-pci"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1"); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h new file mode 100644 index 000000000000..adddb647b21d --- /dev/null +++ b/drivers/virtio/virtio_pci_common.h @@ -0,0 +1,135 @@ +#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +/* + * Virtio PCI driver - APIs for common functionality for all device versions + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> +#include <linux/virtio_pci.h> +#include <linux/highmem.h> +#include <linux/spinlock.h> + +struct virtio_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the number of entries in the queue */ + int num; + + /* the virtual address of the ring queue */ + void *queue; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* MSI-X vector (or none) */ + unsigned msix_vector; +}; + +/* Our device structure */ +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + + /* the IO mapping for the PCI config space */ + void __iomem *ioaddr; + + /* the IO mapping for ISR operation */ + void __iomem *isr; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + /* array of all queues for house-keeping */ + struct virtio_pci_vq_info **vqs; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; + struct msix_entry *msix_entries; + cpumask_var_t *msix_affinity_masks; + /* Name strings for interrupts. This size should be enough, + * and I'm too lazy to allocate each name separately. 
*/ + char (*msix_names)[256]; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated, excluding per-vq vectors if any */ + unsigned msix_used_vectors; + + /* Whether we have vector per vq */ + bool per_vq_vectors; + + struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned idx, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec); + void (*del_vq)(struct virtio_pci_vq_info *info); + + u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); +}; + +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. */ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +/* Convert a generic virtio device to our structure */ +static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) +{ + return container_of(vdev, struct virtio_pci_device, vdev); +} + +/* wait for pending irq handlers */ +void vp_synchronize_vectors(struct virtio_device *vdev); +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq); +/* the config->del_vqs() implementation */ +void vp_del_vqs(struct virtio_device *vdev); +/* the config->find_vqs() implementation */ +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); +const char *vp_bus_name(struct virtio_device *vdev); + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +void virtio_pci_release_dev(struct device *); + +int virtio_pci_legacy_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id); +void virtio_pci_legacy_remove(struct pci_dev *pci_dev); + +#endif diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c new file mode 100644 index 000000000000..6c76f0f5658c --- /dev/null +++ b/drivers/virtio/virtio_pci_legacy.c @@ -0,0 +1,306 @@ +/* + * Virtio PCI driver - legacy device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "virtio_pci_common.h" + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* When someone needs more than 32 feature bits, we'll need to + * steal a bit to indicate that the rest are somewhere else. */ + return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + /* Make sure we don't have any features > 32 bits! */ + BUG_ON((u32)vdev->features != vdev->features); + + /* We only support 32 feature bits. 
*/ + iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = ioread8(ioaddr + i); +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + iowrite8(ptr[i], ioaddr + i); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush out the status write, and flush in device writes, + * including MSi-X interrupts, if any. */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush pending VQ/configuration callbacks. */ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtqueue *vq; + unsigned long size; + u16 num; + int err; + + /* Select the queue we're interested in */ + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + /* Check if queue is either not available or already active. 
*/ + num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); + if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) + return ERR_PTR(-ENOENT); + + info->num = num; + info->msix_vector = msix_vec; + + size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* activate the queue */ + iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, + vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto out_activate_queue; + } + + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_assign; + } + } + + return vq; + +out_assign: + vring_del_virtqueue(vq); +out_activate_queue: + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + free_pages_exact(info->queue, size); + return ERR_PTR(err); +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + unsigned long size; + + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + } + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); + free_pages_exact(info->queue, size); +} + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/* the PCI probing function */ +int virtio_pci_legacy_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct virtio_pci_device *vp_dev; + int err; + + /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) + return -ENODEV; + + if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { + printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", + VIRTIO_PCI_ABI_VERSION, pci_dev->revision); + return -ENODEV; + } + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (vp_dev == NULL) + return -ENOMEM; + + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* Disable MSI/MSIX to bring device to a known good state. 
*/ + pci_msi_off(pci_dev); + + /* enable the device */ + err = pci_enable_device(pci_dev); + if (err) + goto out; + + err = pci_request_regions(pci_dev, "virtio-pci"); + if (err) + goto out_enable_device; + + vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); + if (vp_dev->ioaddr == NULL) { + err = -ENOMEM; + goto out_req_regions; + } + + vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; + + pci_set_drvdata(pci_dev, vp_dev); + pci_set_master(pci_dev); + + /* we use the subsystem vendor/device id as the virtio vendor/device + * id. this allows us to use the same PCI vendor/device id for all + * virtio devices and to identify the particular virtio driver by + * the subsystem ids */ + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + vp_dev->vdev.id.device = pci_dev->subsystem_device; + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + /* finally register the virtio device */ + err = register_virtio_device(&vp_dev->vdev); + if (err) + goto out_set_drvdata; + + return 0; + +out_set_drvdata: + pci_iounmap(pci_dev, vp_dev->ioaddr); +out_req_regions: + pci_release_regions(pci_dev); +out_enable_device: + pci_disable_device(pci_dev); +out: + kfree(vp_dev); + return err; +} + +void virtio_pci_legacy_remove(struct pci_dev *pci_dev) +{ + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + + unregister_virtio_device(&vp_dev->vdev); + + vp_del_vqs(&vp_dev->vdev); + pci_iounmap(pci_dev, vp_dev->ioaddr); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + kfree(vp_dev); +} diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 3b1f89b6e743..00ec6b3f96b2 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -99,7 +99,8 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) -static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) +static struct vring_desc *alloc_indirect(struct virtqueue *_vq, + unsigned int total_sg, gfp_t gfp) { struct vring_desc *desc; unsigned int i; @@ -116,7 +117,7 @@ static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) return NULL; for (i = 0; i < total_sg; i++) - desc[i].next = i+1; + desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); return desc; } @@ -165,17 +166,17 @@ static inline int virtqueue_add(struct virtqueue *_vq, /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 1 && vq->vq.num_free) - desc = alloc_indirect(total_sg, gfp); + desc = alloc_indirect(_vq, total_sg, gfp); else desc = NULL; if (desc) { /* Use a single buffer which doesn't continue */ - vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; - vq->vring.desc[head].addr = virt_to_phys(desc); + vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); + vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); /* avoid kmemleak false positive (hidden by virt_to_phys) */ kmemleak_ignore(desc); - vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc); + vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); /* Set up rest to use this indirect table. 
*/ i = 0; @@ -205,28 +206,28 @@ static inline int virtqueue_add(struct virtqueue *_vq, for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; - i = desc[i].next; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; - i = desc[i].next; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } /* Last one doesn't continue. */ - desc[prev].flags &= ~VRING_DESC_F_NEXT; + desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); /* Update free pointer */ if (indirect) - vq->free_head = vq->vring.desc[head].next; + vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); else vq->free_head = i; @@ -235,13 +236,13 @@ static inline int virtqueue_add(struct virtqueue *_vq, /* Put entry in available array (but don't update avail->idx until they * do sync). */ - avail = (vq->vring.avail->idx & (vq->vring.num-1)); - vq->vring.avail->ring[avail] = head; + avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); + vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); - vq->vring.avail->idx++; + vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); vq->num_added++; /* This is very unlikely, but theoretically possible. Kick @@ -354,8 +355,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) * event. 
*/ virtio_mb(vq->weak_barriers); - old = vq->vring.avail->idx - vq->num_added; - new = vq->vring.avail->idx; + old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; + new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); vq->num_added = 0; #ifdef DEBUG @@ -367,10 +368,10 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) #endif if (vq->event) { - needs_kick = vring_need_event(vring_avail_event(&vq->vring), + needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), new, old); } else { - needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); } END_USE(vq); return needs_kick; @@ -432,15 +433,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) i = head; /* Free the indirect table */ - if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) - kfree(phys_to_virt(vq->vring.desc[i].addr)); + if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)) + kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr))); - while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { - i = vq->vring.desc[i].next; + while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) { + i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); vq->vq.num_free++; } - vq->vring.desc[i].next = vq->free_head; + vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; @@ -448,7 +449,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) static inline bool more_used(const struct vring_virtqueue *vq) { - return vq->last_used_idx != vq->vring.used->idx; + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); } /** @@ -491,8 +492,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); - i = vq->vring.used->ring[last_used].id; - *len = vq->vring.used->ring[last_used].len; + i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); + *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); @@ -510,8 +511,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ - if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { - vring_used_event(&vq->vring) = vq->last_used_idx; + if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); virtio_mb(vq->weak_barriers); } @@ -537,7 +538,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); @@ -565,8 +566,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
*/ - vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; + vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); END_USE(vq); return last_used_idx; } @@ -586,7 +587,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) struct vring_virtqueue *vq = to_vvq(_vq); virtio_mb(vq->weak_barriers); - return (u16)last_used_idx != vq->vring.used->idx; + return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); } EXPORT_SYMBOL_GPL(virtqueue_poll); @@ -633,12 +634,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ - vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; + vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); /* TODO: tune this threshold */ - bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; - vring_used_event(&vq->vring) = vq->last_used_idx + bufs; + bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4; + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); virtio_mb(vq->weak_barriers); - if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { + if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } @@ -670,7 +671,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); - vq->vring.avail->idx--; + vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1); END_USE(vq); return buf; } @@ -747,12 +748,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, /* No callback? Tell other side not to bother us. */ if (!callback) - vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT); /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < num-1; i++) { - vq->vring.desc[i].next = i+1; + vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); vq->data[i] = NULL; } vq->data[i] = NULL; @@ -779,9 +780,11 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_RING_F_EVENT_IDX: break; + case VIRTIO_F_VERSION_1: + break; default: /* We don't understand this bit. */ - clear_bit(i, vdev->features); + __virtio_clear_bit(vdev, i); } } } @@ -826,4 +829,20 @@ void virtio_break_device(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_break_device); +void *virtqueue_get_avail(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.avail; +} +EXPORT_SYMBOL_GPL(virtqueue_get_avail); + +void *virtqueue_get_used(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.used; +} +EXPORT_SYMBOL_GPL(virtqueue_get_used); + MODULE_LICENSE("GPL"); |
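
The virtio_ring.c hunks above funnel every ring field access through cpu_to_virtio16()/virtio16_to_cpu() and their 32/64-bit variants. Legacy devices use guest-native byte order, while a device that negotiated VIRTIO_F_VERSION_1 defines the ring as little-endian, so a swap is only needed on a big-endian guest driving a modern device. A standalone illustration of that rule follows; struct vdev and the helper bodies are assumptions for the sketch, not the kernel's implementations:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: the kernel keys this off
 * virtio_has_feature(vdev, VIRTIO_F_VERSION_1). */
struct vdev {
	bool version_1;
};

static uint16_t bswap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static bool host_is_big_endian(void)
{
	const uint16_t probe = 1;

	return *(const uint8_t *)&probe == 0;
}

/* virtio 1.0 rings are little-endian; legacy rings use guest-native
 * byte order, so conversion only does work on a big-endian guest
 * that negotiated VERSION_1. */
static uint16_t virtio16_to_cpu(const struct vdev *d, uint16_t v)
{
	if (d->version_1 && host_is_big_endian())
		return bswap16(v);
	return v;
}

static uint16_t cpu_to_virtio16(const struct vdev *d, uint16_t v)
{
	/* The swap is its own inverse, so both directions share one body. */
	return virtio16_to_cpu(d, v);
}

int main(void)
{
	const struct vdev legacy = { false }, modern = { true };

	printf("%#x %#x\n", cpu_to_virtio16(&legacy, 0x0102),
	       cpu_to_virtio16(&modern, 0x0102));
	return 0;
}

On a little-endian guest both calls reduce to the identity, which is why the conversions sprinkled through the diff compile away to nothing on x86.
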
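The balloon changes also hook the OOM notifier chain: once VIRTIO_BALLOON_F_DEFLATE_ON_OOM is negotiated, the guest may deflate the balloon by up to oom_pages pages instead of letting the OOM killer run. A miniature simulation of that contract is below; the callback signature, NOTIFY_OK value and chain walk are simplified stand-ins for the kernel's notifier_block machinery:

#include <stdio.h>

#define NOTIFY_OK 1

/* Miniature of the OOM notifier contract: out_of_memory() walks a
 * chain, passing a pointer where each callback accumulates the number
 * of pages it freed; a nonzero total lets the allocator retry before
 * killing anything. Everything here is a simplified stand-in. */
typedef int (*oom_cb)(unsigned long *freed);

static unsigned long balloon_pages = 1000;	/* pages held by the balloon */
static const unsigned long oom_pages = 256;	/* module parameter default */

static int virtballoon_oom_notify(unsigned long *freed)
{
	unsigned long leak = balloon_pages < oom_pages ? balloon_pages
						       : oom_pages;

	balloon_pages -= leak;	/* stands in for leak_balloon(vb, oom_pages) */
	*freed += leak;		/* report pages returned to the guest */
	return NOTIFY_OK;
}

int main(void)
{
	unsigned long freed = 0;
	oom_cb chain[] = { virtballoon_oom_notify };
	unsigned int i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		chain[i](&freed);
	printf("freed %lu pages before invoking the OOM killer\n", freed);
	return 0;
}

In the real driver the callback additionally calls update_balloon_size() so the host sees the shrunken balloon, as the virtio_balloon.c hunk above shows.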