Diffstat (limited to 'drivers/gpu/drm/i915/gvt/kvmgt.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/kvmgt.c | 167
1 file changed, 163 insertions, 4 deletions
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 110f07e8bcfb..f86983d6655b 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+        size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+                        size_t count, loff_t *ppos, bool iswrite);
+        void (*release)(struct intel_vgpu *vgpu,
+                        struct vfio_region *region);
+};
+
 struct vfio_region {
         u32                             type;
         u32                             subtype;
         size_t                          size;
         u32                             flags;
+        const struct intel_vgpu_regops  *ops;
+        void                            *data;
 };
 
 struct kvmgt_pgfn {
@@ -316,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
         }
 }
 
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+                size_t count, loff_t *ppos, bool iswrite)
+{
+        unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+                        VFIO_PCI_NUM_REGIONS;
+        void *base = vgpu->vdev.region[i].data;
+        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+        if (pos >= vgpu->vdev.region[i].size || iswrite) {
+                gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+                return -EINVAL;
+        }
+        count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+        memcpy(buf, base + pos, count);
+
+        return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+                struct vfio_region *region)
+{
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+        .rw = intel_vgpu_reg_rw_opregion,
+        .release = intel_vgpu_reg_release_opregion,
+};
+
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+                unsigned int type, unsigned int subtype,
+                const struct intel_vgpu_regops *ops,
+                size_t size, u32 flags, void *data)
+{
+        struct vfio_region *region;
+
+        region = krealloc(vgpu->vdev.region,
+                        (vgpu->vdev.num_regions + 1) * sizeof(*region),
+                        GFP_KERNEL);
+        if (!region)
+                return -ENOMEM;
+
+        vgpu->vdev.region = region;
+        vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+        vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+        vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+        vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+        vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+        vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+        vgpu->vdev.num_regions++;
+        return 0;
+}
+
+static int kvmgt_get_vfio_device(void *p_vgpu)
+{
+        struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+        vgpu->vdev.vfio_device = vfio_device_get_from_dev(
+                mdev_dev(vgpu->vdev.mdev));
+        if (!vgpu->vdev.vfio_device) {
+                gvt_vgpu_err("failed to get vfio device\n");
+                return -ENODEV;
+        }
+        return 0;
+}
+
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+        struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+        void *base;
+        int ret;
+
+        /* Each vgpu has its own opregion, although VFIO would create another
+         * one later. This one is used to expose the opregion to VFIO; the
+         * other one, created by VFIO later, is actually used by the guest.
+         */
+        base = vgpu_opregion(vgpu)->va;
+        if (!base)
+                return -ENOMEM;
+
+        if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+                memunmap(base);
+                return -EINVAL;
+        }
+
+        ret = intel_vgpu_register_reg(vgpu,
+                        PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+                        &intel_vgpu_regops_opregion, OPREGION_SIZE,
+                        VFIO_REGION_INFO_FLAG_READ, base);
+
+        return ret;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+        if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+                return;
+
+        vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
+
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
         struct intel_vgpu *vgpu = NULL;
@@ -546,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
         int ret = -EINVAL;
 
 
-        if (index >= VFIO_PCI_NUM_REGIONS) {
+        if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
                 gvt_vgpu_err("invalid index: %u\n", index);
                 return -EINVAL;
         }
@@ -574,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
         case VFIO_PCI_BAR5_REGION_INDEX:
         case VFIO_PCI_VGA_REGION_INDEX:
         case VFIO_PCI_ROM_REGION_INDEX:
+                break;
         default:
-                gvt_vgpu_err("unsupported region: %u\n", index);
+                if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+                        return -EINVAL;
+
+                index -= VFIO_PCI_NUM_REGIONS;
+                return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+                                ppos, is_write);
         }
 
         return ret == 0 ? count : ret;
@@ -838,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
                 info.flags = VFIO_DEVICE_FLAGS_PCI;
                 info.flags |= VFIO_DEVICE_FLAGS_RESET;
-                info.num_regions = VFIO_PCI_NUM_REGIONS;
+                info.num_regions = VFIO_PCI_NUM_REGIONS +
+                                vgpu->vdev.num_regions;
                 info.num_irqs = VFIO_PCI_NUM_IRQS;
 
                 return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -908,13 +1029,17 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                         info.size = 0;
-                        info.flags = 0;
+                        gvt_dbg_core("get region info bar:%d\n", info.index);
                         break;
 
                 case VFIO_PCI_ROM_REGION_INDEX:
                 case VFIO_PCI_VGA_REGION_INDEX:
+                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+                        info.size = 0;
+                        info.flags = 0;
+                        gvt_dbg_core("get region info index:%d\n", info.index);
                         break;
 
                 default:
@@ -959,6 +1084,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                 }
 
                 if (caps.size) {
+                        info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
                         if (info.argsz < sizeof(info) + caps.size) {
                                 info.argsz = sizeof(info) + caps.size;
                                 info.cap_offset = 0;
@@ -1045,6 +1171,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
         } else if (cmd == VFIO_DEVICE_RESET) {
                 intel_gvt_ops->vgpu_reset(vgpu);
                 return 0;
+        } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
+                struct vfio_device_gfx_plane_info dmabuf;
+                int ret = 0;
+
+                minsz = offsetofend(struct vfio_device_gfx_plane_info,
+                                dmabuf_id);
+                if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+                        return -EFAULT;
+                if (dmabuf.argsz < minsz)
+                        return -EINVAL;
+
+                ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+                if (ret != 0)
+                        return ret;
+
+                return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+                                -EFAULT : 0;
+        } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
+                __u32 dmabuf_id;
+                __s32 dmabuf_fd;
+
+                if (get_user(dmabuf_id, (__u32 __user *)arg))
+                        return -EFAULT;
+
+                dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
+                return dmabuf_fd;
+
         }
 
         return 0;
@@ -1286,6 +1439,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
         kvmgt_protect_table_init(info);
         gvt_cache_init(vgpu);
 
+        mutex_init(&vgpu->dmabuf_lock);
+        init_completion(&vgpu->vblank_done);
+
         info->track_node.track_write = kvmgt_page_track_write;
         info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
         kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1426,6 +1582,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
         .read_gpa = kvmgt_read_gpa,
         .write_gpa = kvmgt_write_gpa,
         .gfn_to_mfn = kvmgt_gfn_to_pfn,
+        .set_opregion = kvmgt_set_opregion,
+        .get_vfio_device = kvmgt_get_vfio_device,
+        .put_vfio_device = kvmgt_put_vfio_device,
 };
 
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
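
The OpRegion region added by this change sits past the fixed PCI indexes, so userspace is expected to discover it through the region-info capability chain (which is why the ioctl path now sets VFIO_REGION_INFO_FLAG_CAPS). The sketch below is hypothetical and not part of the patch: the helper name, the device_fd/num_device_regions parameters (the latter would come from VFIO_DEVICE_GET_INFO) and the local PCI_VENDOR_ID_INTEL define are assumptions for illustration; it only shows how a consumer might locate the region and read it with pread(), which lands in intel_vgpu_reg_rw_opregion() above.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

#define PCI_VENDOR_ID_INTEL 0x8086 /* defined locally; the kernel side gets it from pci_ids.h */

/* Scan the device-specific regions (index >= VFIO_PCI_NUM_REGIONS), match the
 * region-type capability registered by kvmgt, and return a copy of the OpRegion. */
static void *read_igd_opregion(int device_fd, uint32_t num_device_regions,
                               size_t *size_out)
{
        uint32_t i;

        for (i = VFIO_PCI_NUM_REGIONS; i < num_device_regions; i++) {
                struct vfio_region_info probe = { .argsz = sizeof(probe), .index = i };
                struct vfio_region_info *info;
                void *buf = NULL;
                uint32_t off;

                if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &probe))
                        continue;
                if (!(probe.flags & VFIO_REGION_INFO_FLAG_CAPS) ||
                    probe.argsz <= sizeof(probe))
                        continue;

                /* Re-issue the ioctl with room for the capability chain. */
                info = calloc(1, probe.argsz);
                if (!info)
                        return NULL;
                info->argsz = probe.argsz;
                info->index = i;
                if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info)) {
                        free(info);
                        continue;
                }

                /* Walk the capability chain looking for the Intel vendor type /
                 * IGD OpRegion subtype that intel_vgpu_register_reg() announced. */
                for (off = info->cap_offset; off; ) {
                        struct vfio_info_cap_header *hdr = (void *)((char *)info + off);
                        struct vfio_region_info_cap_type *type = (void *)hdr;

                        off = hdr->next;
                        if (hdr->id != VFIO_REGION_INFO_CAP_TYPE)
                                continue;
                        if (type->type != (VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
                                           PCI_VENDOR_ID_INTEL) ||
                            type->subtype != VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION)
                                continue;

                        /* The region is advertised read-only, so a plain pread()
                         * at the offset VFIO assigned to this index is enough. */
                        buf = malloc(info->size);
                        if (buf && pread(device_fd, buf, info->size,
                                         info->offset) != (ssize_t)info->size) {
                                free(buf);
                                buf = NULL;
                        }
                        break;
                }

                if (buf) {
                        *size_out = info->size;
                        free(info);
                        return buf;
                }
                free(info);
        }
        return NULL;
}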
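
The VFIO_DEVICE_QUERY_GFX_PLANE / VFIO_DEVICE_GET_GFX_DMABUF branches follow a query-then-export pattern: userspace first asks for a description of a guest plane and gets back a dmabuf_id, then trades that id for a dma-buf file descriptor. A minimal sketch under the same caveats (the helper name and the locally defined DRM_PLANE_TYPE_PRIMARY value are illustrative assumptions; the no-plane check mirrors what a consumer such as QEMU's vfio display code does):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#define DRM_PLANE_TYPE_PRIMARY 1 /* DRM "type" plane property value, defined locally */

/* Ask the vGPU for its current primary plane and export it as a dma-buf fd. */
static int get_primary_plane_dmabuf(int device_fd)
{
        struct vfio_device_gfx_plane_info plane = {
                .argsz = sizeof(plane),
                .flags = VFIO_GFX_PLANE_TYPE_DMABUF, /* ask for a dma-buf, not a region */
                .drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
        };
        int dmabuf_fd;

        if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane)) {
                perror("VFIO_DEVICE_QUERY_GFX_PLANE");
                return -1;
        }

        /* No format/size means the guest is not scanning anything out yet. */
        if (!plane.drm_format || !plane.size)
                return -1;

        /* The ioctl itself returns the new dma-buf fd, as the
         * intel_gvt_ops->vgpu_get_dmabuf() branch above shows. */
        dmabuf_fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
        if (dmabuf_fd < 0) {
                perror("VFIO_DEVICE_GET_GFX_DMABUF");
                return -1;
        }

        printf("primary plane %ux%u, drm_format 0x%x, stride %u -> dmabuf fd %d\n",
               plane.width, plane.height, plane.drm_format, plane.stride, dmabuf_fd);
        return dmabuf_fd;
}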