Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 104
1 file changed, 89 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 51bef1775e47..6bc2008b0d0d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -192,8 +192,16 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
-		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
-			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+				struct virtio_gpu_ctrl_hdr *cmd;
+				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
+				DRM_ERROR("response 0x%x (command 0x%x)\n",
+					  le32_to_cpu(resp->type),
+					  le32_to_cpu(cmd->type));
+			} else
+				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+		}
 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 			u64 f = le64_to_cpu(resp->fence_id);
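
For reference, the response codes behind the new ">=" check come from the virtio-gpu uapi header: success responses sit in the 0x11xx range and error responses start at 0x1200, so anything at or above VIRTIO_GPU_RESP_ERR_UNSPEC is an error. Abbreviated excerpt (from include/uapi/linux/virtio_gpu.h):

    enum virtio_gpu_ctrl_type {
            /* ... command types elided ... */

            /* success responses */
            VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
            VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
            VIRTIO_GPU_RESP_OK_CAPSET_INFO,
            VIRTIO_GPU_RESP_OK_CAPSET,
            VIRTIO_GPU_RESP_OK_EDID,

            /* error responses */
            VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
            VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
            VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
            /* ... */
    };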
@@ -298,7 +306,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_ctrl_hdr *hdr,
-                                               struct virtio_gpu_fence **fence)
+                                               struct virtio_gpu_fence *fence)
 {
         struct virtqueue *vq = vgdev->ctrlq.vq;
         int rc;
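
This and the later "**fence" to "*fence" hunks are one API change: the fence is now allocated by the caller (via virtio_gpu_fence_alloc(), as the object-detach hunk at the bottom shows) and handed down as a plain pointer, instead of being created behind a double pointer inside the queue path. A hypothetical caller under the new convention (a sketch, not a call site from this patch; resource_id and the geometry arguments are placeholders):

    struct virtio_gpu_fence *fence;

    /* the caller allocates the fence up front ... */
    fence = virtio_gpu_fence_alloc(vgdev);
    if (!fence)
            return -ENOMEM;

    /* ... and passes it down by plain pointer */
    virtio_gpu_cmd_transfer_to_host_2d(vgdev, resource_id, offset,
                                       width, height, x, y, fence);
    dma_fence_put(&fence->f);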
@@ -405,7 +413,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                   uint32_t resource_id,
-                                                  struct virtio_gpu_fence **fence)
+                                                  struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_detach_backing *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -467,7 +475,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                         uint64_t offset,
                                         __le32 width, __le32 height,
                                         __le32 x, __le32 y,
-                                        struct virtio_gpu_fence **fence)
+                                        struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_transfer_to_host_2d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -497,7 +505,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id,
                                        struct virtio_gpu_mem_entry *ents,
                                        uint32_t nents,
-                                       struct virtio_gpu_fence **fence)
+                                       struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_attach_backing *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -584,6 +592,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 	wake_up(&vgdev->resp_wq);
 }
 
+static int virtio_get_edid_block(void *data, u8 *buf,
+				 unsigned int block, size_t len)
+{
+	struct virtio_gpu_resp_edid *resp = data;
+	size_t start = block * EDID_LENGTH;
+
+	if (start + len > le32_to_cpu(resp->size))
+		return -1;
+	memcpy(buf, resp->edid + start, len);
+	return 0;
+}
+
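
drm_do_get_edid() calls this once per 128-byte block (EDID_LENGTH) and treats a non-zero return as failure, so a short response from the host truncates the probe instead of overrunning resp->edid. Working the bounds check for a 256-byte (two-block) response:

    /* le32_to_cpu(resp->size) == 256, EDID_LENGTH == 128:
     *   block 0: start =   0,   0 + 128 <= 256 -> copied
     *   block 1: start = 128, 128 + 128 <= 256 -> copied
     *   block 2: start = 256, 256 + 128 >  256 -> -1, probe stops
     */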
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_cmd_get_edid *cmd =
+		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+	struct virtio_gpu_resp_edid *resp =
+		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+	uint32_t scanout = le32_to_cpu(cmd->scanout);
+	struct virtio_gpu_output *output;
+	struct edid *new_edid, *old_edid;
+
+	if (scanout >= vgdev->num_scanouts)
+		return;
+	output = vgdev->outputs + scanout;
+
+	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
+	spin_lock(&vgdev->display_info_lock);
+	old_edid = output->edid;
+	output->edid = new_edid;
+	drm_connector_update_edid_property(&output->conn, output->edid);
+	spin_unlock(&vgdev->display_info_lock);
+
+	kfree(old_edid);
+	wake_up(&vgdev->resp_wq);
+}
+
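
The EDID swap above is the usual publish-then-free pattern: the shared pointer is replaced under display_info_lock, and the old buffer is freed only after the lock is dropped, so readers never observe freed memory and kfree() stays out of the critical section. The bare pattern (generic sketch; the identifiers here are placeholders):

    spin_lock(&lock);
    old = shared;           /* take the previous object out ... */
    shared = new;           /* ... and publish the replacement */
    spin_unlock(&lock);
    kfree(old);             /* free outside the critical section */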
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 {
 	struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -686,6 +733,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	return 0;
 }
 
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_cmd_get_edid *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+	int scanout;
+
+	if (WARN_ON(!vgdev->has_edid))
+		return -EINVAL;
+
+	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+				   GFP_KERNEL);
+		if (!resp_buf)
+			return -ENOMEM;
+
+		cmd_p = virtio_gpu_alloc_cmd_resp
+			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+			 resp_buf);
+		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+		cmd_p->scanout = cpu_to_le32(scanout);
+		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	}
+
+	return 0;
+}
+
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 				   uint32_t nlen, const char *name)
 {
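
virtio_gpu_cmd_get_edids() fires one asynchronous GET_EDID command per scanout; each response lands in virtio_gpu_cmd_get_edid_cb() above. A plausible init-time call site, assuming has_edid is set from the VIRTIO_GPU_F_EDID feature bit during probe (a sketch; the actual wiring is outside this diff):

    /* during device probe, after feature negotiation */
    if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
            vgdev->has_edid = true;

    /* ... later, once the virtqueues are running ... */
    if (vgdev->has_edid)
            virtio_gpu_cmd_get_edids(vgdev);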
@@ -753,8 +828,7 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence)
+				  struct virtio_gpu_resource_create_3d *rc_3d)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -766,7 +840,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 	cmd_p->hdr.flags = 0;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	bo->created = true;
 }
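
Unlike the surrounding hunks, virtio_gpu_cmd_resource_create_3d() does not just switch fence types: it stops taking a fence entirely and queues the create unfenced. Under the new convention a caller can still order work against the new resource by fencing a follow-up command itself, along these lines (a sketch, not a call site from this patch):

    /* create the resource unfenced ... */
    virtio_gpu_cmd_resource_create_3d(vgdev, bo, &rc_3d);

    /* ... then fence the command that actually needs ordering,
     * e.g. attaching the backing store */
    fence = virtio_gpu_fence_alloc(vgdev);
    virtio_gpu_object_attach(vgdev, bo, fence);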
@@ -775,7 +849,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -803,7 +877,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence)
+					  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -823,7 +897,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -843,7 +917,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence **fence)
+			     struct virtio_gpu_fence *fence)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
@@ -896,11 +970,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_fence *fence;
 
 	if (use_dma_api && obj->mapped) {
+		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
+
 		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
 		dma_fence_wait(&fence->f, true);
 		dma_fence_put(&fence->f);
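
The detach path is deliberately synchronous: when the DMA API is in use, the pages backing the object must stay mapped until the host has actually processed the detach, so the fence is waited on inline before teardown continues. The idiom, annotated (a sketch; note the hunk above does not check virtio_gpu_fence_alloc() for failure):

    struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

    if (fence) {
            virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
            dma_fence_wait(&fence->f, true);   /* interruptible wait for the host */
            dma_fence_put(&fence->f);          /* drop the last local reference */
    }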