Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 277
1 file changed, 242 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+	write_lock(&dev_priv->resource_lock);
 	res->avail = false;
 	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
-		res->hw_destroy(res);
 		mutex_lock(&dev_priv->binding_mutex);
-		vmw_context_binding_res_list_kill(&res->binding_head);
+		vmw_binding_res_list_kill(&res->binding_head);
 		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
 	}
 
 	id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
 		kfree(res);
 
 	write_lock(&dev_priv->resource_lock);
-
 	if (id != -1)
 		idr_remove(idr, id);
+	write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
 	struct vmw_resource *res = *p_res;
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	*p_res = NULL;
-	write_lock(&dev_priv->resource_lock);
 	kref_put(&res->kref, vmw_resource_release);
-	write_unlock(&dev_priv->resource_lock);
 }
 
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
 	write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
-					 struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+						struct idr *idr, int id)
 {
 	struct vmw_resource *res;
 
 	read_lock(&dev_priv->resource_lock);
 	res = idr_find(idr, id);
-	if (res && res->avail)
-		kref_get(&res->kref);
-	else
+	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 		res = NULL;
+
 	read_unlock(&dev_priv->resource_lock);
 
 	if (unlikely(res == NULL))
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_stream_size,
 				   false, true);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Out of graphics memory for stream"
 				  " creation.\n");
-		goto out_unlock;
-	}
+		goto out_ret;
+	}
 
 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 	if (unlikely(stream == NULL)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_stream_size);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_ret;
 	}
 
 	res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out_ret;
 
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	arg->stream_id = res->id;
 out_err:
 	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
 	return ret;
 }
 
@@ -1152,14 +1150,16 @@ out_bind_failed:
  * command submission.
  *
  * @res: Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup: Backup buffer has been switched.
  * @new_backup: Pointer to new backup buffer if command submission
- *              switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *              switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool switch_backup,
 			    struct vmw_dma_buffer *new_backup,
 			    unsigned long new_backup_offset)
 {
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (new_backup && new_backup != res->backup) {
-
+	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
-		res->backup = vmw_dmabuf_reference(new_backup);
-		lockdep_assert_held(&new_backup->base.resv->lock.base);
-		list_add_tail(&res->mob_head, &new_backup->res_list);
+		if (new_backup) {
+			res->backup = vmw_dmabuf_reference(new_backup);
+			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			list_add_tail(&res->mob_head, &new_backup->res_list);
+		} else {
+			res->backup = NULL;
+		}
 	}
-	if (new_backup)
+	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict || res->id == -1)
+	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
@@ -1259,7 +1262,8 @@ out_no_reserve:
  * the buffer may not be bound to the resource at this point.
  *
  */
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+			 bool no_backup)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
 
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {
-		ret = vmw_resource_buf_alloc(res, true);
-		if (unlikely(ret != 0))
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu bytes.\n",
+				  (unsigned long) res->backup_size);
 			return ret;
+		}
 	}
 
 	return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  * @res: The resource to evict.
  * @interruptible: Whether to wait interruptible.
 */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct ttm_validate_buffer val_buf;
 	unsigned err_count = 0;
 
-	if (likely(!res->func->may_evict))
+	if (!res->func->create)
 		return 0;
 
 	val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 /**
  * vmw_resource_move_notify - TTM move_notify_callback
  *
- * @bo:        The TTM buffer object about to move.
- * @mem:       The truct ttm_mem_reg indicating to what memory
- *             region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 	}
 }
 
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist. This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_private *dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackAllQuery body;
+	} *cmd;
+
+
+	/* No query bound, so do nothing */
+	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+		return 0;
+
+	dx_query_ctx = dx_query_mob->dx_query_ctx;
+	dev_priv = dx_query_ctx->dev_priv;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for "
+			  "query MOB read back.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = dx_query_ctx->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Triggers a rebind the next time affected context is bound */
+	dx_query_mob->dx_query_ctx = NULL;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct vmw_dma_buffer *dx_query_mob;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct vmw_private *dev_priv;
+
+
+	dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mutex_lock(&dev_priv->binding_mutex);
+
+	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return;
+	}
+
+	/* If BO is being moved from MOB to system memory */
+	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+		struct vmw_fence_obj *fence;
+
+		(void) vmw_query_readback_all(dx_query_mob);
+		mutex_unlock(&dev_priv->binding_mutex);
+
+		/* Create a fence and attach the BO to it */
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		vmw_fence_single_bo(bo, fence);
+
+		if (fence != NULL)
+			vmw_fence_obj_unreference(&fence);
+
+		(void) ttm_bo_wait(bo, false, false, false);
+	} else
+		mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
 /**
  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  *
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (res->pin_count == 0) {
+		struct vmw_dma_buffer *vbo = NULL;
+
+		if (res->backup) {
+			vbo = res->backup;
+
+			ttm_bo_reserve(&vbo->base, interruptible, false, false,
+				       NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 interruptible, false);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
+			}
+
+			/* Do we really need to pin the MOB as well? */
+			vmw_bo_pin_reserved(vbo, true);
+		}
+		ret = vmw_resource_validate(res);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
+		if (ret)
+			goto out_no_validate;
+	}
+	res->pin_count++;
+
+out_no_validate:
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	ret = vmw_resource_reserve(res, false, true);
+	WARN_ON(ret);
+
+	WARN_ON(res->pin_count == 0);
+	if (--res->pin_count == 0 && res->backup) {
+		struct vmw_dma_buffer *vbo = res->backup;
+
+		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
+	}
+
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
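For readers of this patch, here is a minimal usage sketch of the new pin interface introduced above. The caller function vmw_example_pin_for_scanout() is hypothetical and not part of the patch; it only assumes a valid struct vmw_resource on which the caller already holds a normal reference, as described by the kernel-doc for vmw_resource_pin()/vmw_resource_unpin().

	/* Hypothetical caller, for illustration only -- not part of this patch. */
	static int vmw_example_pin_for_scanout(struct vmw_resource *res)
	{
		int ret;

		/*
		 * Pin the resource: validates it if needed and guarantees
		 * that it cannot be evicted and that res->id stays stable
		 * until the matching vmw_resource_unpin() call.
		 */
		ret = vmw_resource_pin(res, true);
		if (ret)
			return ret;

		/* ... program the device using the now-stable res->id ... */

		/* Drop the pin reference; the resource becomes evictable again. */
		vmw_resource_unpin(res);
		return 0;
	}

Note that pin references nest via res->pin_count, and vmw_resource_unreserve() now refuses to put a pinned resource back on the eviction LRU, which is why the patch adds the "|| res->pin_count" test to that function.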