author    Thomas Hellstrom <thellstrom@vmware.com>   2012-11-20 12:19:35 +0000
committer Dave Airlie <airlied@redhat.com>           2012-11-21 07:47:06 +1000
commit    c0951b797e7d0f2c6b0df2c0e18185c72d0cf1a1 (patch)
tree      5f3bb6a46059469b663f4fedf4aa8a5b0a404819 /drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
parent    bf6f036848ab2151c2498f11cb7d31a52a95dd5c (diff)
drm/vmwgfx: Refactor resource management
Refactor resource management to make it easy to hook up resources that are
backed up by buffers. In particular, resources and their backing buffers can
be evicted and rebound, if supported by the device. To avoid query deadlocks,
the query code is also modified somewhat.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
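
For readers new to the relocation scheme this patch introduces, the user-space
sketch below illustrates the core idea: while the command buffer is parsed,
only the 4-byte slot offsets of resource ids are recorded, and the copied
buffer is patched in a single pass once device ids are known, mirroring what
vmw_resource_relocation_add() and vmw_resource_relocations_apply() do in the
diff. The types and helpers here (struct reloc, reloc_add(), relocs_apply())
are hypothetical illustrations, not part of the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a resource: user handle plus device id. */
struct resource {
	uint32_t handle;
	uint32_t device_id;
};

/* One relocation: which 4-byte slot of the command buffer holds an id. */
struct reloc {
	const struct resource *res;
	unsigned long offset;		/* in 4-byte units */
};

/* Record a relocation while parsing; nothing is written to the buffer yet. */
static void reloc_add(struct reloc *list, size_t *count,
		      const struct resource *res, unsigned long offset)
{
	list[*count].res = res;
	list[*count].offset = offset;
	++*count;
}

/* Patch every recorded slot in one pass, once device ids are known. */
static void relocs_apply(uint32_t *cb, const struct reloc *list, size_t count)
{
	size_t i;

	for (i = 0; i < count; ++i)
		cb[list[i].offset] = list[i].res->device_id;
}

int main(void)
{
	uint32_t cmd_buf[4] = { 0, 1, 0, 3 };	/* slots 0 and 2 need an id */
	struct resource surf = { .handle = 7, .device_id = 42 };
	struct reloc relocs[2];
	size_t n = 0;

	/* "Parse": the command stream references the surface in slots 0 and 2. */
	reloc_add(relocs, &n, &surf, 0);
	reloc_add(relocs, &n, &surf, 2);

	/* After reservation/validation the device id is known; patch it in. */
	relocs_apply(cmd_buf, relocs, n);

	printf("%u %u\n", (unsigned)cmd_buf[0], (unsigned)cmd_buf[2]);	/* 42 42 */
	return 0;
}
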
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 898
1 file changed, 650 insertions(+), 248 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e5775a0db495..534c96703c3f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset, in units of 4-byte entries, into the command buffer
+ * where the id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+ struct list_head head;
+ const struct vmw_resource *res;
+ unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @switch_backup: Boolean whether to switch backup buffer on unreserve.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: The resource does not need a backup buffer allocated
+ * on reservation; the command stream will provide one.
+ */
+struct vmw_resource_val_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *new_backup;
+ unsigned long new_backup_offset;
+ bool first_usage;
+ bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - Unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+ bool backoff)
+{
+ struct vmw_resource_val_node *val;
+
+ list_for_each_entry(val, list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *new_backup =
+ backoff ? NULL : val->new_backup;
+
+ vmw_resource_unreserve(res, new_backup,
+ val->new_backup_offset);
+ vmw_dmabuf_unreference(&val->new_backup);
+ }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_node)
+{
+ struct vmw_resource_val_node *node;
+ struct drm_hash_item *hash;
+ int ret;
+
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+ &hash) == 0)) {
+ node = container_of(hash, struct vmw_resource_val_node, hash);
+ node->first_usage = false;
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+ return 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(node == NULL)) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ kfree(node);
+ return ret;
+ }
+ list_add_tail(&node->head, &sw_context->resource_list);
+ node->res = vmw_resource_reference(res);
+ node->first_usage = true;
+
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+ const struct vmw_resource *res,
+ unsigned long offset)
+{
+ struct vmw_resource_relocation *rel;
+
+ rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ if (unlikely(rel == NULL)) {
+ DRM_ERROR("Failed to allocate a resource relocation.\n");
+ return -ENOMEM;
+ }
+
+ rel->res = res;
+ rel->offset = offset;
+ list_add_tail(&rel->head, list);
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+ struct vmw_resource_relocation *rel, *n;
+
+ list_for_each_entry_safe(rel, n, list, head) {
+ list_del(&rel->head);
+ kfree(rel);
+ }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+ struct list_head *list)
+{
+ struct vmw_resource_relocation *rel;
+
+ list_for_each_entry(rel, list, head)
+ cb[rel->offset] = rel->res->id;
+}
+
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -74,21 +235,37 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
uint32_t *p_val_node)
{
uint32_t val_node;
+ struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
+ struct drm_hash_item *hash;
+ int ret;
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ &hash) == 0)) {
+ vval_buf = container_of(hash, struct vmw_validate_buffer,
+ hash);
+ val_buf = &vval_buf->base;
+ val_node = vval_buf - sw_context->val_bufs;
+ } else {
+ val_node = sw_context->cur_val_buf;
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission "
+ "exceeded.\n");
+ return -EINVAL;
+ }
+ vval_buf = &sw_context->val_bufs[val_node];
+ vval_buf->hash.key = (unsigned long) bo;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a buffer validation "
+ "entry.\n");
+ return ret;
+ }
+ ++sw_context->cur_val_buf;
+ val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -99,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return 0;
}
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource *ctx;
-
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- __le32 cid;
- } *cmd;
+ struct vmw_resource_val_node *val;
int ret;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
- return 0;
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use context %u\n",
- (unsigned) cmd->cid);
- return ret;
+ ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, bo, NULL);
+
+ if (unlikely(ret != 0))
+ return ret;
+ }
}
+ return 0;
+}
- sw_context->last_cid = cmd->cid;
- sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ ret = vmw_resource_validate(res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+ }
return 0;
}
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed, where the user-space resource id handle is located.
+ * @p_val: If non-NULL, points to the resource validation node for the
+ * resource on successful return.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t *sid)
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id,
+ struct vmw_resource_val_node **p_val)
{
- struct vmw_surface *srf;
- int ret;
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[res_type];
struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+ int ret;
- if (*sid == SVGA3D_INVALID_ID)
+ if (*id == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
+ /*
+ * Fastpath in case of repeated commands referencing the same
+ * resource
+ */
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
+ if (likely(rcache->valid && *id == rcache->handle)) {
+ const struct vmw_resource *res = rcache->res;
+
+ rcache->node->first_usage = false;
+ if (p_val)
+ *p_val = rcache->node;
+
+ return vmw_resource_relocation_add
+ (&sw_context->res_relocations, res,
+ id - sw_context->buf_start);
}
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_user_resource_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *id,
+ converter,
+ &res);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned) *id);
+ dump_stack();
return ret;
}
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ rcache->valid = true;
+ rcache->res = res;
+ rcache->handle = *id;
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ rcache->node = node;
+ if (p_val)
+ *p_val = node;
+ vmw_resource_unreference(&res);
return 0;
+
+out_no_reloc:
+ BUG_ON(sw_context->error_resource != NULL);
+ sw_context->error_resource = res;
+
+ return ret;
}
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->cid, NULL);
+}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -194,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.target.sid, NULL);
return ret;
}
@@ -209,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -226,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -248,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -268,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->body.sid,
+ NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
@@ -283,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
+
+ BUG_ON(!ctx_entry->valid);
+ sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
@@ -304,9 +583,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
+ sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
NULL);
@@ -323,27 +600,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
return 0;
}
@@ -355,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
@@ -369,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
/*
* The validate list should still hold references to all
* contexts here.
*/
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
+ if (sw_context->needs_post_query_barrier) {
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
+ struct vmw_resource *ctx;
+ int ret;
- BUG_ON(list_empty(&ctx->validate_head));
+ BUG_ON(!ctx_entry->valid);
+ ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
@@ -396,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ttm_bo_unref(&dev_priv->pinned_bo);
}
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ if (!sw_context->needs_post_query_barrier) {
+ vmw_bo_pin(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
+ /*
+ * We pin also the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ BUG_ON(sw_context->last_query_ctx == NULL);
+ dev_priv->query_cid = sw_context->last_query_ctx->id;
+ dev_priv->query_cid_valid = true;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
}
}
/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
*
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
*
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
*/
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -471,6 +731,37 @@ out_no_reloc:
return ret;
}
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -493,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -510,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -524,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
@@ -542,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -558,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- bo = &vmw_bo->base;
- ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
- if (ret) {
- DRM_ERROR("could not find surface\n");
- goto out_no_reloc;
- }
-
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
+ if (unlikely(ret != -ERESTARTSYS))
+ DRM_ERROR("could not find surface for DMA.\n");
+ goto out_no_surface;
}
- /*
- * Patch command stream with device SID.
- */
- cmd->dma.host.sid = srf->res.id;
- vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- return 0;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
-out_no_validate:
- vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
@@ -621,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &decl->array.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -636,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &range->indexArray.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -668,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &cur_state->value);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -700,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_cmd,
+ header);
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return 0;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -773,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check)
+ &vmw_cmd_blt_surf_screen_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -829,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int32_t cur_size = size;
int ret;
+ sw_context->buf_start = buf;
+
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -860,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index];
+ validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- } else
+ break;
+ case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
+ break;
+ default:
+ BUG();
+ }
}
vmw_free_relocations(sw_context);
}
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+ struct vmw_resource_val_node *val, *val_next;
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+
+ list_for_each_entry_safe(val, val_next, list, head) {
+ list_del_init(&val->head);
+ vmw_resource_unreference(&val->res);
+ kfree(val);
+ }
+}
+
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
+ struct vmw_validate_buffer *entry, *next;
+ struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- head) {
- list_del(&entry->head);
- vmw_dmabuf_validate_clear(entry->bo);
- ttm_bo_unref(&entry->bo);
+ base.head) {
+ list_del(&entry->base.head);
+ ttm_bo_unref(&entry->base.bo);
+ (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
+ list_for_each_entry(val, &sw_context->resource_list, head)
+ (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -939,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry;
+ struct vmw_validate_buffer *entry;
int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1106,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
+ struct vmw_resource *error_resource;
+ struct list_head resource_list;
uint32_t handle;
void *cmd;
int ret;
@@ -1135,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
- sw_context->cid_valid = false;
- sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+ sw_context->last_query_ctx = NULL;
+ sw_context->needs_post_query_barrier = false;
+ memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
+ INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (!sw_context->res_ht_initialized) {
+ ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ sw_context->res_ht_initialized = true;
+ }
+ INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -1161,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- vmw_apply_relocations(sw_context);
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_throttle;
+ goto out_err;
}
+ vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
+
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1194,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *) fence);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
@@ -1209,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
+ list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+
return 0;
out_err:
+ vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
+ list_splice_init(&sw_context->resource_list, &resource_list);
+ error_resource = sw_context->error_resource;
+ sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+ if (unlikely(error_resource != NULL))
+ vmw_resource_unreference(&error_resource);
+
return ret;
}
@@ -1244,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
@@ -1263,23 +1633,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
*/
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
+ struct vmw_fence_obj *lfence = NULL;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
@@ -1297,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
goto out_no_reserve;
}
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
+ if (dev_priv->query_cid_valid) {
+ BUG_ON(fence != NULL);
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+ dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (fence == NULL) {
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+ NULL);
+ fence = lfence;
+ }
ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+ if (lfence != NULL)
+ vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
out_no_emit:
@@ -1324,6 +1701,31 @@ out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ if (dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}