Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.c | 80
1 file changed, 38 insertions, 42 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2645f4e850c2..e0e677b2a3a9 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
 
 #include "i915_drv.h"
 #include "i915_globals.h"
+#include "i915_trace.h"
 #include "i915_vma.h"
 
 static struct i915_global_vma {
@@ -86,8 +87,7 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
 
 static int __i915_vma_active(struct i915_active *ref)
 {
-	i915_vma_get(active_to_vma(ref));
-	return 0;
+	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
 }
 
 static void __i915_vma_retire(struct i915_active *ref)
@@ -119,7 +119,6 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	i915_active_init(vm->i915, &vma->active,
 			 __i915_vma_active, __i915_vma_retire);
-	INIT_ACTIVE_REQUEST(&vma->last_fence);
 
 	/* Declare ourselves safe for use inside shrinkers */
 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -801,8 +800,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(vma->fence);
 
-	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
 	mutex_lock(&vma->vm->mutex);
 	list_del(&vma->vm_link);
 	mutex_unlock(&vma->vm->mutex);
@@ -867,7 +864,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
 	u64 vma_offset;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->vm->mutex);
 
 	if (!i915_vma_has_userfault(vma))
 		return;
@@ -886,23 +883,6 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 		list_del(&vma->obj->userfault_link);
 }
 
-static void export_fence(struct i915_vma *vma,
-			 struct i915_request *rq,
-			 unsigned int flags)
-{
-	struct dma_resv *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	if (flags & EXEC_OBJECT_WRITE)
-		dma_resv_add_excl_fence(resv, &rq->fence);
-	else if (dma_resv_reserve_shared(resv, 1) == 0)
-		dma_resv_add_shared_fence(resv, &rq->fence);
-}
-
 int i915_vma_move_to_active(struct i915_vma *vma,
 			    struct i915_request *rq,
 			    unsigned int flags)
@@ -922,27 +902,30 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	err = i915_active_ref(&vma->active, rq->timeline, rq);
 	if (unlikely(err))
 		return err;
 
-	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
-		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-			__i915_active_request_set(&obj->frontbuffer_write, rq);
+		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
+			i915_active_ref(&obj->frontbuffer->write,
+					rq->timeline,
+					rq);
 
+		dma_resv_add_excl_fence(vma->resv, &rq->fence);
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 		obj->read_domains = 0;
+	} else {
+		err = dma_resv_reserve_shared(vma->resv, 1);
+		if (unlikely(err))
+			return err;
+
+		dma_resv_add_shared_fence(vma->resv, &rq->fence);
+		obj->write_domain = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 	obj->mm.dirty = true;
 
-	if (flags & EXEC_OBJECT_NEEDS_FENCE)
-		__i915_active_request_set(&vma->last_fence, rq);
-
-	export_fence(vma, rq, flags);
-
 	GEM_BUG_ON(!i915_vma_is_active(vma));
 	return 0;
 }
@@ -973,14 +956,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * before we are finished).
 		 */
 		__i915_vma_pin(vma);
-
 		ret = i915_active_wait(&vma->active);
-		if (ret)
-			goto unpin;
-
-		ret = i915_active_request_retire(&vma->last_fence,
-						 &vma->vm->i915->drm.struct_mutex);
-unpin:
 		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
@@ -1006,12 +982,16 @@ unpin:
 		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
 
 		/* release the fence reg _after_ flushing */
-		ret = i915_vma_put_fence(vma);
+		mutex_lock(&vma->vm->mutex);
+		ret = i915_vma_revoke_fence(vma);
+		mutex_unlock(&vma->vm->mutex);
 		if (ret)
 			return ret;
 
 		/* Force a pagefault for domain tracking on next user access */
+		mutex_lock(&vma->vm->mutex);
 		i915_vma_revoke_mmap(vma);
+		mutex_unlock(&vma->vm->mutex);
 		__i915_vma_iounmap(vma);
 		vma->flags &= ~I915_VMA_CAN_FENCE;
 	}
@@ -1030,6 +1010,22 @@ unpin:
 	return 0;
 }
 
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
+{
+	i915_gem_object_make_unshrinkable(vma->obj);
+	return vma;
+}
+
+void i915_vma_make_shrinkable(struct i915_vma *vma)
+{
+	i915_gem_object_make_shrinkable(vma->obj);
+}
+
+void i915_vma_make_purgeable(struct i915_vma *vma)
+{
+	i915_gem_object_make_purgeable(vma->obj);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_vma.c"
 #endif
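Note on the __i915_vma_active() hunk: switching from an unconditional i915_vma_get() to i915_vma_tryget() means marking a vma active can now fail with -ENOENT once the last reference is gone, rather than resurrecting an object that is already being destroyed. A minimal sketch of the try-get idiom, assuming i915_vma_tryget() follows the usual kref_get_unless_zero() pattern (the struct and helper names here are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/kref.h>

struct obj {
	struct kref ref;
};

/* Illustrative helper: take a reference only while the count is non-zero. */
static struct obj *obj_tryget(struct obj *o)
{
	return kref_get_unless_zero(&o->ref) ? o : NULL;
}

/* Mirrors __i915_vma_active(): an object mid-destruction reports -ENOENT. */
static int obj_activate(struct obj *o)
{
	return obj_tryget(o) ? 0 : -ENOENT;
}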
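Note on the i915_vma_move_to_active() hunk: the fence export moves inline. A write installs the request fence as the exclusive fence; a read first reserves a shared slot and, unlike the removed export_fence() which silently ignored allocation failure (risking missed synchronisation), now propagates the error to the caller. A distilled sketch of that flow, using only the dma_resv calls visible in the diff; the function name is illustrative and the dma_resv lock is assumed held, as on the execbuf path:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int track_request_fence(struct dma_resv *resv,
			       struct dma_fence *fence,
			       bool write)
{
	int err;

	if (write) {
		/* Writes replace all prior fences with an exclusive one. */
		dma_resv_add_excl_fence(resv, fence);
		return 0;
	}

	/* Reads need a reserved slot first; reservation may fail (-ENOMEM). */
	err = dma_resv_reserve_shared(resv, 1);
	if (err)
		return err;

	dma_resv_add_shared_fence(resv, fence);
	return 0;
}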
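Note on the final hunk: the three new wrappers simply forward shrinker visibility control from the vma to its backing object. A hypothetical caller (not from the patch) might exploit the return value of i915_vma_make_unshrinkable() to fold the transition into an assignment when stashing a long-lived mapping:

struct hypothetical_ctx {
	struct i915_vma *state;
};

static void pin_state(struct hypothetical_ctx *ce, struct i915_vma *vma)
{
	/* Hide the backing object from the shrinker while it is in use. */
	ce->state = i915_vma_make_unshrinkable(vma);
}

static void unpin_state(struct hypothetical_ctx *ce)
{
	/* Expose the backing object to the shrinker again. */
	i915_vma_make_shrinkable(ce->state);
	ce->state = NULL;
}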