author     Thomas Hellstrom <thellstrom@vmware.com>    2010-10-19 09:01:01 +0200
committer  Dave Airlie <airlied@redhat.com>            2010-10-21 11:53:28 +1000
commit     e1efc9b6ac22c605fd326b3f6af9b393325d43b4 (patch)
tree       98960a29a6b4217b3ac219fd2be37e6b648e4589 /drivers/gpu/drm/ttm/ttm_bo.c
parent     40d857bba2915a4e8d82f44744a186bfdd1a46ea (diff)
drm/ttm: Optimize delayed buffer destruction
This commit replaces the ttm_bo_cleanup_refs function with two new functions:
one for the case where the bo is not yet on the delayed-destroy list, and
one for the case where the bo was on the delayed-destroy list, at least at
the time of the call. This makes it possible to optimize the two cases somewhat.

It also makes it possible to destroy buffers on the delayed-delete list
directly when they are about to be evicted or swapped out. Previously they
were only evicted / swapped out, and destruction was left to the
delayed buffer destruction thread.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
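
To make the split concrete, here is a minimal, self-contained userspace sketch (not the kernel code) of the pattern the commit describes: one path queues a still-busy buffer for delayed destruction, the other reaps an already-dying buffer on the spot, which is what the eviction path can now do. All names here (toy_bo, toy_cleanup_refs_or_queue, toy_cleanup_refs, toy_evict) are hypothetical stand-ins for the TTM functions touched in the diff below.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy buffer object: "busy" stands in for a pending fence (bo->sync_obj),
 * "on_ddestroy" for !list_empty(&bo->ddestroy). */
struct toy_bo {
	bool busy;
	bool on_ddestroy;
};

/* Release path: destroy an idle buffer at once, queue a busy one. */
static void toy_cleanup_refs_or_queue(struct toy_bo *bo)
{
	if (bo->busy) {
		bo->on_ddestroy = true;
		printf("busy: queued for delayed destruction\n");
		return;
	}
	printf("idle: destroyed immediately\n");
	free(bo);
}

/* Reap a buffer that is already on the delayed-destroy list.
 * Returns nonzero if the buffer is still busy and we may not wait. */
static int toy_cleanup_refs(struct toy_bo *bo, bool no_wait_gpu)
{
	if (bo->busy) {
		if (no_wait_gpu)
			return -1;	/* caller retries later */
		bo->busy = false;	/* pretend we waited for the GPU */
	}
	bo->on_ddestroy = false;
	printf("dying buffer reaped\n");
	free(bo);
	return 0;
}

/* Eviction path: a dying buffer is destroyed instead of being moved. */
static void toy_evict(struct toy_bo *bo)
{
	if (bo->on_ddestroy) {
		(void) toy_cleanup_refs(bo, false);
		return;
	}
	printf("normal eviction\n");
}

int main(void)
{
	struct toy_bo *bo = calloc(1, sizeof(*bo));

	bo->busy = true;
	toy_cleanup_refs_or_queue(bo);	/* still busy: goes on the list  */
	toy_evict(bo);			/* about to be evicted: reap now */
	return 0;
}

The real code does the same thing under reference counting and locking (bo::lock, glob->lru_lock, bo::reserve) that this sketch deliberately omits.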
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 171
1 file changed, 107 insertions, 64 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4a73f401644d..a1cb783c7131 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -455,100 +455,123 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	wake_up_all(&bo->event_queue);
 }
 
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- * up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_driver *driver;
+	void *sync_obj;
+	void *sync_obj_arg;
+	int put_count;
 	int ret;
 
 	spin_lock(&bo->lock);
-retry:
-	(void) ttm_bo_wait(bo, false, false, !remove_all);
-
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
-		int put_count;
-
-		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
 		/**
-		 * Someone else has the object reserved. Bail and retry.
+		 * Lock inversion between bo::reserve and bo::lock here,
+		 * but that's OK, since we're only trylocking.
 		 */
 
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			goto requeue;
-		}
-
-		/**
-		 * We can re-check for sync object without taking
-		 * the bo::lock since setting the sync object requires
-		 * also bo::reserved. A busy object at this point may
-		 * be caused by another thread starting an accelerated
-		 * eviction.
-		 */
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-		if (unlikely(bo->sync_obj)) {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			if (remove_all)
-				goto retry;
-			else
-				goto requeue;
-		}
+		if (unlikely(ret == -EBUSY))
+			goto queue;
 
+		spin_unlock(&bo->lock);
 		put_count = ttm_bo_del_from_lru(bo);
-		if (!list_empty(&bo->ddestroy)) {
-			list_del_init(&bo->ddestroy);
-			++put_count;
-		}
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-		return 0;
+		return;
+	} else {
+		spin_lock(&glob->lru_lock);
 	}
-requeue:
+queue:
+	sync_obj = bo->sync_obj;
+	sync_obj_arg = bo->sync_obj_arg;
+	driver = bdev->driver;
+
+	kref_get(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bo->lock);
+
+	if (sync_obj)
+		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+	schedule_delayed_work(&bdev->wq,
+			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool no_wait_reserve,
+			       bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret = 0;
+
+retry:
+	spin_lock(&bo->lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bo->lock);
+
+	if (unlikely(ret != 0))
+		return ret;
+
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&bo->ddestroy)) {
-		void *sync_obj = bo->sync_obj;
-		void *sync_obj_arg = bo->sync_obj_arg;
+	ret = ttm_bo_reserve_locked(bo, interruptible,
+				    no_wait_reserve, false, 0);
 
-		kref_get(&bo->list_kref);
-		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
+		return ret;
+	}
 
-		if (sync_obj)
-			driver->sync_obj_flush(sync_obj, sync_obj_arg);
-		schedule_delayed_work(&bdev->wq,
-				      ((HZ / 100) < 1) ? 1 : HZ / 100);
-		ret = 0;
+	/**
+	 * We can re-check for sync object without taking
+	 * the bo::lock since setting the sync object requires
+	 * also bo::reserved. A busy object at this point may
+	 * be caused by another thread recently starting an accelerated
+	 * eviction.
+	 */
 
-	} else {
+	if (unlikely(bo->sync_obj)) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
-		ret = -EBUSY;
+		goto retry;
 	}
 
-	return ret;
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	spin_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+	return 0;
 }
 
 /**
@@ -580,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 		spin_unlock(&glob->lru_lock);
 
-		ret = ttm_bo_cleanup_refs(entry, remove_all);
+		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+					  !remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;
 
@@ -623,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
-	ttm_bo_cleanup_refs(bo, false);
+	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
 }
@@ -731,6 +755,18 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_bo_cleanup_refs(bo, interruptible,
+					  no_wait_reserve, no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+
+		if (likely(ret == 0 || ret == -ERESTARTSYS))
+			return ret;
+
+		goto retry;
+	}
+
 	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
@@ -1754,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 				      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
+		if (!list_empty(&bo->ddestroy)) {
+			spin_unlock(&glob->lru_lock);
+			(void) ttm_bo_cleanup_refs(bo, false, false, false);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
+			continue;
+		}
+
 		/**
 		 * Reserve buffer. Since we unlock while sleeping, we need
 		 * to re-check that nobody removed us from the swap-list while
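
As a quick usage reference, the three call sites of the reworked ttm_bo_cleanup_refs() in the hunks above choose the wait flags as follows (condensed from the diff; the comments are added here and are this editor's reading, not part of the patch):

	/* delayed-delete worker: only block when draining everything (remove_all) */
	ret = ttm_bo_cleanup_refs(entry, false, !remove_all, !remove_all);

	/* eviction: honour the caller's wait semantics */
	ret = ttm_bo_cleanup_refs(bo, interruptible, no_wait_reserve, no_wait_gpu);

	/* swapout: always willing to wait for reserve and for the GPU */
	(void) ttm_bo_cleanup_refs(bo, false, false, false);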