Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c           | 239
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c   |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c      |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c |  86
4 files changed, 80 insertions(+), 259 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d44a58..cb9dd674670c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
+ if (bo->resv == &bo->ttm_resv)
+ reservation_object_fini(&bo->ttm_resv);
+
if (bo->destroy)
bo->destroy(bo);
else {
@@ -158,24 +161,12 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
- bool interruptible)
-{
- if (interruptible) {
- return wait_event_interruptible(bo->event_queue,
- !ttm_bo_is_reserved(bo));
- } else {
- wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
- return 0;
- }
-}
-
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -191,6 +182,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
}
+EXPORT_SYMBOL(ttm_bo_add_to_lru);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
@@ -213,71 +205,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
-int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
-{
- int ret;
-
- while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
- /**
- * Deadlock avoidance for multi-bo reserving.
- */
- if (use_sequence && bo->seq_valid) {
- /**
- * We've already reserved this one.
- */
- if (unlikely(sequence == bo->val_seq))
- return -EDEADLK;
- /**
- * Already reserved by a thread that will not back
- * off for us. We need to back off.
- */
- if (unlikely(sequence - bo->val_seq < (1 << 31)))
- return -EAGAIN;
- }
-
- if (no_wait)
- return -EBUSY;
-
- ret = ttm_bo_wait_unreserved(bo, interruptible);
-
- if (unlikely(ret))
- return ret;
- }
-
- if (use_sequence) {
- bool wake_up = false;
- /**
- * Wake up waiters that may need to recheck for deadlock,
- * if we decreased the sequence number.
- */
- if (unlikely((bo->val_seq - sequence < (1 << 31))
- || !bo->seq_valid))
- wake_up = true;
-
- /*
- * In the worst case with memory ordering these values can be
- * seen in the wrong order. However since we call wake_up_all
- * in that case, this will hopefully not pose a problem,
- * and the worst case would only cause someone to accidentally
- * hit -EAGAIN in ttm_bo_reserve when they see old value of
- * val_seq. However this would only happen if seq_valid was
- * written before val_seq was, and just means some slightly
- * increased cpu usage
- */
- bo->val_seq = sequence;
- bo->seq_valid = true;
- if (wake_up)
- wake_up_all(&bo->event_queue);
- } else {
- bo->seq_valid = false;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_reserve);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
@@ -290,89 +217,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
(never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
-int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
-{
- struct ttm_bo_global *glob = bo->glob;
- int put_count = 0;
- int ret;
-
- ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0)) {
- spin_lock(&glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
- }
-
- return ret;
-}
-
-int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
- bool interruptible, uint32_t sequence)
-{
- bool wake_up = false;
- int ret;
-
- while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
- WARN_ON(bo->seq_valid && sequence == bo->val_seq);
-
- ret = ttm_bo_wait_unreserved(bo, interruptible);
-
- if (unlikely(ret))
- return ret;
- }
-
- if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
- wake_up = true;
-
- /**
- * Wake up waiters that may need to recheck for deadlock,
- * if we decreased the sequence number.
- */
- bo->val_seq = sequence;
- bo->seq_valid = true;
- if (wake_up)
- wake_up_all(&bo->event_queue);
-
- return 0;
-}
-
-int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible, uint32_t sequence)
-{
- struct ttm_bo_global *glob = bo->glob;
- int put_count, ret;
-
- ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
- if (likely(!ret)) {
- spin_lock(&glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
- }
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
-
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-}
-
-void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
- struct ttm_bo_global *glob = bo->glob;
+ int put_count;
- spin_lock(&glob->lru_lock);
- ttm_bo_unreserve_locked(bo);
- spin_unlock(&glob->lru_lock);
+ spin_lock(&bo->glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bo->glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
}
-EXPORT_SYMBOL(ttm_bo_unreserve);
+EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
/*
* Call bo->mutex locked.
@@ -544,17 +398,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
}
ttm_bo_mem_put(bo, &bo->mem);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-
- /*
- * Since the final reference to this bo may not be dropped by
- * the current task we have to put a memory barrier here to make
- * sure the changes done in this function are always visible.
- *
- * This function only needs protection against the final kref_put.
- */
- smp_mb__before_atomic_dec();
+ ww_mutex_unlock (&bo->resv->lock);
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -586,10 +430,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
- if (!ret) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
- }
+ if (!ret)
+ ww_mutex_unlock(&bo->resv->lock);
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -639,8 +481,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -678,8 +519,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -831,7 +671,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -1121,7 +961,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
/*
* FIXME: It's possible to pipeline buffer moves.
@@ -1180,7 +1020,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
{
int ret;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
@@ -1239,6 +1079,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
@@ -1265,8 +1106,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
- atomic_set(&bo->reserved, 1);
- init_waitqueue_head(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1284,37 +1123,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.bus.io_reserved_count = 0;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
- bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
+ bo->resv = &bo->ttm_resv;
+ reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
ret = ttm_bo_check_placement(bo, placement);
- if (unlikely(ret != 0))
- goto out_err;
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg) {
+ if (likely(!ret) &&
+ (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg))
ret = ttm_bo_setup_vm(bo);
- if (ret)
- goto out_err;
- }
- ret = ttm_bo_validate(bo, placement, interruptible, false);
- if (ret)
- goto out_err;
+ locked = ww_mutex_trylock(&bo->resv->lock);
+ WARN_ON(!locked);
- ttm_bo_unreserve(bo);
- return 0;
+ if (likely(!ret))
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
-out_err:
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+
+ if (unlikely(ret))
+ ttm_bo_unref(&bo);
return ret;
}
@@ -1619,9 +1455,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
goto out_no_sys;
bdev->addr_space_rb = RB_ROOT;
- ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
- if (unlikely(ret != 0))
- goto out_no_addr_mm;
+ drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
@@ -1635,8 +1469,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
mutex_unlock(&glob->device_list_mutex);
return 0;
-out_no_addr_mm:
- ttm_bo_clean_mm(bdev, 0);
out_no_sys:
return ret;
}
@@ -1927,8 +1759,7 @@ out:
* already swapped buffer.
*/
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ww_mutex_unlock(&bo->resv->lock);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
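
The ttm_bo.c hunks above drop the hand-rolled reserved flag, event_queue and val_seq deadlock avoidance in favour of reservation objects backed by a ww_mutex. The wait/wound protocol those hunks rely on can be sketched roughly as below; only the ww_* and list_* calls are real kernel API, while struct demo_obj, demo_obj_init() and demo_lock_list() are hypothetical illustration helpers.

/*
 * Minimal sketch of the wait/wound locking protocol that replaces the
 * removed val_seq/seq_valid deadlock avoidance.
 */
#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct demo_obj {
	struct ww_mutex lock;
	struct list_head head;
};

static void demo_obj_init(struct demo_obj *obj)
{
	ww_mutex_init(&obj->lock, &demo_ww_class);
	INIT_LIST_HEAD(&obj->head);
}

/* Lock every object on @list; the caller unlocks them and then calls
 * ww_acquire_fini(ctx) when it is done with the whole set. */
static int demo_lock_list(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct demo_obj *contended = NULL, *obj;
	int ret;

	ww_acquire_init(ctx, &demo_ww_class);
retry:
	list_for_each_entry(obj, list, head) {
		if (obj == contended) {
			/* already locked in the slowpath below */
			contended = NULL;
			continue;
		}
		ret = ww_mutex_lock(&obj->lock, ctx);
		if (ret < 0)
			goto err;
	}
	ww_acquire_done(ctx);	/* no further locks will be taken */
	return 0;

err:
	/* back off: drop everything acquired before the contended object */
	list_for_each_entry_continue_reverse(obj, list, head)
		ww_mutex_unlock(&obj->lock);
	if (contended)
		ww_mutex_unlock(&contended->lock);

	if (ret == -EDEADLK) {
		/* wounded by an older ticket: wait for the lock, retry all */
		contended = obj;
		ww_mutex_lock_slow(&contended->lock, ctx);
		goto retry;
	}
	ww_acquire_fini(ctx);
	return ret;
}
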
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9212494e9072..e4367f91472a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -103,18 +103,12 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct ttm_range_manager *rman;
- int ret;
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
if (!rman)
return -ENOMEM;
- ret = drm_mm_init(&rman->mm, 0, p_size);
- if (ret) {
- kfree(rman);
- return ret;
- }
-
+ drm_mm_init(&rman->mm, 0, p_size);
spin_lock_init(&rman->lock);
man->priv = rman;
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index af894584dd90..319cf4127c5b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -433,6 +433,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
@@ -445,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
@@ -463,6 +463,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
+ fbo->resv = &fbo->ttm_resv;
+ reservation_object_init(fbo->resv);
+ ret = ww_mutex_trylock(&fbo->resv->lock);
+ WARN_ON(!ret);
*new_obj = fbo;
return 0;
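
The transfer object above gets its own embedded reservation object and is then locked with a trylock that cannot fail, because the freshly created object is not yet visible to anyone else. Roughly, the pattern is the one sketched below; struct demo_bo and the demo_bo_* helpers are hypothetical, while the reservation_object_* and ww_mutex_trylock() calls follow this patch.

#include <linux/bug.h>
#include <linux/reservation.h>

struct demo_bo {
	struct reservation_object ttm_resv;	/* embedded, used by default */
	struct reservation_object *resv;	/* may later point elsewhere */
};

static void demo_bo_init_resv(struct demo_bo *bo)
{
	bool locked;

	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);

	/* nobody else can see the object yet, so this cannot contend */
	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);
}

static void demo_bo_fini_resv(struct demo_bo *bo)
{
	/* mirror of the ttm_bo_release_list hunk: only clean up the
	 * embedded reservation object if it is actually in use */
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
}
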
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7b90def15674..6c911789ae5c 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,8 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list,
+ struct ww_acquire_ctx *ticket)
{
struct ttm_validate_buffer *entry;
@@ -41,14 +42,12 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
if (!entry->reserved)
continue;
+ entry->reserved = false;
if (entry->removed) {
ttm_bo_add_to_lru(bo);
entry->removed = false;
-
}
- entry->reserved = false;
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ww_mutex_unlock(&bo->resv->lock);
}
}
@@ -82,7 +81,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
}
}
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
@@ -93,7 +93,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list);
+ ttm_eu_backoff_reservation_locked(list, ticket);
+ ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +111,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
- uint32_t val_seq;
if (list_empty(list))
return 0;
@@ -129,9 +130,7 @@ int ttm_eu_reserve_buffers(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- spin_lock(&glob->lru_lock);
- val_seq = entry->bo->bdev->val_seq++;
-
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -140,49 +139,34 @@ retry:
if (entry->reserved)
continue;
- ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(bo, true, false,
- true, val_seq);
- spin_lock(&glob->lru_lock);
- if (!ret)
- break;
-
- if (unlikely(ret != -EAGAIN))
- goto err;
- /* fallthrough */
- case -EAGAIN:
- ttm_eu_backoff_reservation_locked(list);
+ ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
- /*
- * temporarily increase sequence number every retry,
- * to prevent us from seeing our old reservation
- * sequence when someone else reserved the buffer,
- * but hasn't updated the seq_valid/seqno members yet.
+ if (ret == -EDEADLK) {
+ /* uh oh, we lost out, drop every reservation and try
+ * to only reserve this buffer, then start over if
+ * this succeeds.
*/
- val_seq = entry->bo->bdev->val_seq++;
-
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
- if (unlikely(ret != 0))
- return ret;
- spin_lock(&glob->lru_lock);
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+ if (unlikely(ret != 0)) {
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ goto err_fini;
+ }
+
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ret = -EBUSY;
goto err;
}
goto retry;
- default:
+ } else if (ret)
goto err;
- }
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +175,27 @@ retry:
}
}
+ ww_acquire_done(ticket);
+ spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
-
return 0;
err:
- ttm_eu_backoff_reservation_locked(list);
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
+err_fini:
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
@@ -228,11 +218,13 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- ttm_bo_unreserve_locked(bo);
+ ttm_bo_add_to_lru(bo);
+ ww_mutex_unlock(&bo->resv->lock);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
+ ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
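
With the new signatures above, a driver's command submission path hands a ww_acquire_ctx ticket through the execbuf helpers roughly as follows. Only the ttm_eu_* calls and their argument order come from this patch; demo_submit() and demo_validate_and_emit() are hypothetical placeholders.

#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* hypothetical: validate placements and emit the command stream,
 * returning the sync object covering the submission */
static int demo_validate_and_emit(struct list_head *list, void **sync_obj);

static int demo_submit(struct list_head *validate_list)
{
	struct ww_acquire_ctx ticket;
	void *sync_obj;
	int ret;

	/* reserves every BO on the list, handling -EDEADLK internally;
	 * on error the ticket has already been finalized */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list);
	if (ret)
		return ret;

	ret = demo_validate_and_emit(validate_list, &sync_obj);
	if (ret) {
		/* drops all reservations and the ticket */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/* attaches the sync object, puts the BOs back on the LRU and
	 * releases both the reservations and the ticket */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, sync_obj);
	return 0;
}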