author | Maarten Lankhorst <m.b.lankhorst@gmail.com> | 2013-06-27 13:48:17 +0200
committer | Dave Airlie <airlied@redhat.com> | 2013-06-28 12:02:20 +1000
commit | ecff665f5e3f1c6909353e00b9420e45ae23d995
tree | 8fed7d4570ec707427e954c6d2695d1549e08364 /drivers/gpu/drm/ttm
parent | 786d7257e537da0674c02e16e3b30a44665d1cee
drm/ttm: make ttm reservation calls behave like reservation calls
This commit converts the source of the val_seq counter to the
ww_mutex API. The reservation objects are converted later, because
there is still a lockdep splat in nouveau that has to be resolved first.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
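For context, here is a minimal sketch of the calling convention this patch establishes. The helper names and signatures are taken from the diff below; `my_driver_submit()`, `my_validate_buffers()`, `validate_list`, and `my_sync_obj` are hypothetical driver-side names, not part of the patch:

```c
/*
 * Hedged sketch of the new ticketed calling convention; everything
 * prefixed my_* is a hypothetical driver-side name, not from the patch.
 */
static int my_driver_submit(struct list_head *validate_list, void *my_sync_obj)
{
	struct ww_acquire_ctx ticket;	/* replaces the old uint32_t val_seq */
	int ret;

	/* calls ww_acquire_init(&ticket, &reservation_ww_class) internally */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list);
	if (ret)
		return ret;	/* error path already did ww_acquire_fini() */

	ret = my_validate_buffers(validate_list);
	if (ret) {
		/* unreserves all buffers and calls ww_acquire_fini(&ticket) */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/* attaches the fence, unreserves, and calls ww_acquire_fini(&ticket) */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, my_sync_obj);
	return 0;
}
```

Note that all three helpers finalize the ticket themselves, so the caller never calls ww_acquire_fini() directly.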
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 50
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_execbuf_util.c | 58
2 files changed, 62 insertions, 46 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d44a58..b912375b9c18 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			 bool interruptible,
-			 bool no_wait, bool use_sequence, uint32_t sequence)
+			 bool no_wait, bool use_ticket,
+			 struct ww_acquire_ctx *ticket)
 {
 	int ret;
 
@@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
-		if (use_sequence && bo->seq_valid) {
+		if (use_ticket && bo->seq_valid) {
 			/**
 			 * We've already reserved this one.
 			 */
-			if (unlikely(sequence == bo->val_seq))
+			if (unlikely(ticket->stamp == bo->val_seq))
 				return -EDEADLK;
 			/**
 			 * Already reserved by a thread that will not back
 			 * off for us. We need to back off.
 			 */
-			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
 				return -EAGAIN;
 		}
 
@@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if (use_sequence) {
+	if (use_ticket) {
 		bool wake_up = false;
+
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
+		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
 			     || !bo->seq_valid))
 			wake_up = true;
 
@@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		 * written before val_seq was, and just means some slightly
 		 * increased cpu usage
 		 */
-		bo->val_seq = sequence;
+		bo->val_seq = ticket->stamp;
 		bo->seq_valid = true;
 		if (wake_up)
 			wake_up_all(&bo->event_queue);
@@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
-		   bool no_wait, bool use_sequence, uint32_t sequence)
+		   bool no_wait, bool use_ticket,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
-				   sequence);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
+				   ticket);
 	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible, uint32_t sequence)
+				  bool interruptible,
+				  struct ww_acquire_ctx *ticket)
 {
 	bool wake_up = false;
 	int ret;
 
 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
 
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
 
@@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
 		wake_up = true;
 
 	/**
 	 * Wake up waiters that may need to recheck for deadlock,
 	 * if we decreased the sequence number.
 	 */
-	bo->val_seq = sequence;
+	bo->val_seq = ticket->stamp;
 	bo->seq_valid = true;
 	if (wake_up)
 		wake_up_all(&bo->event_queue);
@@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-			    bool interruptible, uint32_t sequence)
+			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count, ret;
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
 	if (likely(!ret)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
@@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_unreserve_locked(bo);
+	ttm_bo_unreserve_ticket_locked(bo, NULL);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
+void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	ttm_bo_unreserve_ticket_locked(bo, ticket);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
+
 /*
  * Call bo->mutex locked.
  */
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7b90def15674..efcb734e5543 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,8 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list,
+					      struct ww_acquire_ctx *ticket)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,14 +42,15 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		entry->reserved = false;
 		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_unreserve_ticket_locked(bo, ticket);
 			entry->removed = false;
+		} else {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
 		}
-
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
 	}
 }
 
@@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
+	ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
@@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
+	ww_acquire_init(ticket, &reservation_ww_class);
 	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
 
retry:
 	list_for_each_entry(entry, list, head) {
@@ -140,7 +144,7 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
 		switch (ret) {
 		case 0:
 			break;
@@ -148,8 +152,9 @@ retry:
 			ttm_eu_del_from_lru_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, val_seq);
+						   true, ticket);
 			spin_lock(&glob->lru_lock);
+
 			if (!ret)
 				break;
 
@@ -158,21 +163,13 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
-			ttm_eu_backoff_reservation_locked(list);
-
-			/*
-			 * temporarily increase sequence number every retry,
-			 * to prevent us from seeing our old reservation
-			 * sequence when someone else reserved the buffer,
-			 * but hasn't updated the seq_valid/seqno members yet.
-			 */
-			val_seq = entry->bo->bdev->val_seq++;
-
+			ttm_eu_backoff_reservation_locked(list, ticket);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
 			if (unlikely(ret != 0))
-				return ret;
+				goto err_fini;
+
 			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +188,25 @@ retry:
 		}
 	}
 
+	ww_acquire_done(ticket);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
-
 	return 0;
 
 err:
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
+err_fini:
+	ww_acquire_done(ticket);
+	ww_acquire_fini(ticket);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		ttm_bo_unreserve_locked(bo);
+		ttm_bo_unreserve_ticket_locked(bo, ticket);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
+	ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
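One subtle point in the conversion above: the old `uint32_t` age test `sequence - bo->val_seq < (1 << 31)` becomes `ticket->stamp - bo->val_seq <= LONG_MAX`, which is the same wraparound-safe "which stamp is older" idiom, widened to `unsigned long`. A standalone user-space illustration of that idiom follows; this is an explanatory sketch, not code from the patch:

```c
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Wraparound-safe stamp comparison: with a free-running unsigned
 * counter, "a was issued at or after b" holds exactly when the
 * unsigned difference a - b has not wrapped past LONG_MAX.
 */
static bool stamp_not_older(unsigned long a, unsigned long b)
{
	return a - b <= LONG_MAX;
}

int main(void)
{
	printf("%d\n", stamp_not_older(105UL, 100UL));        /* 1: plainly newer */
	printf("%d\n", stamp_not_older(2UL, ULONG_MAX - 1));  /* 1: newer across wrap */
	printf("%d\n", stamp_not_older(100UL, 105UL));        /* 0: older */
	return 0;
}
```

In ttm_bo_reserve_nolru() this test decides who backs off: if the holder's stamp is older than ours, the holder will not yield, so the newer ticket returns -EAGAIN and retries via the slowpath.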