Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_active.c')
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_active.c | 226
1 files changed, 180 insertions, 46 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537a5fa6..ef572a0c2566 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,10 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/kref.h>
+
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
 
 #include "i915_selftest.h"
 
@@ -13,42 +16,90 @@
 
 struct live_active {
 	struct i915_active base;
+	struct kref ref;
 	bool retired;
 };
 
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+	kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+	i915_active_fini(&active->base);
+	kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+	struct live_active *active = container_of(ref, typeof(*active), ref);
+
+	__live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+	kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+	struct live_active *active = container_of(base, typeof(*active), base);
+
+	__live_get(active);
+	return 0;
+}
+
+static void __live_retire(struct i915_active *base)
 {
 	struct live_active *active = container_of(base, typeof(*active), base);
 
 	active->retired = true;
+	__live_put(active);
+}
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+	struct live_active *active;
+
+	active = kzalloc(sizeof(*active), GFP_KERNEL);
+	if (!active)
+		return NULL;
+
+	kref_init(&active->ref);
+	i915_active_init(&active->base, __live_active, __live_retire);
+
+	return active;
 }
 
-static int __live_active_setup(struct drm_i915_private *i915,
-			       struct live_active *active)
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	struct i915_sw_fence *submit;
-	enum intel_engine_id id;
+	struct live_active *active;
 	unsigned int count = 0;
 	int err = 0;
 
-	submit = heap_fence_create(GFP_KERNEL);
-	if (!submit)
-		return -ENOMEM;
+	active = __live_alloc(i915);
+	if (!active)
+		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(i915, &active->base, __live_active_retire);
-	active->retired = false;
+	submit = heap_fence_create(GFP_KERNEL);
+	if (!submit) {
+		kfree(active);
+		return ERR_PTR(-ENOMEM);
+	}
 
-	if (!i915_active_acquire(&active->base)) {
-		pr_err("First i915_active_acquire should report being idle\n");
-		err = -EINVAL;
+	err = i915_active_acquire(&active->base);
+	if (err)
 		goto out;
-	}
 
-	for_each_engine(engine, i915, id) {
+	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 
-		rq = i915_request_create(engine->kernel_context);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
 			break;
@@ -58,8 +109,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
 						       submit,
 						       GFP_KERNEL);
 		if (err >= 0)
-			err = i915_active_ref(&active->base,
-					      rq->fence.context, rq);
+			err = i915_active_add_request(&active->base, rq);
 		i915_request_add(rq);
 		if (err) {
 			pr_err("Failed to track active ref!\n");
@@ -70,78 +120,84 @@ static int __live_active_setup(struct drm_i915_private *i915,
 	}
 
 	i915_active_release(&active->base);
-	if (active->retired && count) {
+	if (READ_ONCE(active->retired) && count) {
 		pr_err("i915_active retired before submission!\n");
 		err = -EINVAL;
 	}
 
-	if (active->base.count != count) {
+	if (atomic_read(&active->base.count) != count) {
 		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
-		       active->base.count, count);
+		       atomic_read(&active->base.count), count);
 		err = -EINVAL;
 	}
 
 out:
 	i915_sw_fence_commit(submit);
 	heap_fence_put(submit);
+	if (err) {
+		__live_put(active);
+		active = ERR_PTR(err);
+	}
 
-	return err;
+	return active;
 }
 
 static int live_active_wait(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
-	intel_wakeref_t wakeref;
-	int err;
+	struct live_active *active;
+	int err = 0;
 
 	/* Check that we get a callback when requests retire upon waiting */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active))
+		return PTR_ERR(active);
 
-	err = __live_active_setup(i915, &active);
+	i915_active_wait(&active->base);
+	if (!READ_ONCE(active->retired)) {
+		struct drm_printer p = drm_err_printer(__func__);
 
-	i915_active_wait(&active.base);
-	if (!active.retired) {
 		pr_err("i915_active not retired after waiting!\n");
+		i915_active_print(&active->base, &p);
+
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	__live_put(active);
+
+	if (igt_flush_test(i915))
 		err = -EIO;
 
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
 static int live_active_retire(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
-	intel_wakeref_t wakeref;
-	int err;
+	struct live_active *active;
+	int err = 0;
 
 	/* Check that we get a callback when requests are indirectly retired */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
-	err = __live_active_setup(i915, &active);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active))
+		return PTR_ERR(active);
 
 	/* waits for & retires all requests */
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (igt_flush_test(i915))
 		err = -EIO;
 
-	if (!active.retired) {
+	if (!READ_ONCE(active->retired)) {
+		struct drm_printer p = drm_err_printer(__func__);
+
 		pr_err("i915_active not retired after flushing!\n");
+		i915_active_print(&active->base, &p);
+
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
+	__live_put(active);
+
 	return err;
 }
@@ -152,8 +208,86 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_active_retire),
 	};
 
-	if (i915_terminally_wedged(i915))
+	if (intel_gt_is_wedged(&i915->gt))
 		return 0;
 
 	return i915_subtests(tests, i915);
 }
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+	struct intel_engine_cs *engine;
+
+	if (!is_barrier(&it->base))
+		return NULL;
+
+	engine = __barrier_to_engine(it);
+	smp_rmb(); /* serialise with add_active_barriers */
+	if (!is_barrier(&it->base))
+		return NULL;
+
+	return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+	drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+	drm_printf(m, "\tpreallocated barriers? %s\n",
+		   yesno(!llist_empty(&ref->preallocated_barriers)));
+
+	if (i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			struct intel_engine_cs *engine;
+
+			engine = node_to_barrier(it);
+			if (engine) {
+				drm_printf(m, "\tbarrier: %s\n", engine->name);
+				continue;
+			}
+
+			if (i915_active_fence_isset(&it->base)) {
+				drm_printf(m,
+					   "\ttimeline: %llx\n", it->timeline);
+				continue;
+			}
+		}
+
+		i915_active_release(ref);
+	}
+}
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+	spin_lock_irq(lock);
+	spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+	if (i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		rcu_read_lock();
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			struct dma_fence *f;
+
+			/* Wait for all active callbacks */
+			f = rcu_dereference(it->base.fence);
+			if (f)
+				spin_unlock_wait(f->lock);
+		}
+		rcu_read_unlock();
+
+		i915_active_release(ref);
+	}
+
+	/* And wait for the retire callback */
+	spin_lock_irq(&ref->tree_lock);
+	spin_unlock_irq(&ref->tree_lock);
+
+	/* ... which may have been on a thread instead */
+	flush_work(&ref->work);
+}