From b300fde8965fdd628341c4b602481ebde8ac9cb7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 26 Feb 2019 09:49:21 +0000 Subject: drm/i915: Remove i915_request.global_seqno Having weaned the interrupt handling off using a single global execution queue, we no longer need to emit a global_seqno. Note that we still have a few assumptions about execution order along engine timelines, but this removes the most obvious artefact! Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190226094922.31617-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.h | 32 -------------------------------- 1 file changed, 32 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 40f3e8dcbdd5..1e127c1c53fa 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -147,14 +147,6 @@ struct i915_request { */ const u32 *hwsp_seqno; - /** - * GEM sequence number associated with this request on the - * global execution timeline. It is zero when the request is not - * on the HW queue (i.e. not on the engine timeline list). - * Its value is guarded by the timeline spinlock. - */ - u32 global_seqno; - /** Position in the ring of the start of the request */ u32 head; @@ -247,30 +239,6 @@ i915_request_put(struct i915_request *rq) dma_fence_put(&rq->fence); } -/** - * i915_request_global_seqno - report the current global seqno - * @request - the request - * - * A request is assigned a global seqno only when it is on the hardware - * execution queue. The global seqno can be used to maintain a list of - * requests on the same engine in retirement order, for example for - * constructing a priority queue for waiting. Prior to its execution, or - * if it is subsequently removed in the event of preemption, its global - * seqno is zero. As both insertion and removal from the execution queue - * may operate in IRQ context, it is not guarded by the usual struct_mutex - * BKL. Instead those relying on the global seqno must be prepared for its - * value to change between reads. Only when the request is complete can - * the global seqno be stable (due to the memory barriers on submitting - * the commands to the hardware to write the breadcrumb, if the HWS shows - * that it has passed the global seqno and the global seqno is unchanged - * after the read, it is indeed complete). - */ -static inline u32 -i915_request_global_seqno(const struct i915_request *request) -{ - return READ_ONCE(request->global_seqno); -} - int i915_request_await_object(struct i915_request *to, struct drm_i915_gem_object *obj, bool write); -- cgit v1.2.3 From 32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Feb 2019 10:20:33 +0000 Subject: drm/i915: Make request allocation caches global As kmem_caches share the same properties (size, allocation/free behaviour) for all potential devices, we can use global caches. While this potential has worse fragmentation behaviour (one can argue that different devices would have different activity lifetimes, but you can also argue that activity is temporal across the system) it is the default behaviour of the system at large to amalgamate matching caches. The benefit for us is much reduced pointer dancing along the frequent allocation paths. 
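As a condensed illustration of the shape of this change (a sketch only, with hypothetical demo_* names; the real implementation is i915_globals.c plus the i915_global_*_init/shrink/exit hooks in the diff below), a single module-scope cache with a park-time shrink looks roughly like:

    #include <linux/slab.h>

    struct demo_object {
            u64 payload[8];
    };

    /* One cache shared by every device instance bound to the module. */
    static struct kmem_cache *demo_slab;

    int __init demo_global_init(void)
    {
            demo_slab = KMEM_CACHE(demo_object, SLAB_HWCACHE_ALIGN);
            return demo_slab ? 0 : -ENOMEM;
    }

    void demo_global_park(void)
    {
            /* Driver idled: discard empty slabs, compact partial ones. */
            kmem_cache_shrink(demo_slab);
    }

    void demo_global_exit(void)
    {
            kmem_cache_destroy(demo_slab);
    }
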
v2: Defer shrinking until after a global grace period for futureproofing multiple consumers of the slab caches, similar to the current strategy for avoiding shrinking too early. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190228102035.5857-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_active.c | 7 +- drivers/gpu/drm/i915/i915_active.h | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 - drivers/gpu/drm/i915/i915_gem.c | 34 +------ drivers/gpu/drm/i915/i915_globals.c | 113 +++++++++++++++++++++++ drivers/gpu/drm/i915/i915_globals.h | 15 +++ drivers/gpu/drm/i915/i915_pci.c | 8 +- drivers/gpu/drm/i915/i915_request.c | 53 +++++++++-- drivers/gpu/drm/i915/i915_request.h | 10 ++ drivers/gpu/drm/i915/i915_scheduler.c | 66 ++++++++++--- drivers/gpu/drm/i915/i915_scheduler.h | 34 ++++++- drivers/gpu/drm/i915/intel_guc_submission.c | 3 +- drivers/gpu/drm/i915/intel_lrc.c | 6 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 17 ---- drivers/gpu/drm/i915/selftests/mock_engine.c | 45 +++++---- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 26 ------ drivers/gpu/drm/i915/selftests/mock_request.c | 12 +-- drivers/gpu/drm/i915/selftests/mock_request.h | 7 -- 19 files changed, 312 insertions(+), 149 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_globals.c create mode 100644 drivers/gpu/drm/i915/i915_globals.h (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 1787e1299b1b..a1d834068765 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -77,6 +77,7 @@ i915-y += \ i915_gem_tiling.o \ i915_gem_userptr.o \ i915_gemfs.o \ + i915_globals.o \ i915_query.o \ i915_request.o \ i915_scheduler.o \ diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index db7bb5bd5add..d9f6471ac16c 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -294,7 +294,12 @@ int __init i915_global_active_init(void) return 0; } -void __exit i915_global_active_exit(void) +void i915_global_active_shrink(void) +{ + kmem_cache_shrink(global.slab_cache); +} + +void i915_global_active_exit(void) { kmem_cache_destroy(global.slab_cache); } diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 12b5c1d287d1..5fbd9102384b 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -420,6 +420,7 @@ static inline void i915_active_fini(struct i915_active *ref) { } #endif int i915_global_active_init(void); +void i915_global_active_shrink(void); void i915_global_active_exit(void); #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cc09caf3870e..f16016b330b3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1473,9 +1473,6 @@ struct drm_i915_private { struct kmem_cache *objects; struct kmem_cache *vmas; struct kmem_cache *luts; - struct kmem_cache *requests; - struct kmem_cache *dependencies; - struct kmem_cache *priorities; const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index de8b92da56cf..f6fe10fce0ec 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -42,6 +42,7 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gemfs.h" +#include "i915_globals.h" #include "i915_reset.h" #include "i915_trace.h" #include "i915_vgpu.h" @@ -187,6 +188,8 @@ void i915_gem_unpark(struct drm_i915_private *i915) if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ i915->gt.epoch = 1; + i915_globals_unpark(); + intel_enable_gt_powersave(i915); i915_update_gfx_val(i915); if (INTEL_GEN(i915) >= 6) @@ -2892,12 +2895,11 @@ static void shrink_caches(struct drm_i915_private *i915) * filled slabs to prioritise allocating from the mostly full slabs, * with the aim of reducing fragmentation. */ - kmem_cache_shrink(i915->priorities); - kmem_cache_shrink(i915->dependencies); - kmem_cache_shrink(i915->requests); kmem_cache_shrink(i915->luts); kmem_cache_shrink(i915->vmas); kmem_cache_shrink(i915->objects); + + i915_globals_park(); } struct sleep_rcu_work { @@ -5237,23 +5239,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) if (!dev_priv->luts) goto err_vmas; - dev_priv->requests = KMEM_CACHE(i915_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!dev_priv->requests) - goto err_luts; - - dev_priv->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!dev_priv->dependencies) - goto err_requests; - - dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!dev_priv->priorities) - goto err_dependencies; - INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); @@ -5278,12 +5263,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) return 0; -err_dependencies: - kmem_cache_destroy(dev_priv->dependencies); -err_requests: - kmem_cache_destroy(dev_priv->requests); -err_luts: - kmem_cache_destroy(dev_priv->luts); err_vmas: kmem_cache_destroy(dev_priv->vmas); err_objects: @@ -5301,9 +5280,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); - kmem_cache_destroy(dev_priv->priorities); - kmem_cache_destroy(dev_priv->dependencies); - kmem_cache_destroy(dev_priv->requests); kmem_cache_destroy(dev_priv->luts); kmem_cache_destroy(dev_priv->vmas); kmem_cache_destroy(dev_priv->objects); diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c new file mode 100644 index 000000000000..7fd1b3945a04 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include +#include + +#include "i915_active.h" +#include "i915_globals.h" +#include "i915_request.h" +#include "i915_scheduler.h" + +int __init i915_globals_init(void) +{ + int err; + + err = i915_global_active_init(); + if (err) + return err; + + err = i915_global_request_init(); + if (err) + goto err_active; + + err = i915_global_scheduler_init(); + if (err) + goto err_request; + + return 0; + +err_request: + i915_global_request_exit(); +err_active: + i915_global_active_exit(); + return err; +} + +static void i915_globals_shrink(void) +{ + /* + * kmem_cache_shrink() discards empty slabs and reorders partially + * filled slabs to prioritise allocating from the mostly full slabs, + * with the aim of reducing fragmentation. 
+ */ + i915_global_active_shrink(); + i915_global_request_shrink(); + i915_global_scheduler_shrink(); +} + +static atomic_t active; +static atomic_t epoch; +struct park_work { + struct rcu_work work; + int epoch; +}; + +static void __i915_globals_park(struct work_struct *work) +{ + struct park_work *wrk = container_of(work, typeof(*wrk), work.work); + + /* Confirm nothing woke up in the last grace period */ + if (wrk->epoch == atomic_read(&epoch)) + i915_globals_shrink(); + + kfree(wrk); +} + +void i915_globals_park(void) +{ + struct park_work *wrk; + + /* + * Defer shrinking the global slab caches (and other work) until + * after a RCU grace period has completed with no activity. This + * is to try and reduce the latency impact on the consumers caused + * by us shrinking the caches the same time as they are trying to + * allocate, with the assumption being that if we idle long enough + * for an RCU grace period to elapse since the last use, it is likely + * to be longer until we need the caches again. + */ + if (!atomic_dec_and_test(&active)) + return; + + wrk = kmalloc(sizeof(*wrk), GFP_KERNEL); + if (!wrk) + return; + + wrk->epoch = atomic_inc_return(&epoch); + INIT_RCU_WORK(&wrk->work, __i915_globals_park); + queue_rcu_work(system_wq, &wrk->work); +} + +void i915_globals_unpark(void) +{ + atomic_inc(&epoch); + atomic_inc(&active); +} + +void __exit i915_globals_exit(void) +{ + /* Flush any residual park_work */ + rcu_barrier(); + flush_scheduled_work(); + + i915_global_scheduler_exit(); + i915_global_request_exit(); + i915_global_active_exit(); + + /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ + rcu_barrier(); +} diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h new file mode 100644 index 000000000000..e468f0413a73 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_GLOBALS_H_ +#define _I915_GLOBALS_H_ + +int i915_globals_init(void); +void i915_globals_park(void); +void i915_globals_unpark(void); +void i915_globals_exit(void); + +#endif /* _I915_GLOBALS_H_ */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index c4d6b8da9b03..a9211c370cd1 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -28,8 +28,8 @@ #include -#include "i915_active.h" #include "i915_drv.h" +#include "i915_globals.h" #include "i915_selftest.h" #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x) @@ -802,7 +802,9 @@ static int __init i915_init(void) bool use_kms = true; int err; - i915_global_active_init(); + err = i915_globals_init(); + if (err) + return err; err = i915_mock_selftests(); if (err) @@ -835,7 +837,7 @@ static void __exit i915_exit(void) return; pci_unregister_driver(&i915_pci_driver); - i915_global_active_exit(); + i915_globals_exit(); } module_init(i915_init); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 935db5548f80..a011bf4be48e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -32,6 +32,11 @@ #include "i915_active.h" #include "i915_reset.h" +static struct i915_global_request { + struct kmem_cache *slab_requests; + struct kmem_cache *slab_dependencies; +} global; + static const char *i915_fence_get_driver_name(struct dma_fence *fence) { return "i915"; @@ -86,7 +91,7 @@ static void i915_fence_release(struct dma_fence *fence) */ i915_sw_fence_fini(&rq->submit); 
- kmem_cache_free(rq->i915->requests, rq); + kmem_cache_free(global.slab_requests, rq); } const struct dma_fence_ops i915_fence_ops = { @@ -292,7 +297,7 @@ static void i915_request_retire(struct i915_request *request) unreserve_gt(request->i915); - i915_sched_node_fini(request->i915, &request->sched); + i915_sched_node_fini(&request->sched); i915_request_put(request); } @@ -491,7 +496,7 @@ i915_request_alloc_slow(struct intel_context *ce) ring_retire_requests(ring); out: - return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL); + return kmem_cache_alloc(global.slab_requests, GFP_KERNEL); } static int add_timeline_barrier(struct i915_request *rq) @@ -579,7 +584,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * * Do not use kmem_cache_zalloc() here! */ - rq = kmem_cache_alloc(i915->requests, + rq = kmem_cache_alloc(global.slab_requests, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(!rq)) { rq = i915_request_alloc_slow(ce); @@ -666,7 +671,7 @@ err_unwind: GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); - kmem_cache_free(i915->requests, rq); + kmem_cache_free(global.slab_requests, rq); err_unreserve: unreserve_gt(i915); intel_context_unpin(ce); @@ -685,9 +690,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) return 0; if (to->engine->schedule) { - ret = i915_sched_node_add_dependency(to->i915, - &to->sched, - &from->sched); + ret = i915_sched_node_add_dependency(&to->sched, &from->sched); if (ret < 0) return ret; } @@ -1175,3 +1178,37 @@ void i915_retire_requests(struct drm_i915_private *i915) #include "selftests/mock_request.c" #include "selftests/i915_request.c" #endif + +int __init i915_global_request_init(void) +{ + global.slab_requests = KMEM_CACHE(i915_request, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_requests) + return -ENOMEM; + + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT); + if (!global.slab_dependencies) + goto err_requests; + + return 0; + +err_requests: + kmem_cache_destroy(global.slab_requests); + return -ENOMEM; +} + +void i915_global_request_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_requests); +} + +void i915_global_request_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_requests); +} diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 1e127c1c53fa..be3ded6bcf56 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -29,6 +29,7 @@ #include "i915_gem.h" #include "i915_scheduler.h" +#include "i915_selftest.h" #include "i915_sw_fence.h" #include @@ -196,6 +197,11 @@ struct i915_request { struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_link; + + I915_SELFTEST_DECLARE(struct { + struct list_head link; + unsigned long delay; + } mock;) }; #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) @@ -371,4 +377,8 @@ static inline void i915_request_mark_complete(struct i915_request *rq) void i915_retire_requests(struct drm_i915_private *i915); +int i915_global_request_init(void); +void i915_global_request_shrink(void); +void i915_global_request_exit(void); + #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_scheduler.c 
b/drivers/gpu/drm/i915/i915_scheduler.c index 38efefd22dce..0dd720593f9c 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -10,6 +10,11 @@ #include "i915_request.h" #include "i915_scheduler.h" +static struct i915_global_scheduler { + struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_priorities; +} global; + static DEFINE_SPINLOCK(schedule_lock); static const struct i915_request * @@ -37,16 +42,15 @@ void i915_sched_node_init(struct i915_sched_node *node) } static struct i915_dependency * -i915_dependency_alloc(struct drm_i915_private *i915) +i915_dependency_alloc(void) { - return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); + return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL); } static void -i915_dependency_free(struct drm_i915_private *i915, - struct i915_dependency *dep) +i915_dependency_free(struct i915_dependency *dep) { - kmem_cache_free(i915->dependencies, dep); + kmem_cache_free(global.slab_dependencies, dep); } bool __i915_sched_node_add_dependency(struct i915_sched_node *node, @@ -73,25 +77,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, return ret; } -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal) { struct i915_dependency *dep; - dep = i915_dependency_alloc(i915); + dep = i915_dependency_alloc(); if (!dep) return -ENOMEM; if (!__i915_sched_node_add_dependency(node, signal, dep, I915_DEPENDENCY_ALLOC)) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); return 0; } -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node) +void i915_sched_node_fini(struct i915_sched_node *node) { struct i915_dependency *dep, *tmp; @@ -111,7 +113,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->wait_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } /* Remove ourselves from everyone who depends upon us */ @@ -121,7 +123,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->signal_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } spin_unlock(&schedule_lock); @@ -198,7 +200,7 @@ find_priolist: if (prio == I915_PRIORITY_NORMAL) { p = &execlists->default_priolist; } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC); /* Convert an allocation failure to a priority bump */ if (unlikely(!p)) { prio = I915_PRIORITY_NORMAL; /* recurses just once */ @@ -424,3 +426,39 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) spin_unlock_bh(&schedule_lock); } + +void __i915_priolist_free(struct i915_priolist *p) +{ + kmem_cache_free(global.slab_priorities, p); +} + +int __init i915_global_scheduler_init(void) +{ + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN); + if (!global.slab_dependencies) + return -ENOMEM; + + global.slab_priorities = KMEM_CACHE(i915_priolist, + SLAB_HWCACHE_ALIGN); + if (!global.slab_priorities) + goto err_priorities; + + return 0; + +err_priorities: + kmem_cache_destroy(global.slab_priorities); + return -ENOMEM; +} + +void i915_global_scheduler_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_priorities); +} + +void i915_global_scheduler_exit(void) +{ 
+ kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_priorities); +} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index dbe9cb7ecd82..4153c6a607b0 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -83,6 +83,23 @@ struct i915_dependency { #define I915_DEPENDENCY_ALLOC BIT(0) }; +struct i915_priolist { + struct list_head requests[I915_PRIORITY_COUNT]; + struct rb_node node; + unsigned long used; + int priority; +}; + +#define priolist_for_each_request(it, plist, idx) \ + for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ + list_for_each_entry(it, &(plist)->requests[idx], sched.link) + +#define priolist_for_each_request_consume(it, n, plist, idx) \ + for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ + list_for_each_entry_safe(it, n, \ + &(plist)->requests[idx - 1], \ + sched.link) + void i915_sched_node_init(struct i915_sched_node *node); bool __i915_sched_node_add_dependency(struct i915_sched_node *node, @@ -90,12 +107,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_dependency *dep, unsigned long flags); -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal); -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node); +void i915_sched_node_fini(struct i915_sched_node *node); void i915_schedule(struct i915_request *request, const struct i915_sched_attr *attr); @@ -105,4 +120,15 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump); struct list_head * i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio); +void __i915_priolist_free(struct i915_priolist *p); +static inline void i915_priolist_free(struct i915_priolist *p) +{ + if (p->priority != I915_PRIORITY_NORMAL) + __i915_priolist_free(p); +} + +int i915_global_scheduler_init(void); +void i915_global_scheduler_shrink(void); +void i915_global_scheduler_exit(void); + #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 20cbceeabeae..4366db7978a8 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -781,8 +781,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: execlists->queue_priority_hint = diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 81d188508b44..661d2f6da84f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -781,8 +781,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: @@ -935,8 +934,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } /* Remaining _unready_ requests will be nop'ed when submitted */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h index 533e2053664e..b8ec7e40a59b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -187,23 +187,6 @@ enum intel_engine_id { #define _VECS(n) (VECS + (n)) }; -struct i915_priolist { - struct list_head requests[I915_PRIORITY_COUNT]; - struct rb_node node; - unsigned long used; - int priority; -}; - -#define priolist_for_each_request(it, plist, idx) \ - for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ - list_for_each_entry(it, &(plist)->requests[idx], sched.link) - -#define priolist_for_each_request_consume(it, n, plist, idx) \ - for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ - list_for_each_entry_safe(it, n, \ - &(plist)->requests[idx - 1], \ - sched.link) - struct st_preempt_hang { struct completion completion; unsigned int count; diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 6f3fb803c747..ec1ae948954c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -76,26 +76,26 @@ static void mock_ring_free(struct intel_ring *base) kfree(ring); } -static struct mock_request *first_request(struct mock_engine *engine) +static struct i915_request *first_request(struct mock_engine *engine) { return list_first_entry_or_null(&engine->hw_queue, - struct mock_request, - link); + struct i915_request, + mock.link); } -static void advance(struct mock_request *request) +static void advance(struct i915_request *request) { - list_del_init(&request->link); - i915_request_mark_complete(&request->base); - GEM_BUG_ON(!i915_request_completed(&request->base)); + list_del_init(&request->mock.link); + i915_request_mark_complete(request); + GEM_BUG_ON(!i915_request_completed(request)); - intel_engine_queue_breadcrumbs(request->base.engine); + intel_engine_queue_breadcrumbs(request->engine); } static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); - struct mock_request *request; + struct i915_request *request; unsigned long flags; spin_lock_irqsave(&engine->hw_lock, flags); @@ -110,8 +110,9 @@ static void hw_delay_complete(struct timer_list *t) * requeue the timer for the next delayed request. 
*/ while ((request = first_request(engine))) { - if (request->delay) { - mod_timer(&engine->hw_delay, jiffies + request->delay); + if (request->mock.delay) { + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); break; } @@ -169,10 +170,8 @@ err: static int mock_request_alloc(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); - - INIT_LIST_HEAD(&mock->link); - mock->delay = 0; + INIT_LIST_HEAD(&request->mock.link); + request->mock.delay = 0; return 0; } @@ -190,7 +189,6 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) static void mock_submit_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); unsigned long flags; @@ -198,12 +196,13 @@ static void mock_submit_request(struct i915_request *request) i915_request_submit(request); spin_lock_irqsave(&engine->hw_lock, flags); - list_add_tail(&mock->link, &engine->hw_queue); - if (mock->link.prev == &engine->hw_queue) { - if (mock->delay) - mod_timer(&engine->hw_delay, jiffies + mock->delay); + list_add_tail(&request->mock.link, &engine->hw_queue); + if (list_is_first(&request->mock.link, &engine->hw_queue)) { + if (request->mock.delay) + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); else - advance(mock); + advance(request); } spin_unlock_irqrestore(&engine->hw_lock, flags); } @@ -263,12 +262,12 @@ void mock_engine_flush(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); - struct mock_request *request, *rn; + struct i915_request *request, *rn; del_timer_sync(&mock->hw_delay); spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, link) + list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) advance(request); spin_unlock_irq(&mock->hw_lock); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index fc516a2970f4..5a98caba6d69 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -79,9 +79,6 @@ static void mock_device_release(struct drm_device *dev) destroy_workqueue(i915->wq); - kmem_cache_destroy(i915->priorities); - kmem_cache_destroy(i915->dependencies); - kmem_cache_destroy(i915->requests); kmem_cache_destroy(i915->vmas); kmem_cache_destroy(i915->objects); @@ -211,23 +208,6 @@ struct drm_i915_private *mock_gem_device(void) if (!i915->vmas) goto err_objects; - i915->requests = KMEM_CACHE(mock_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!i915->requests) - goto err_vmas; - - i915->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!i915->dependencies) - goto err_requests; - - i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!i915->priorities) - goto err_dependencies; - i915_timelines_init(i915); INIT_LIST_HEAD(&i915->gt.active_rings); @@ -257,12 +237,6 @@ err_context: err_unlock: mutex_unlock(&i915->drm.struct_mutex); i915_timelines_fini(i915); - kmem_cache_destroy(i915->priorities); -err_dependencies: - kmem_cache_destroy(i915->dependencies); -err_requests: - kmem_cache_destroy(i915->requests); -err_vmas: kmem_cache_destroy(i915->vmas); err_objects: kmem_cache_destroy(i915->objects); diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c 
b/drivers/gpu/drm/i915/selftests/mock_request.c index 0dc29e242597..d1a7c9608712 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine, unsigned long delay) { struct i915_request *request; - struct mock_request *mock; /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = i915_request_alloc(engine, context); if (IS_ERR(request)) return NULL; - mock = container_of(request, typeof(*mock), base); - mock->delay = delay; - - return &mock->base; + request->mock.delay = delay; + return request; } bool mock_cancel_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); bool was_queued; spin_lock_irq(&engine->hw_lock); - was_queued = !list_empty(&mock->link); - list_del_init(&mock->link); + was_queued = !list_empty(&request->mock.link); + list_del_init(&request->mock.link); spin_unlock_irq(&engine->hw_lock); if (was_queued) diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h index 995fb728380c..4acf0211df20 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.h +++ b/drivers/gpu/drm/i915/selftests/mock_request.h @@ -29,13 +29,6 @@ #include "../i915_request.h" -struct mock_request { - struct i915_request base; - - struct list_head link; - unsigned long delay; -}; - struct i915_request * mock_request(struct intel_engine_cs *engine, struct i915_gem_context *context, -- cgit v1.2.3 From ebece7539242a9204e5748fb6a6b5031d220b164 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 1 Mar 2019 17:08:59 +0000 Subject: drm/i915: Keep timeline HWSP allocated until idle across the system In preparation for enabling HW semaphores, we need to keep in flight timeline HWSP alive until its use across entire system has completed, as any other timeline active on the GPU may still refer back to the already retired timeline. We both have to delay recycling available cachelines and unpinning old HWSP until the next idle point. An easy option would be to simply keep all used HWSP until the system as a whole was idle, i.e. we could release them all at once on parking. However, on a busy system, we may never see a global idle point, essentially meaning the resource will be leaked until we are forced to do a GC pass. We already employ a fine-grained idle detection mechanism for vma, which we can reuse here so that each cacheline can be freed immediately after the last request using it is retired. v3: Keep track of the activity of each cacheline. 
v4: cacheline_free() on canceling the seqno tracking v5: Finally with a testcase to exercise wraparound v6: Pack cacheline into empty bits of page-aligned vaddr v7: Use i915_utils to hide the pointer casting around bit manipulation Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190301170901.8340-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 31 +-- drivers/gpu/drm/i915/i915_request.h | 11 + drivers/gpu/drm/i915/i915_timeline.c | 293 +++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_timeline.h | 11 +- drivers/gpu/drm/i915/selftests/i915_timeline.c | 113 ++++++++++ 5 files changed, 420 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 719d1a5ab082..d354967d6ae8 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -325,11 +325,6 @@ void i915_request_retire_upto(struct i915_request *rq) } while (tmp != rq); } -static u32 timeline_get_seqno(struct i915_timeline *tl) -{ - return tl->seqno += 1 + tl->has_initial_breadcrumb; -} - static void move_to_timeline(struct i915_request *request, struct i915_timeline *timeline) { @@ -532,8 +527,10 @@ struct i915_request * i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct drm_i915_private *i915 = engine->i915; - struct i915_request *rq; struct intel_context *ce; + struct i915_timeline *tl; + struct i915_request *rq; + u32 seqno; int ret; lockdep_assert_held(&i915->drm.struct_mutex); @@ -610,24 +607,27 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) } } - rq->rcustate = get_state_synchronize_rcu(); - INIT_LIST_HEAD(&rq->active_list); + + tl = ce->ring->timeline; + ret = i915_timeline_get_seqno(tl, rq, &seqno); + if (ret) + goto err_free; + rq->i915 = i915; rq->engine = engine; rq->gem_context = ctx; rq->hw_context = ce; rq->ring = ce->ring; - rq->timeline = ce->ring->timeline; + rq->timeline = tl; GEM_BUG_ON(rq->timeline == &engine->timeline); - rq->hwsp_seqno = rq->timeline->hwsp_seqno; + rq->hwsp_seqno = tl->hwsp_seqno; + rq->hwsp_cacheline = tl->hwsp_cacheline; + rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ spin_lock_init(&rq->lock); - dma_fence_init(&rq->fence, - &i915_fence_ops, - &rq->lock, - rq->timeline->fence_context, - timeline_get_seqno(rq->timeline)); + dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, + tl->fence_context, seqno); /* We bump the ref for the fence chain */ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); @@ -687,6 +687,7 @@ err_unwind: GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); +err_free: kmem_cache_free(global.slab_requests, rq); err_unreserve: mutex_unlock(&ce->ring->timeline->mutex); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index be3ded6bcf56..09eaad06d2c6 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -38,6 +38,7 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; struct i915_timeline; +struct i915_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; @@ -148,6 +149,16 @@ struct i915_request { */ const u32 *hwsp_seqno; + /* + * If we need to access the timeline's seqno for this request in + * another request, we need to keep a read reference to this associated + * 
cacheline, so that we do not free and recycle it before the foreign + * observers have completed. Hence, we keep a pointer to the cacheline + * inside the timeline's HWSP vma, but it is only valid while this + * request has not completed and guarded by the timeline mutex. + */ + struct i915_timeline_cacheline *hwsp_cacheline; + /** Position in the ring of the start of the request */ u32 head; diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 87a80558da28..8484ba6e51d1 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -6,19 +6,32 @@ #include "i915_drv.h" -#include "i915_timeline.h" +#include "i915_active.h" #include "i915_syncmap.h" +#include "i915_timeline.h" + +#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) +#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) struct i915_timeline_hwsp { - struct i915_vma *vma; + struct i915_gt_timelines *gt; struct list_head free_link; + struct i915_vma *vma; u64 free_bitmap; }; -static inline struct i915_timeline_hwsp * -i915_timeline_hwsp(const struct i915_timeline *tl) +struct i915_timeline_cacheline { + struct i915_active active; + struct i915_timeline_hwsp *hwsp; + void *vaddr; +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS +}; + +static inline struct drm_i915_private * +hwsp_to_i915(struct i915_timeline_hwsp *hwsp) { - return tl->hwsp_ggtt->private; + return container_of(hwsp->gt, struct drm_i915_private, gt.timelines); } static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) @@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) vma->private = hwsp; hwsp->vma = vma; hwsp->free_bitmap = ~0ull; + hwsp->gt = gt; spin_lock(>->hwsp_lock); list_add(&hwsp->free_link, >->hwsp_free_list); @@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) return hwsp->vma; } -static void hwsp_free(struct i915_timeline *timeline) +static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) { - struct i915_gt_timelines *gt = &timeline->i915->gt.timelines; - struct i915_timeline_hwsp *hwsp; - - hwsp = i915_timeline_hwsp(timeline); - if (!hwsp) /* leave global HWSP alone! 
*/ - return; + struct i915_gt_timelines *gt = hwsp->gt; spin_lock(>->hwsp_lock); @@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline) if (!hwsp->free_bitmap) list_add_tail(&hwsp->free_link, >->hwsp_free_list); - hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES); + GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); + hwsp->free_bitmap |= BIT_ULL(cacheline); /* And if no one is left using it, give the page back to the system */ if (hwsp->free_bitmap == ~0ull) { @@ -115,6 +125,76 @@ static void hwsp_free(struct i915_timeline *timeline) spin_unlock(>->hwsp_lock); } +static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(!i915_active_is_idle(&cl->active)); + + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + i915_vma_put(cl->hwsp->vma); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + + i915_active_fini(&cl->active); + kfree(cl); +} + +static void __cacheline_retire(struct i915_active *active) +{ + struct i915_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + i915_vma_unpin(cl->hwsp->vma); + if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) + __idle_cacheline_free(cl); +} + +static struct i915_timeline_cacheline * +cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) +{ + struct i915_timeline_cacheline *cl; + void *vaddr; + + GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return ERR_PTR(-ENOMEM); + + vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + kfree(cl); + return ERR_CAST(vaddr); + } + + i915_vma_get(hwsp->vma); + cl->hwsp = hwsp; + cl->vaddr = page_pack_bits(vaddr, cacheline); + + i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire); + + return cl; +} + +static void cacheline_acquire(struct i915_timeline_cacheline *cl) +{ + if (cl && i915_active_acquire(&cl->active)) + __i915_vma_pin(cl->hwsp->vma); +} + +static void cacheline_release(struct i915_timeline_cacheline *cl) +{ + if (cl) + i915_active_release(&cl->active); +} + +static void cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); + cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); + + if (i915_active_is_idle(&cl->active)) + __idle_cacheline_free(cl); +} + int i915_timeline_init(struct drm_i915_private *i915, struct i915_timeline *timeline, const char *name, @@ -136,29 +216,40 @@ int i915_timeline_init(struct drm_i915_private *i915, timeline->name = name; timeline->pin_count = 0; timeline->has_initial_breadcrumb = !hwsp; + timeline->hwsp_cacheline = NULL; - timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; if (!hwsp) { + struct i915_timeline_cacheline *cl; unsigned int cacheline; hwsp = hwsp_alloc(timeline, &cacheline); if (IS_ERR(hwsp)) return PTR_ERR(hwsp); + cl = cacheline_alloc(hwsp->private, cacheline); + if (IS_ERR(cl)) { + __idle_hwsp_free(hwsp->private, cacheline); + return PTR_ERR(cl); + } + + timeline->hwsp_cacheline = cl; timeline->hwsp_offset = cacheline * CACHELINE_BYTES; - } - timeline->hwsp_ggtt = i915_vma_get(hwsp); - vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - hwsp_free(timeline); - i915_vma_put(hwsp); - return PTR_ERR(vaddr); + vaddr = page_mask_bits(cl->vaddr); + } else { + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + + vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); } timeline->hwsp_seqno = memset(vaddr + 
timeline->hwsp_offset, 0, CACHELINE_BYTES); + timeline->hwsp_ggtt = i915_vma_get(hwsp); + GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); + timeline->fence_context = dma_fence_context_alloc(1); spin_lock_init(&timeline->lock); @@ -240,9 +331,12 @@ void i915_timeline_fini(struct i915_timeline *timeline) GEM_BUG_ON(i915_active_request_isset(&timeline->barrier)); i915_syncmap_free(&timeline->sync); - hwsp_free(timeline); - i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + if (timeline->hwsp_cacheline) + cacheline_free(timeline->hwsp_cacheline); + else + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + i915_vma_put(timeline->hwsp_ggtt); } @@ -285,6 +379,7 @@ int i915_timeline_pin(struct i915_timeline *tl) i915_ggtt_offset(tl->hwsp_ggtt) + offset_in_page(tl->hwsp_offset); + cacheline_acquire(tl->hwsp_cacheline); timeline_add_to_active(tl); return 0; @@ -294,6 +389,157 @@ unpin: return err; } +static u32 timeline_advance(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); + + return tl->seqno += 1 + tl->has_initial_breadcrumb; +} + +static void timeline_rollback(struct i915_timeline *tl) +{ + tl->seqno -= 1 + tl->has_initial_breadcrumb; +} + +static noinline int +__i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + struct i915_timeline_cacheline *cl; + unsigned int cacheline; + struct i915_vma *vma; + void *vaddr; + int err; + + /* + * If there is an outstanding GPU reference to this cacheline, + * such as it being sampled by a HW semaphore on another timeline, + * we cannot wraparound our seqno value (the HW semaphore does + * a strict greater-than-or-equals compare, not i915_seqno_passed). + * So if the cacheline is still busy, we must detach ourselves + * from it and leave it inflight alongside its users. + * + * However, if nobody is watching and we can guarantee that nobody + * will, we could simply reuse the same cacheline. + * + * if (i915_active_request_is_signaled(&tl->last_request) && + * i915_active_is_signaled(&tl->hwsp_cacheline->active)) + * return 0; + * + * That seems unlikely for a busy timeline that needed to wrap in + * the first place, so just replace the cacheline. + */ + + vma = hwsp_alloc(tl, &cacheline); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_rollback; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) { + __idle_hwsp_free(vma->private, cacheline); + goto err_rollback; + } + + cl = cacheline_alloc(vma->private, cacheline); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + __idle_hwsp_free(vma->private, cacheline); + goto err_unpin; + } + GEM_BUG_ON(cl->hwsp->vma != vma); + + /* + * Attach the old cacheline to the current request, so that we only + * free it after the current request is retired, which ensures that + * all writes into the cacheline from previous requests are complete. 
+ */ + err = i915_active_ref(&tl->hwsp_cacheline->active, + tl->fence_context, rq); + if (err) + goto err_cacheline; + + cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ + cacheline_free(tl->hwsp_cacheline); + + i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ + i915_vma_put(tl->hwsp_ggtt); + + tl->hwsp_ggtt = i915_vma_get(vma); + + vaddr = page_mask_bits(cl->vaddr); + tl->hwsp_offset = cacheline * CACHELINE_BYTES; + tl->hwsp_seqno = + memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); + + tl->hwsp_offset += i915_ggtt_offset(vma); + + cacheline_acquire(cl); + tl->hwsp_cacheline = cl; + + *seqno = timeline_advance(tl); + GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); + return 0; + +err_cacheline: + cacheline_free(cl); +err_unpin: + i915_vma_unpin(vma); +err_rollback: + timeline_rollback(tl); + return err; +} + +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + *seqno = timeline_advance(tl); + + /* Replace the HWSP on wraparound for HW semaphores */ + if (unlikely(!*seqno && tl->hwsp_cacheline)) + return __i915_timeline_get_seqno(tl, rq, seqno); + + return 0; +} + +static int cacheline_ref(struct i915_timeline_cacheline *cl, + struct i915_request *rq) +{ + return i915_active_ref(&cl->active, rq->fence.context, rq); +} + +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) +{ + struct i915_timeline_cacheline *cl = from->hwsp_cacheline; + struct i915_timeline *tl = from->timeline; + int err; + + GEM_BUG_ON(to->timeline == tl); + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = i915_request_completed(from); + if (!err) + err = cacheline_ref(cl, to); + if (!err) { + if (likely(cl == tl->hwsp_cacheline)) { + *hwsp = tl->hwsp_offset; + } else { /* across a seqno wrap, recover the original offset */ + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * + CACHELINE_BYTES; + } + } + mutex_unlock(&tl->mutex); + + return err; +} + void i915_timeline_unpin(struct i915_timeline *tl) { GEM_BUG_ON(!tl->pin_count); @@ -301,6 +547,7 @@ void i915_timeline_unpin(struct i915_timeline *tl) return; timeline_remove_from_active(tl); + cacheline_release(tl->hwsp_cacheline); /* * Since this timeline is idle, all bariers upon which we were waiting diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index 36c3849f7108..60b1dfad93ed 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -34,7 +34,7 @@ #include "i915_utils.h" struct i915_vma; -struct i915_timeline_hwsp; +struct i915_timeline_cacheline; struct i915_timeline { u64 fence_context; @@ -51,6 +51,8 @@ struct i915_timeline { struct i915_vma *hwsp_ggtt; u32 hwsp_offset; + struct i915_timeline_cacheline *hwsp_cacheline; + bool has_initial_breadcrumb; /** @@ -162,8 +164,15 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, } int i915_timeline_pin(struct i915_timeline *tl); +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno); void i915_timeline_unpin(struct i915_timeline *tl); +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + void i915_timelines_init(struct drm_i915_private *i915); void i915_timelines_park(struct drm_i915_private *i915); void i915_timelines_fini(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c 
b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 12ea69b1a1e5..844701759ffc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -641,6 +641,118 @@ out: #undef NUM_TIMELINES } +static int live_hwsp_wrap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_timeline *tl; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = 0; + + /* + * Across a seqno wrap, we need to keep the old cacheline alive for + * foreign GPU references. + */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + tl = i915_timeline_create(i915, __func__, NULL); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out_rpm; + } + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) + goto out_free; + + err = i915_timeline_pin(tl); + if (err) + goto out_free; + + for_each_engine(engine, i915, id) { + const u32 *hwsp_seqno[2]; + struct i915_request *rq; + u32 seqno[2]; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + tl->seqno = -4u; + + err = i915_timeline_get_seqno(tl, rq, &seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", + seqno[0], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[0] = tl->hwsp_seqno; + + err = i915_timeline_get_seqno(tl, rq, &seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", + seqno[1], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[1] = tl->hwsp_seqno; + + /* With wrap should come a new hwsp */ + GEM_BUG_ON(seqno[1] >= seqno[0]); + GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); + + i915_request_add(rq); + + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + err = -EIO; + goto out; + } + + if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", + *hwsp_seqno[0], *hwsp_seqno[1], + seqno[0], seqno[1]); + err = -EINVAL; + goto out; + } + + i915_retire_requests(i915); /* recycle HWSP */ + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + i915_timeline_unpin(tl); +out_free: + i915_timeline_put(tl); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int live_hwsp_recycle(void *arg) { struct drm_i915_private *i915 = arg; @@ -723,6 +835,7 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915) SUBTEST(live_hwsp_recycle), SUBTEST(live_hwsp_engine), SUBTEST(live_hwsp_alternate), + SUBTEST(live_hwsp_wrap), }; return i915_subtests(tests, i915); -- cgit v1.2.3 From e88619646971168e3baedc850c21243d303e31ca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 1 Mar 2019 17:09:00 +0000 Subject: drm/i915: Use HW semaphores for inter-engine synchronisation on gen8+ Having introduced per-context seqno, we now have a means to identity progress across the system without feel of rollback as befell the global_seqno. That is we can program a MI_SEMAPHORE_WAIT operation in advance of submission safe in the knowledge that our target seqno and address is stable. 
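For reference, a brief aside on the comparison semantics underpinning this (a sketch, not part of the patch): the driver's own seqno ordering test uses wrap-safe signed arithmetic, matching the existing i915_seqno_passed() helper, whereas MI_SEMAPHORE_WAIT with SAD_GTE_SDD is a plain unsigned greater-or-equal in hardware. That mismatch is why the previous patch swaps in a fresh HWSP cacheline on seqno wraparound rather than letting armed waiters spin forever on a post-wrap (numerically smaller) value.

    /*
     * Wrap-safe driver-side comparison (as defined elsewhere in i915):
     * seq1 "passes" seq2 even across a u32 wrap. The HW semaphore compare
     * has no such notion, hence the fresh cacheline per wrap.
     */
    static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
    {
            return (s32)(seq1 - seq2) >= 0;
    }
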
However, since we are telling the GPU to busy-spin on the target address until it matches the signaling seqno, we only want to do so when we are sure that busy-spin will be completed quickly. To achieve this we only submit the request to HW once the signaler is itself executing (modulo preemption causing us to wait longer), and we only do so for default and above priority requests (so that idle priority tasks never themselves hog the GPU waiting for others). As might be reasonably expected, HW semaphores excel in inter-engine synchronisation microbenchmarks (where the 3x reduced latency / increased throughput more than offset the power cost of spinning on a second ring) and have significant improvement (can be up to ~10%, most see no change) for single clients that utilize multiple engines (typically media players and transcoders), without regressing multiple clients that can saturate the system or changing the power envelope dramatically. v3: Drop the older NEQ branch, now we pin the signaler's HWSP anyway. v4: Tell the world and include it as part of scheduler caps. Testcase: igt/gem_exec_whisper Testcase: igt/benchmarks/gem_wsim Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190301170901.8340-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_request.c | 138 +++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_request.h | 26 +++++- drivers/gpu/drm/i915/i915_sw_fence.c | 4 +- drivers/gpu/drm/i915/i915_sw_fence.h | 3 + drivers/gpu/drm/i915/intel_engine_cs.c | 1 + drivers/gpu/drm/i915/intel_gpu_commands.h | 9 +- drivers/gpu/drm/i915/intel_lrc.c | 1 + drivers/gpu/drm/i915/intel_ringbuffer.h | 7 ++ include/uapi/drm/i915_drm.h | 1 + 10 files changed, 181 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c6354f6cdbdb..c08abdef5eb6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -351,7 +351,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL); break; case I915_PARAM_HAS_SEMAPHORES: - value = 0; + value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); break; case I915_PARAM_HAS_SECURE_BATCHES: value = capable(CAP_SYS_ADMIN); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index d354967d6ae8..59e30b8c4ee9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -22,8 +22,9 @@ * */ -#include #include +#include +#include #include #include #include @@ -32,9 +33,16 @@ #include "i915_active.h" #include "i915_reset.h" +struct execute_cb { + struct list_head link; + struct irq_work work; + struct i915_sw_fence *fence; +}; + static struct i915_global_request { struct kmem_cache *slab_requests; struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_execute_cbs; } global; static const char *i915_fence_get_driver_name(struct dma_fence *fence) @@ -325,6 +333,69 @@ void i915_request_retire_upto(struct i915_request *rq) } while (tmp != rq); } +static void irq_execute_cb(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); +} + +static void __notify_execute_cb(struct i915_request *rq) +{ + struct execute_cb *cb; + + 
lockdep_assert_held(&rq->lock); + + if (list_empty(&rq->execute_cb)) + return; + + list_for_each_entry(cb, &rq->execute_cb, link) + irq_work_queue(&cb->work); + + /* + * XXX Rollback on __i915_request_unsubmit() + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. + */ + INIT_LIST_HEAD(&rq->execute_cb); +} + +static int +i915_request_await_execution(struct i915_request *rq, + struct i915_request *signal, + gfp_t gfp) +{ + struct execute_cb *cb; + + if (i915_request_is_active(signal)) + return 0; + + cb = kmem_cache_alloc(global.slab_execute_cbs, gfp); + if (!cb) + return -ENOMEM; + + cb->fence = &rq->submit; + i915_sw_fence_await(cb->fence); + init_irq_work(&cb->work, irq_execute_cb); + + spin_lock_irq(&signal->lock); + if (i915_request_is_active(signal)) { + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); + } else { + list_add_tail(&cb->link, &signal->execute_cb); + } + spin_unlock_irq(&signal->lock); + + return 0; +} + static void move_to_timeline(struct i915_request *request, struct i915_timeline *timeline) { @@ -361,6 +432,8 @@ void __i915_request_submit(struct i915_request *request) !i915_request_enable_breadcrumb(request)) intel_engine_queue_breadcrumbs(engine); + __notify_execute_cb(request); + spin_unlock(&request->lock); engine->emit_fini_breadcrumb(request, @@ -608,6 +681,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) } INIT_LIST_HEAD(&rq->active_list); + INIT_LIST_HEAD(&rq->execute_cb); tl = ce->ring->timeline; ret = i915_timeline_get_seqno(tl, rq, &seqno); @@ -696,6 +770,52 @@ err_unreserve: return ERR_PTR(ret); } +static int +emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + gfp_t gfp) +{ + u32 hwsp_offset; + u32 *cs; + int err; + + GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); + GEM_BUG_ON(INTEL_GEN(to->i915) < 8); + + /* We need to pin the signaler's HWSP until we are finished reading. */ + err = i915_timeline_read_hwsp(from, to, &hwsp_offset); + if (err) + return err; + + /* Only submit our spinner after the signaler is running! */ + err = i915_request_await_execution(to, from, gfp); + if (err) + return err; + + cs = intel_ring_begin(to, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Using greater-than-or-equal here means we have to worry + * about seqno wraparound. To side step that issue, we swap + * the timeline HWSP upon wrapping, so that everyone listening + * for the old (pre-wrap) values do not see the much smaller + * (post-wrap) values than they were expecting (and so wait + * forever). 
+ */ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = from->fence.seqno; + *cs++ = hwsp_offset; + *cs++ = 0; + + intel_ring_advance(to, cs); + return 0; +} + static int i915_request_await_request(struct i915_request *to, struct i915_request *from) { @@ -717,6 +837,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, &from->submit, I915_FENCE_GFP); + } else if (intel_engine_has_semaphores(to->engine) && + to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) { + ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); } else { ret = i915_sw_fence_await_dma_fence(&to->submit, &from->fence, 0, @@ -1208,14 +1331,23 @@ int __init i915_global_request_init(void) if (!global.slab_requests) return -ENOMEM; + global.slab_execute_cbs = KMEM_CACHE(execute_cb, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_execute_cbs) + goto err_requests; + global.slab_dependencies = KMEM_CACHE(i915_dependency, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT); if (!global.slab_dependencies) - goto err_requests; + goto err_execute_cbs; return 0; +err_execute_cbs: + kmem_cache_destroy(global.slab_execute_cbs); err_requests: kmem_cache_destroy(global.slab_requests); return -ENOMEM; @@ -1224,11 +1356,13 @@ err_requests: void i915_global_request_shrink(void) { kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_execute_cbs); kmem_cache_shrink(global.slab_requests); } void i915_global_request_exit(void) { kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_execute_cbs); kmem_cache_destroy(global.slab_requests); } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 09eaad06d2c6..4dd1dea1d1af 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -129,6 +129,7 @@ struct i915_request { */ struct i915_sw_fence submit; wait_queue_entry_t submitq; + struct list_head execute_cb; /* * A list of everyone we wait upon, and everyone who waits upon us. @@ -343,10 +344,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq) * i915_request_started - check if the request has begun being executed * @rq: the request * - * Returns true if the request has been submitted to hardware, and the hardware - * has advanced passed the end of the previous request and so should be either - * currently processing the request (though it may be preempted and so - * not necessarily the next request to complete) or have completed the request. + * If the timeline is not using initial breadcrumbs, a request is + * considered started if the previous request on its timeline (i.e. + * context) has been signaled. + * + * If the timeline is using semaphores, it will also be emitting an + * "initial breadcrumb" after the semaphores are complete and just before + * it began executing the user payload. A request can therefore be active + * on the HW and not yet started as it is still busywaiting on its + * dependencies (via HW semaphores). + * + * If the request has started, its dependencies will have been signaled + * (either by fences or by semaphores) and it will have begun processing + * the user payload. + * + * However, even if a request has started, it may have been preempted and + * so no longer active, or it may have already completed. + * + * See also i915_request_is_active(). 
+ * + * Returns true if the request has begun executing the user payload, or + * has completed: */ static inline bool i915_request_started(const struct i915_request *rq) { diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 7c58b049ecb5..8d1400d378d7 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence, __i915_sw_fence_notify(fence, FENCE_FREE); } -static void i915_sw_fence_complete(struct i915_sw_fence *fence) +void i915_sw_fence_complete(struct i915_sw_fence *fence) { debug_fence_assert(fence); @@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence) __i915_sw_fence_complete(fence, NULL); } -static void i915_sw_fence_await(struct i915_sw_fence *fence) +void i915_sw_fence_await(struct i915_sw_fence *fence) { debug_fence_assert(fence); WARN_ON(atomic_inc_return(&fence->pending) <= 1); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 0e055ea0179f..6dec9e1d1102 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -79,6 +79,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp); +void i915_sw_fence_await(struct i915_sw_fence *fence); +void i915_sw_fence_complete(struct i915_sw_fence *fence); + static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence) { return atomic_read(&fence->pending) <= 0; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index df8f88142f1d..4fc7e2ac6278 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -616,6 +616,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) } map[] = { #define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) } MAP(PREEMPTION, PREEMPTION), + MAP(SEMAPHORES, SEMAPHORES), #undef MAP }; struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h index b96a31bc1080..a34ece53a771 100644 --- a/drivers/gpu/drm/i915/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/intel_gpu_commands.h @@ -105,8 +105,13 @@ #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ -#define MI_SEMAPHORE_POLL (1<<15) -#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) +#define MI_SEMAPHORE_POLL (1 << 15) +#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) +#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) +#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12) +#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) +#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) +#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 3fd0c45a2920..6c9479acb433 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2330,6 +2330,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->park = NULL; engine->unpark = NULL; + engine->flags |= I915_ENGINE_HAS_SEMAPHORES; engine->flags |= I915_ENGINE_SUPPORTS_STATS; if (engine->i915->preempt_context) engine->flags |= I915_ENGINE_HAS_PREEMPTION; diff --git 
a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index b8ec7e40a59b..2e7264119ec4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -499,6 +499,7 @@ struct intel_engine_cs { #define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) #define I915_ENGINE_SUPPORTS_STATS BIT(1) #define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_HAS_SEMAPHORES BIT(3) unsigned int flags; /* @@ -576,6 +577,12 @@ intel_engine_has_preemption(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_HAS_PREEMPTION; } +static inline bool +intel_engine_has_semaphores(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_SEMAPHORES; +} + void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); static inline bool __execlists_need_preempt(int prio, int last) diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 8304a7f1ec3f..b10eea3f6d24 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -479,6 +479,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) +#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) #define I915_PARAM_HUC_STATUS 42 -- cgit v1.2.3 From 103b76eeff2e86cad489a54e6003d0173df76bde Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 5 Mar 2019 21:38:30 +0000 Subject: drm/i915: Use i915_global_register() Rather than manually add every new global into each hook, use i915_global_register() function and keep a list of registered globals to invoke instead. However, I haven't found a way for random drivers to add an .init table to avoid having to manually add ourselves to i915_globals_init() each time. Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190305213830.18094-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_active.c | 28 +++++++---- drivers/gpu/drm/i915/i915_active.h | 4 -- drivers/gpu/drm/i915/i915_gem_context.c | 28 +++++++---- drivers/gpu/drm/i915/i915_gem_context.h | 4 -- drivers/gpu/drm/i915/i915_gem_object.c | 28 +++++++---- drivers/gpu/drm/i915/i915_gem_object.h | 4 -- drivers/gpu/drm/i915/i915_globals.c | 82 +++++++++++++++------------------ drivers/gpu/drm/i915/i915_globals.h | 19 ++++++++ drivers/gpu/drm/i915/i915_request.c | 36 +++++++++------ drivers/gpu/drm/i915/i915_request.h | 4 -- drivers/gpu/drm/i915/i915_scheduler.c | 32 ++++++++----- drivers/gpu/drm/i915/i915_scheduler.h | 4 -- drivers/gpu/drm/i915/i915_vma.c | 28 +++++++---- drivers/gpu/drm/i915/i915_vma.h | 4 -- 14 files changed, 171 insertions(+), 134 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index d9f6471ac16c..863ae12707ba 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_active.h" +#include "i915_globals.h" #define BKL(ref) (&(ref)->i915->drm.struct_mutex) @@ -17,6 +18,7 @@ * nodes from a local slab cache to hopefully reduce the fragmentation. 
*/ static struct i915_global_active { + struct i915_global base; struct kmem_cache *slab_cache; } global; @@ -285,21 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active, #include "selftests/i915_active.c" #endif -int __init i915_global_active_init(void) +static void i915_global_active_shrink(void) { - global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); - if (!global.slab_cache) - return -ENOMEM; - - return 0; + kmem_cache_shrink(global.slab_cache); } -void i915_global_active_shrink(void) +static void i915_global_active_exit(void) { - kmem_cache_shrink(global.slab_cache); + kmem_cache_destroy(global.slab_cache); } -void i915_global_active_exit(void) +static struct i915_global_active global = { { + .shrink = i915_global_active_shrink, + .exit = i915_global_active_exit, +} }; + +int __init i915_global_active_init(void) { - kmem_cache_destroy(global.slab_cache); + global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); + if (!global.slab_cache) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 5fbd9102384b..8142a334b37b 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -419,8 +419,4 @@ void i915_active_fini(struct i915_active *ref); static inline void i915_active_fini(struct i915_active *ref) { } #endif -int i915_global_active_init(void); -void i915_global_active_shrink(void); -void i915_global_active_exit(void); - #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1e3211e909f1..b9f321947982 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -88,6 +88,7 @@ #include #include #include "i915_drv.h" +#include "i915_globals.h" #include "i915_trace.h" #include "intel_lrc_reg.h" #include "intel_workarounds.h" @@ -95,6 +96,7 @@ #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 static struct i915_global_context { + struct i915_global base; struct kmem_cache *slab_luts; } global; @@ -1423,21 +1425,27 @@ out_unlock: #include "selftests/i915_gem_context.c" #endif -int __init i915_global_context_init(void) +static void i915_global_context_shrink(void) { - global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); - if (!global.slab_luts) - return -ENOMEM; - - return 0; + kmem_cache_shrink(global.slab_luts); } -void i915_global_context_shrink(void) +static void i915_global_context_exit(void) { - kmem_cache_shrink(global.slab_luts); + kmem_cache_destroy(global.slab_luts); } -void i915_global_context_exit(void) +static struct i915_global_context global = { { + .shrink = i915_global_context_shrink, + .exit = i915_global_context_exit, +} }; + +int __init i915_global_context_init(void) { - kmem_cache_destroy(global.slab_luts); + global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); + if (!global.slab_luts) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index be63666ffaac..2f9ef333acaa 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -411,8 +411,4 @@ void intel_context_init(struct intel_context *ce, struct i915_lut_handle *i915_lut_handle_alloc(void); void i915_lut_handle_free(struct i915_lut_handle *lut); -int i915_global_context_init(void); -void i915_global_context_shrink(void); -void i915_global_context_exit(void); - 
#endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c index 4aeb8c3b87e4..ac6a5ab84586 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.c +++ b/drivers/gpu/drm/i915/i915_gem_object.c @@ -24,8 +24,10 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_globals.h" static struct i915_global_object { + struct i915_global base; struct kmem_cache *slab_objects; } global; @@ -61,6 +63,21 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } +static void i915_global_objects_shrink(void) +{ + kmem_cache_shrink(global.slab_objects); +} + +static void i915_global_objects_exit(void) +{ + kmem_cache_destroy(global.slab_objects); +} + +static struct i915_global_object global = { { + .shrink = i915_global_objects_shrink, + .exit = i915_global_objects_exit, +} }; + int __init i915_global_objects_init(void) { global.slab_objects = @@ -68,15 +85,6 @@ int __init i915_global_objects_init(void) if (!global.slab_objects) return -ENOMEM; + i915_global_register(&global.base); return 0; } - -void i915_global_objects_shrink(void) -{ - kmem_cache_shrink(global.slab_objects); -} - -void i915_global_objects_exit(void) -{ - kmem_cache_destroy(global.slab_objects); -} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 0eaa2b3aeb62..1a24dc97e4fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -502,8 +502,4 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); -int i915_global_objects_init(void); -void i915_global_objects_shrink(void); -void i915_global_objects_exit(void); - #endif diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index cfd0bc462f58..1cf4e8bc8ec6 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -15,62 +15,61 @@ #include "i915_scheduler.h" #include "i915_vma.h" -int __init i915_globals_init(void) +static LIST_HEAD(globals); + +void __init i915_global_register(struct i915_global *global) { - int err; + GEM_BUG_ON(!global->shrink); + GEM_BUG_ON(!global->exit); - err = i915_global_active_init(); - if (err) - return err; + list_add_tail(&global->link, &globals); +} - err = i915_global_context_init(); - if (err) - goto err_active; +static void __i915_globals_cleanup(void) +{ + struct i915_global *global, *next; - err = i915_global_objects_init(); - if (err) - goto err_context; + list_for_each_entry_safe_reverse(global, next, &globals, link) + global->exit(); +} - err = i915_global_request_init(); - if (err) - goto err_objects; +static __initconst int (* const initfn[])(void) = { + i915_global_active_init, + i915_global_context_init, + i915_global_objects_init, + i915_global_request_init, + i915_global_scheduler_init, + i915_global_vma_init, +}; - err = i915_global_scheduler_init(); - if (err) - goto err_request; +int __init i915_globals_init(void) +{ + int i; - err = i915_global_vma_init(); - if (err) - goto err_scheduler; + for (i = 0; i < ARRAY_SIZE(initfn); i++) { + int err; - return 0; + err = initfn[i](); + if (err) { + __i915_globals_cleanup(); + return err; + } + } -err_scheduler: - i915_global_scheduler_exit(); -err_request: - i915_global_request_exit(); -err_objects: - i915_global_objects_exit(); -err_context: - 
i915_global_context_exit(); -err_active: - i915_global_active_exit(); - return err; + return 0; } static void i915_globals_shrink(void) { + struct i915_global *global; + /* * kmem_cache_shrink() discards empty slabs and reorders partially * filled slabs to prioritise allocating from the mostly full slabs, * with the aim of reducing fragmentation. */ - i915_global_active_shrink(); - i915_global_context_shrink(); - i915_global_objects_shrink(); - i915_global_request_shrink(); - i915_global_scheduler_shrink(); - i915_global_vma_shrink(); + list_for_each_entry(global, &globals, link) + global->shrink(); } static atomic_t active; @@ -128,12 +127,7 @@ void __exit i915_globals_exit(void) rcu_barrier(); flush_scheduled_work(); - i915_global_vma_exit(); - i915_global_scheduler_exit(); - i915_global_request_exit(); - i915_global_objects_exit(); - i915_global_context_exit(); - i915_global_active_exit(); + __i915_globals_cleanup(); /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ rcu_barrier(); diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h index e468f0413a73..a45529022a42 100644 --- a/drivers/gpu/drm/i915/i915_globals.h +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -7,9 +7,28 @@ #ifndef _I915_GLOBALS_H_ #define _I915_GLOBALS_H_ +typedef void (*i915_global_func_t)(void); + +struct i915_global { + struct list_head link; + + i915_global_func_t shrink; + i915_global_func_t exit; +}; + +void i915_global_register(struct i915_global *global); + int i915_globals_init(void); void i915_globals_park(void); void i915_globals_unpark(void); void i915_globals_exit(void); +/* constructors */ +int i915_global_active_init(void); +int i915_global_context_init(void); +int i915_global_objects_init(void); +int i915_global_request_init(void); +int i915_global_scheduler_init(void); +int i915_global_vma_init(void); + #endif /* _I915_GLOBALS_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index bcf3c1a155e2..f8a63495114c 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -31,6 +31,7 @@ #include "i915_drv.h" #include "i915_active.h" +#include "i915_globals.h" #include "i915_reset.h" struct execute_cb { @@ -40,6 +41,7 @@ struct execute_cb { }; static struct i915_global_request { + struct i915_global base; struct kmem_cache *slab_requests; struct kmem_cache *slab_dependencies; struct kmem_cache *slab_execute_cbs; @@ -1338,6 +1340,25 @@ void i915_retire_requests(struct drm_i915_private *i915) #include "selftests/i915_request.c" #endif +static void i915_global_request_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_execute_cbs); + kmem_cache_shrink(global.slab_requests); +} + +static void i915_global_request_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_execute_cbs); + kmem_cache_destroy(global.slab_requests); +} + +static struct i915_global_request global = { { + .shrink = i915_global_request_shrink, + .exit = i915_global_request_exit, +} }; + int __init i915_global_request_init(void) { global.slab_requests = KMEM_CACHE(i915_request, @@ -1360,6 +1381,7 @@ int __init i915_global_request_init(void) if (!global.slab_dependencies) goto err_execute_cbs; + i915_global_register(&global.base); return 0; err_execute_cbs: @@ -1368,17 +1390,3 @@ err_requests: kmem_cache_destroy(global.slab_requests); return -ENOMEM; } - -void i915_global_request_shrink(void) -{ - 
kmem_cache_shrink(global.slab_dependencies); - kmem_cache_shrink(global.slab_execute_cbs); - kmem_cache_shrink(global.slab_requests); -} - -void i915_global_request_exit(void) -{ - kmem_cache_destroy(global.slab_dependencies); - kmem_cache_destroy(global.slab_execute_cbs); - kmem_cache_destroy(global.slab_requests); -} diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 4dd1dea1d1af..8c8fa5010644 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -406,8 +406,4 @@ static inline void i915_request_mark_complete(struct i915_request *rq) void i915_retire_requests(struct drm_i915_private *i915); -int i915_global_request_init(void); -void i915_global_request_shrink(void); -void i915_global_request_exit(void); - #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 3f0a4d56bd37..e0f609d01564 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -7,10 +7,12 @@ #include #include "i915_drv.h" +#include "i915_globals.h" #include "i915_request.h" #include "i915_scheduler.h" static struct i915_global_scheduler { + struct i915_global base; struct kmem_cache *slab_dependencies; struct kmem_cache *slab_priorities; } global; @@ -437,6 +439,23 @@ void __i915_priolist_free(struct i915_priolist *p) kmem_cache_free(global.slab_priorities, p); } +static void i915_global_scheduler_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_priorities); +} + +static void i915_global_scheduler_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_priorities); +} + +static struct i915_global_scheduler global = { { + .shrink = i915_global_scheduler_shrink, + .exit = i915_global_scheduler_exit, +} }; + int __init i915_global_scheduler_init(void) { global.slab_dependencies = KMEM_CACHE(i915_dependency, @@ -449,21 +468,10 @@ int __init i915_global_scheduler_init(void) if (!global.slab_priorities) goto err_priorities; + i915_global_register(&global.base); return 0; err_priorities: kmem_cache_destroy(global.slab_priorities); return -ENOMEM; } - -void i915_global_scheduler_shrink(void) -{ - kmem_cache_shrink(global.slab_dependencies); - kmem_cache_shrink(global.slab_priorities); -} - -void i915_global_scheduler_exit(void) -{ - kmem_cache_destroy(global.slab_dependencies); - kmem_cache_destroy(global.slab_priorities); -} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 6ce450cf63fa..9a1d257f3d6e 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -134,8 +134,4 @@ static inline void i915_priolist_free(struct i915_priolist *p) __i915_priolist_free(p); } -int i915_global_scheduler_init(void); -void i915_global_scheduler_shrink(void); -void i915_global_scheduler_exit(void); - #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 757a33998bbf..36726392e737 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -25,12 +25,14 @@ #include "i915_vma.h" #include "i915_drv.h" +#include "i915_globals.h" #include "intel_ringbuffer.h" #include "intel_frontbuffer.h" #include static struct i915_global_vma { + struct i915_global base; struct kmem_cache *slab_vmas; } global; @@ -1054,21 +1056,27 @@ unpin: #include "selftests/i915_vma.c" #endif -int __init i915_global_vma_init(void) 
+static void i915_global_vma_shrink(void) { - global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); - if (!global.slab_vmas) - return -ENOMEM; - - return 0; + kmem_cache_shrink(global.slab_vmas); } -void i915_global_vma_shrink(void) +static void i915_global_vma_exit(void) { - kmem_cache_shrink(global.slab_vmas); + kmem_cache_destroy(global.slab_vmas); } -void i915_global_vma_exit(void) +static struct i915_global_vma global = { { + .shrink = i915_global_vma_shrink, + .exit = i915_global_vma_exit, +} }; + +int __init i915_global_vma_init(void) { - kmem_cache_destroy(global.slab_vmas); + global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); + if (!global.slab_vmas) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 37f93358aa3c..6eab70953a57 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -443,8 +443,4 @@ void i915_vma_parked(struct drm_i915_private *i915); struct i915_vma *i915_vma_alloc(void); void i915_vma_free(struct i915_vma *vma); -int i915_global_vma_init(void); -void i915_global_vma_shrink(void); -void i915_global_vma_exit(void); - #endif -- cgit v1.2.3 From ea593dbba4c8ed841630fa5445202627e1046ba6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 22 Mar 2019 09:23:25 +0000 Subject: drm/i915: Allow contexts to share a single timeline across all engines Previously, our view has been always to run the engines independently within a context. (Multiple engines happened before we had contexts and timelines, so they always operated independently and that behaviour persisted into contexts.) However, at the user level the context often represents a single timeline (e.g. GL contexts) and userspace must ensure that the individual engines are serialised to present that ordering to the client (or forgot about this detail entirely and hope no one notices - a fair ploy if the client can only directly control one engine themselves ;) In the next patch, we will want to construct a set of engines that operate as one, that have a single timeline interwoven between them, to present a single virtual engine to the user. (They submit to the virtual engine, then we decide which engine to execute on based.) To that end, we want to be able to create contexts which have a single timeline (fence context) shared between all engines, rather than multiple timelines. v2: Move the specialised timeline ordering to its own function. 
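To make the new uapi concrete, here is a minimal userspace sketch (not part of the patch) that asks for such a single-timeline context. It relies only on the struct and flag added to i915_drm.h below; libdrm's drmIoctl() helper and the DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT macro name are assumptions on top of that.

/*
 * Minimal sketch (not part of the patch): create a context whose engines
 * share a single timeline. Relies on the uapi added below; drmIoctl() and
 * the DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT macro are assumed from libdrm
 * and the existing ioctl table.
 */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int create_single_timeline_context(int i915_fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create_ext arg = {
		.flags = I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
	};

	/* Kernels without this patch reject the unknown flag with -EINVAL. */
	if (drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg))
		return -errno;

	*ctx_id = arg.ctx_id;
	return 0;
}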
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_context.c | 31 +++++++++-- drivers/gpu/drm/i915/i915_gem_context_types.h | 2 + drivers/gpu/drm/i915/i915_request.c | 80 ++++++++++++++++++--------- drivers/gpu/drm/i915/i915_request.h | 5 +- drivers/gpu/drm/i915/i915_sw_fence.c | 39 +++++++++++-- drivers/gpu/drm/i915/i915_sw_fence.h | 13 ++++- drivers/gpu/drm/i915/intel_lrc.c | 5 +- drivers/gpu/drm/i915/selftests/mock_context.c | 2 +- include/uapi/drm/i915_drm.h | 3 +- 9 files changed, 138 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 68f9c7c7bd6c..39ca3cb6af66 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -238,6 +238,9 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node) intel_context_put(it); + if (ctx->timeline) + i915_timeline_put(ctx->timeline); + kfree(ctx->name); put_pid(ctx->pid); @@ -403,12 +406,16 @@ static void __assign_ppgtt(struct i915_gem_context *ctx, } static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *dev_priv) +i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) { struct i915_gem_context *ctx; lockdep_assert_held(&dev_priv->drm.struct_mutex); + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && + !HAS_EXECLISTS(dev_priv)) + return ERR_PTR(-EINVAL); + /* Reap the most stale context */ contexts_free_first(dev_priv); @@ -431,6 +438,18 @@ i915_gem_create_context(struct drm_i915_private *dev_priv) i915_ppgtt_put(ppgtt); } + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { + struct i915_timeline *timeline; + + timeline = i915_timeline_create(dev_priv, NULL); + if (IS_ERR(timeline)) { + context_close(ctx); + return ERR_CAST(timeline); + } + + ctx->timeline = timeline; + } + trace_i915_context_create(ctx); return ctx; @@ -459,7 +478,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) if (ret) return ERR_PTR(ret); - ctx = i915_gem_create_context(to_i915(dev)); + ctx = i915_gem_create_context(to_i915(dev), 0); if (IS_ERR(ctx)) goto out; @@ -495,7 +514,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) struct i915_gem_context *ctx; int err; - ctx = i915_gem_create_context(i915); + ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; @@ -658,7 +677,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, idr_init_base(&file_priv->vm_idr, 1); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915); + ctx = i915_gem_create_context(i915, 0); mutex_unlock(&i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); @@ -800,7 +819,7 @@ last_request_on_engine(struct i915_timeline *timeline, rq = i915_active_request_raw(&timeline->last_request, &engine->i915->drm.struct_mutex); - if (rq && rq->engine == engine) { + if (rq && rq->engine->mask & engine->mask) { GEM_TRACE("last request on engine %s: %llx:%llu\n", engine->name, rq->fence.context, rq->fence.seqno); GEM_BUG_ON(rq->timeline != timeline); @@ -1520,7 +1539,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ext_data.ctx = i915_gem_create_context(i915); + ext_data.ctx = i915_gem_create_context(i915, args->flags); 
mutex_unlock(&dev->struct_mutex); if (IS_ERR(ext_data.ctx)) return PTR_ERR(ext_data.ctx); diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h index 63ae8eb21939..e2ec58b10fb2 100644 --- a/drivers/gpu/drm/i915/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/i915_gem_context_types.h @@ -41,6 +41,8 @@ struct i915_gem_context { /** file_priv: owning file descriptor */ struct drm_i915_file_private *file_priv; + struct i915_timeline *timeline; + /** * @ppgtt: unique address space (GTT) * diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 1529824d7c61..e9c2094ab8ea 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -992,6 +992,60 @@ void i915_request_skip(struct i915_request *rq, int error) memset(vaddr + head, 0, rq->postfix - head); } +static struct i915_request * +__i915_request_add_to_timeline(struct i915_request *rq) +{ + struct i915_timeline *timeline = rq->timeline; + struct i915_request *prev; + + /* + * Dependency tracking and request ordering along the timeline + * is special cased so that we can eliminate redundant ordering + * operations while building the request (we know that the timeline + * itself is ordered, and here we guarantee it). + * + * As we know we will need to emit tracking along the timeline, + * we embed the hooks into our request struct -- at the cost of + * having to have specialised no-allocation interfaces (which will + * be beneficial elsewhere). + * + * A second benefit to open-coding i915_request_await_request is + * that we can apply a slight variant of the rules specialised + * for timelines that jump between engines (such as virtual engines). + * If we consider the case of virtual engine, we must emit a dma-fence + * to prevent scheduling of the second request until the first is + * complete (to maximise our greedy late load balancing) and this + * precludes optimising to use semaphores serialisation of a single + * timeline across engines. + */ + prev = i915_active_request_raw(&timeline->last_request, + &rq->i915->drm.struct_mutex); + if (prev && !i915_request_completed(prev)) { + if (is_power_of_2(prev->engine->mask | rq->engine->mask)) + i915_sw_fence_await_sw_fence(&rq->submit, + &prev->submit, + &rq->submitq); + else + __i915_sw_fence_await_dma_fence(&rq->submit, + &prev->fence, + &rq->dmaq); + if (rq->engine->schedule) + __i915_sched_node_add_dependency(&rq->sched, + &prev->sched, + &rq->dep, + 0); + } + + spin_lock_irq(&timeline->lock); + list_add_tail(&rq->link, &timeline->requests); + spin_unlock_irq(&timeline->lock); + + GEM_BUG_ON(timeline->seqno != rq->fence.seqno); + __i915_active_request_set(&timeline->last_request, rq); + + return prev; +} + /* * NB: This function is not allowed to fail. Doing so would mean the the * request is not being tracked for completion but the work itself is @@ -1036,31 +1090,7 @@ void i915_request_add(struct i915_request *request) GEM_BUG_ON(IS_ERR(cs)); request->postfix = intel_ring_offset(request, cs); - /* - * Seal the request and mark it as pending execution. Note that - * we may inspect this state, without holding any locks, during - * hangcheck. Hence we apply the barrier to ensure that we do not - * see a more recent value in the hws than we are tracking. 
- */ - - prev = i915_active_request_raw(&timeline->last_request, - &request->i915->drm.struct_mutex); - if (prev && !i915_request_completed(prev)) { - i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, - &request->submitq); - if (engine->schedule) - __i915_sched_node_add_dependency(&request->sched, - &prev->sched, - &request->dep, - 0); - } - - spin_lock_irq(&timeline->lock); - list_add_tail(&request->link, &timeline->requests); - spin_unlock_irq(&timeline->lock); - - GEM_BUG_ON(timeline->seqno != request->fence.seqno); - __i915_active_request_set(&timeline->last_request, request); + prev = __i915_request_add_to_timeline(request); list_add_tail(&request->ring_link, &ring->request_list); if (list_is_first(&request->ring_link, &ring->request_list)) diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 8c8fa5010644..cd6c130964cd 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -128,7 +128,10 @@ struct i915_request { * It is used by the driver to then queue the request for execution. */ struct i915_sw_fence submit; - wait_queue_entry_t submitq; + union { + wait_queue_entry_t submitq; + struct i915_sw_dma_fence_cb dmaq; + }; struct list_head execute_cb; /* diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 8d1400d378d7..5387aafd3424 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp); } -struct i915_sw_dma_fence_cb { - struct dma_fence_cb base; - struct i915_sw_fence *fence; -}; - struct i915_sw_dma_fence_cb_timer { struct i915_sw_dma_fence_cb base; struct dma_fence *dma; @@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, return ret; } +static void __dma_i915_sw_fence_wake(struct dma_fence *dma, + struct dma_fence_cb *data) +{ + struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); + + i915_sw_fence_complete(cb->fence); +} + +int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb) +{ + int ret; + + debug_fence_assert(fence); + + if (dma_fence_is_signaled(dma)) + return 0; + + cb->fence = fence; + i915_sw_fence_await(fence); + + ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake); + if (ret == 0) { + ret = 1; + } else { + i915_sw_fence_complete(fence); + if (ret == -ENOENT) /* fence already signaled */ + ret = 0; + } + + return ret; +} + int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 6dec9e1d1102..9cb5c3b307a6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -9,14 +9,13 @@ #ifndef _I915_SW_FENCE_H_ #define _I915_SW_FENCE_H_ +#include #include #include #include /* for NOTIFY_DONE */ #include struct completion; -struct dma_fence; -struct dma_fence_ops; struct reservation_object; struct i915_sw_fence { @@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, struct i915_sw_fence *after, gfp_t gfp); + +struct i915_sw_dma_fence_cb { + struct dma_fence_cb base; + struct i915_sw_fence *fence; +}; + +int 
__i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb); int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, struct dma_fence *dma, unsigned long timeout, gfp_t gfp); + int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 35f7ef9e75c8..66bc3cd4e166 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2802,7 +2802,10 @@ err_unpin_ctx: static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) { - return i915_timeline_create(ctx->i915, NULL); + if (ctx->timeline) + return i915_timeline_get(ctx->timeline); + else + return i915_timeline_create(ctx->i915, NULL); } static int execlists_context_deferred_alloc(struct intel_context *ce, diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 81e5ace18b81..0426093bf1d9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -97,7 +97,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file) lockdep_assert_held(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915); + ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index d45b79746fc4..9999f7d6a5a9 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1456,8 +1456,9 @@ struct drm_i915_gem_context_create_ext { __u32 ctx_id; /* output: id of new context*/ __u32 flags; #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) +#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ - (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1)) + (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) __u64 extensions; }; -- cgit v1.2.3 From b66ea2c2cf59b80c38a14127fafb49fdf0df9180 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Apr 2019 09:21:32 +0100 Subject: drm/i915: Use lockdep_pin_lock() over the construction of the request During request construction, we take the timeline->mutex to ensure exclusive access to the ringbuffer (for command emission) and the timeline itself (for command ordering). The timeline->mutex should not be dropped by callers until we release it in i915_request_add(). lockdep provides a pin/unpin lock facility to detect accidental unlocks inside critical sections, so put it to use for request construction. 
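For reference, the pin/unpin facility is generic lockdep machinery rather than anything i915-specific. A minimal sketch of the pattern follows; "struct foo" and its helpers are made up for illustration, standing in for the request/timeline pairing used by the patch below.

/*
 * Illustrative only: "struct foo" and its begin/end helpers are made up.
 * The real code stores the cookie in i915_request so that
 * i915_request_alloc() can pin rq->timeline->mutex and i915_request_add()
 * can unpin it again before the lock is finally dropped.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
	struct pin_cookie cookie;
};

static void foo_begin_critical_section(struct foo *f)
{
	mutex_lock(&f->lock);
	/* Any mutex_unlock() before the matching unpin now trips lockdep. */
	f->cookie = lockdep_pin_lock(&f->lock);
}

static void foo_end_critical_section(struct foo *f)
{
	lockdep_unpin_lock(&f->lock, f->cookie);
	mutex_unlock(&f->lock);
}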
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190403082132.327-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 5 +++++ drivers/gpu/drm/i915/i915_request.h | 10 ++++++++++ 2 files changed, 15 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 82094b9f5ba7..7f8a4eba98b8 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -751,7 +751,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) rq->infix = rq->ring->emit; /* end of header; start of user payload */ /* Check that we didn't interrupt ourselves with a new request */ + lockdep_assert_held(&rq->timeline->mutex); GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); + rq->cookie = lockdep_pin_lock(&rq->timeline->mutex); + return rq; err_unwind: @@ -1070,6 +1073,8 @@ void i915_request_add(struct i915_request *request) engine->name, request->fence.context, request->fence.seqno); lockdep_assert_held(&request->timeline->mutex); + lockdep_unpin_lock(&request->timeline->mutex, request->cookie); + trace_i915_request_add(request); /* diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index cd6c130964cd..875be6f71412 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -26,6 +26,7 @@ #define I915_REQUEST_H #include +#include #include "i915_gem.h" #include "i915_scheduler.h" @@ -120,6 +121,15 @@ struct i915_request { */ unsigned long rcustate; + /* + * We pin the timeline->mutex while constructing the request to + * ensure that no caller accidentally drops it during construction. + * The timeline->mutex must be held to ensure that only this caller + * can use the ring and manipulate the associated timeline during + * construction. + */ + struct pin_cookie cookie; + /* * Fences for the various phases in the request's lifetime. * -- cgit v1.2.3 From b7404c7ecb38b66f103cec694e23a8e99252829e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Apr 2019 16:29:22 +0100 Subject: drm/i915: Bump ready tasks ahead of busywaits Consider two tasks that are running in parallel on a pair of engines (vcs0, vcs1), but then must complete on a shared engine (rcs0). To maximise throughput, we want to run the first ready task on rcs0 (i.e. the first task that completes on either of vcs0 or vcs1). When using semaphores, however, we will instead queue onto rcs in submission order. To resolve this incorrect ordering, we want to re-evaluate the priority queue when each of the request is ready. Normally this happens because we only insert into the priority queue requests that are ready, but with semaphores we are inserting ahead of their readiness and to compensate we penalize those tasks with reduced priority (so that tasks that do not need to busywait should naturally be run first). However, given a series of tasks that each use semaphores, the queue degrades into submission fifo rather than readiness fifo, and so to counter this we give a small boost to semaphore users as their dependent tasks are completed (and so we no longer require any busywait prior to running the user task as they are then ready themselves). 
v2: Fixup irqsave for schedule_lock (Tvrtko) Testcase: igt/gem_exec_schedule/semaphore-codependency Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Dmitry Rogozhkin Cc: Dmitry Ermilov Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190409152922.23894-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 40 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_request.h | 1 + drivers/gpu/drm/i915/i915_scheduler.c | 21 +++++++++--------- 3 files changed, 52 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 96a9e8bcd805..a7d87cfaabcb 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -552,6 +552,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } +static int __i915_sw_fence_call +semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct i915_request *request = + container_of(fence, typeof(*request), semaphore); + + switch (state) { + case FENCE_COMPLETE: + /* + * We only check a small portion of our dependencies + * and so cannot guarantee that there remains no + * semaphore chain across all. Instead of opting + * for the full NOSEMAPHORE boost, we go for the + * smaller (but still preempting) boost of + * NEWCLIENT. This will be enough to boost over + * a busywaiting request (as that cannot be + * NEWCLIENT) without accidentally boosting + * a busywait over real work elsewhere. + */ + i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT); + break; + + case FENCE_FREE: + i915_request_put(request); + break; + } + + return NOTIFY_DONE; +} + static void ring_retire_requests(struct intel_ring *ring) { struct i915_request *rq, *rn; @@ -702,6 +732,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* We bump the ref for the fence chain */ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); + i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify); i915_sched_node_init(&rq->sched); @@ -784,6 +815,12 @@ emit_semaphore_wait(struct i915_request *to, &from->fence, 0, I915_FENCE_GFP); + err = i915_sw_fence_await_dma_fence(&to->semaphore, + &from->fence, 0, + I915_FENCE_GFP); + if (err < 0) + return err; + /* We need to pin the signaler's HWSP until we are finished reading. */ err = i915_timeline_read_hwsp(from, to, &hwsp_offset); if (err) @@ -1114,6 +1151,7 @@ void i915_request_add(struct i915_request *request) * run at the earliest possible convenience. 
*/ local_bh_disable(); + i915_sw_fence_commit(&request->semaphore); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->schedule) { struct i915_sched_attr attr = request->gem_context->sched; @@ -1320,7 +1358,9 @@ long i915_request_wait(struct i915_request *rq, if (flags & I915_WAIT_PRIORITY) { if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) gen6_rps_boost(rq); + local_bh_disable(); /* suspend tasklets for reprioritisation */ i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); + local_bh_enable(); /* kick tasklets en masse */ } wait.tsk = current; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 875be6f71412..a982664618c2 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -143,6 +143,7 @@ struct i915_request { struct i915_sw_dma_fence_cb dmaq; }; struct list_head execute_cb; + struct i915_sw_fence semaphore; /* * A list of everyone we wait upon, and everyone who waits upon us. diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 77dbf7d74e12..39bc4f54e272 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -64,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, { bool ret = false; - spin_lock(&schedule_lock); + spin_lock_irq(&schedule_lock); if (!node_signaled(signal)) { INIT_LIST_HEAD(&dep->dfs_link); @@ -81,7 +81,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, ret = true; } - spin_unlock(&schedule_lock); + spin_unlock_irq(&schedule_lock); return ret; } @@ -108,7 +108,7 @@ void i915_sched_node_fini(struct i915_sched_node *node) GEM_BUG_ON(!list_empty(&node->link)); - spin_lock(&schedule_lock); + spin_lock_irq(&schedule_lock); /* * Everyone we depended upon (the fences we wait to be signaled) @@ -135,7 +135,7 @@ void i915_sched_node_fini(struct i915_sched_node *node) i915_dependency_free(dep); } - spin_unlock(&schedule_lock); + spin_unlock_irq(&schedule_lock); } static inline struct i915_priolist *to_priolist(struct rb_node *rb) @@ -356,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq, memset(&cache, 0, sizeof(cache)); engine = rq->engine; - spin_lock_irq(&engine->timeline.lock); + spin_lock(&engine->timeline.lock); /* Fifo and depth-first replacement ensure our deps execute before us */ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { @@ -407,32 +407,33 @@ static void __i915_schedule(struct i915_request *rq, tasklet_hi_schedule(&engine->execlists.tasklet); } - spin_unlock_irq(&engine->timeline.lock); + spin_unlock(&engine->timeline.lock); } void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) { - spin_lock(&schedule_lock); + spin_lock_irq(&schedule_lock); __i915_schedule(rq, attr); - spin_unlock(&schedule_lock); + spin_unlock_irq(&schedule_lock); } void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) { struct i915_sched_attr attr; + unsigned long flags; GEM_BUG_ON(bump & ~I915_PRIORITY_MASK); if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID) return; - spin_lock_bh(&schedule_lock); + spin_lock_irqsave(&schedule_lock, flags); attr = rq->sched.attr; attr.priority |= bump; __i915_schedule(rq, &attr); - spin_unlock_bh(&schedule_lock); + spin_unlock_irqrestore(&schedule_lock, flags); } void __i915_priolist_free(struct i915_priolist *p) -- cgit v1.2.3
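As a closing illustration for the semaphore work at the top of this series, userspace can detect the new capability either via the revived I915_PARAM_HAS_SEMAPHORES getparam or via the I915_SCHEDULER_CAP_SEMAPHORES bit in the scheduler caps. The sketch below is hedged: the two semaphore names come from the patches above, while drmIoctl(), DRM_IOCTL_I915_GETPARAM and I915_PARAM_HAS_SCHEDULER are assumed from the pre-existing uapi and libdrm.

/*
 * Sketch: detect HW semaphore support from userspace. Only the semaphore
 * parameter/cap names are taken from this series; the rest is assumed
 * pre-existing uapi/libdrm.
 */
#include <stdbool.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static bool i915_has_semaphores(int i915_fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_SEMAPHORES,
		.value = &value,
	};

	if (drmIoctl(i915_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	/*
	 * Equivalent information is exported through the scheduler caps:
	 * query I915_PARAM_HAS_SCHEDULER and test the
	 * I915_SCHEDULER_CAP_SEMAPHORES bit.
	 */
	return value != 0;
}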