Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c  33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0e8d1010cecb..6348353b91ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ /* The timeline struct (as part of the ppgtt underneath a context)
+ * may be freed when the request is no longer in use by the GPU.
+ * We could extend the life of a context to beyond that of all
+ * fences, possibly keeping the hw resource around indefinitely,
+ * or we just give them a false name. Since
+ * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+ * lie seems justifiable.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return "signaled";
+
return to_request(fence)->timeline->common->name;
}
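
The hunk above lets a signaled fence report a static name rather than chase its timeline pointer, which may already have been freed along with the context's ppgtt once the request retires. As a rough, self-contained userspace model of that pattern (the model_* names below are invented for illustration and are not the kernel's dma-fence API):

#include <stdio.h>

/* Illustrative stand-ins for the kernel types; not the real i915/dma-fence code. */
#define MODEL_FENCE_FLAG_SIGNALED (1UL << 0)

struct model_timeline { const char *name; };

struct model_fence {
	unsigned long flags;
	struct model_timeline *timeline;	/* may be gone after signaling */
};

static const char *model_get_timeline_name(const struct model_fence *fence)
{
	/* Once signaled, never touch the (possibly freed) timeline again. */
	if (fence->flags & MODEL_FENCE_FLAG_SIGNALED)
		return "signaled";

	return fence->timeline->name;
}

int main(void)
{
	struct model_timeline tl = { .name = "render" };
	struct model_fence f = { .flags = 0, .timeline = &tl };

	printf("%s\n", model_get_timeline_name(&f));	/* "render" */
	f.flags |= MODEL_FENCE_FLAG_SIGNALED;
	f.timeline = NULL;				/* pretend the context went away */
	printf("%s\n", model_get_timeline_name(&f));	/* "signaled" */
	return 0;
}
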
@@ -180,7 +191,6 @@ i915_priotree_init(struct i915_priotree *pt)
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
- struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@@ -192,15 +202,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
if (ret)
return ret;
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests > 1);
-
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- if (wait_for(intel_engine_is_idle(engine), 50))
- return -EBUSY;
+ struct i915_gem_timeline *timeline;
+ struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
/* spin until threads are complete */
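
The wrap handling above relies on a wraparound-safe seqno comparison; in i915 this is the usual signed-difference idiom (treat the u32 difference as signed and test its sign). A standalone sketch of that comparison, with names invented here rather than taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "has seq1 reached seq2?" check, modelled on the common
 * signed-difference idiom for 32-bit GPU seqnos. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Plain case: 10 has passed 5. */
	printf("%d\n", seqno_passed(10, 5));		/* 1 */

	/* Across the wrap: 5 is "after" 0xfffffffa even though it is
	 * numerically smaller, because the difference is a small positive
	 * value once interpreted as signed. */
	printf("%d\n", seqno_passed(5, 0xfffffffaU));	/* 1 */

	/* The reverse ordering still reports false. */
	printf("%d\n", seqno_passed(0xfffffffaU, 5));	/* 0 */
	return 0;
}
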
@@ -211,14 +216,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
- }
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
- }
+ list_for_each_entry(timeline, &i915->gt.timelines, link)
+ memset(timeline->engine[id].sync_seqno, 0,
+ sizeof(timeline->engine[id].sync_seqno));
}
return 0;
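
Clearing sync_seqno[] after rewinding the global seqno matters because those values cache the last seqno already awaited from each engine; stale, large values would wrongly elide future waits. A small model of that cache and why it must be reset (the toy_* names and sizes are invented for illustration, not the driver's data structures):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_NUM_ENGINES 4

/* Per-timeline cache of the highest seqno already awaited from each engine,
 * used to skip redundant waits. */
struct toy_timeline {
	uint32_t sync_seqno[TOY_NUM_ENGINES];
};

/* Returns 1 if a real wait is needed, 0 if it can be skipped. */
static int toy_need_wait(struct toy_timeline *tl, int engine, uint32_t seqno)
{
	if ((int32_t)(tl->sync_seqno[engine] - seqno) >= 0)
		return 0;		/* already waited past this point */
	tl->sync_seqno[engine] = seqno;
	return 1;
}

int main(void)
{
	struct toy_timeline tl = { { 0 } };

	printf("%d\n", toy_need_wait(&tl, 0, 100));	/* 1: first wait */
	printf("%d\n", toy_need_wait(&tl, 0, 50));	/* 0: already covered */

	/* After rewinding the global seqno, the cached values are stale and
	 * would wrongly skip future waits, so they must be cleared, just as
	 * the hunk above does with memset(). */
	memset(tl.sync_seqno, 0, sizeof(tl.sync_seqno));
	printf("%d\n", toy_need_wait(&tl, 0, 10));	/* 1 again */
	return 0;
}
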
@@ -295,7 +296,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* completion order.
*/
list_del(&request->ring_link);
- request->ring->last_retired_head = request->postfix;
+ request->ring->head = request->postfix;
if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
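
The last hunk retires a request by advancing the ring's consumer head to the request's postfix directly, instead of recording it in a separate last_retired_head field. A toy, self-contained model of that space-reclaim step (the toy_* names are made up for illustration; this is not the i915 ring code):

#include <stdio.h>

/* Toy model of reclaiming ring space on retire: each request records the
 * offset just past its commands (its "postfix"); retiring in completion
 * order moves the consumer head up to that offset.  The size must be a
 * power of two for the mask below. */
struct toy_ring {
	unsigned int head;	/* consumer: space before this is free again */
	unsigned int tail;	/* producer: next emit offset */
	unsigned int size;
};

struct toy_request {
	unsigned int postfix;	/* ring offset just past this request */
};

static unsigned int toy_ring_space(const struct toy_ring *ring)
{
	/* Free bytes between tail and head, modulo the ring size. */
	return (ring->head - ring->tail - 1) & (ring->size - 1);
}

static void toy_request_retire(struct toy_ring *ring, const struct toy_request *rq)
{
	/* Requests retire in completion order, so everything before the
	 * postfix has been consumed and can be reused. */
	ring->head = rq->postfix;
}

int main(void)
{
	struct toy_ring ring = { .head = 0, .tail = 256, .size = 4096 };
	struct toy_request rq = { .postfix = 128 };

	printf("space before retire: %u\n", toy_ring_space(&ring));
	toy_request_retire(&ring, &rq);
	printf("space after retire:  %u\n", toy_ring_space(&ring));
	return 0;
}
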