From e1a73a54a96e80dc6009e73c9209e4f81ae22285 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Jan 2019 10:05:20 +0000 Subject: drm/i915: Measure the required reserved size for request emission Instead of tediously and fragilely counting up the number of dwords required to emit the breadcrumb to seal a request, fake a request and measure it automatically once during engine setup. The downside is that this requires a fair amount of mocking to create a proper breadcrumb. Still, should be less error prone in future as the breadcrumb size fluctuates! Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190125100520.20163-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e39e483d8d16..107c4934e2fa 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -299,7 +299,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode) return 0; } -static void gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { /* First we do the gen6_emit_post_sync_nonzero_flush w/a */ *cs++ = GFX_OP_PIPE_CONTROL(4); @@ -327,6 +327,8 @@ static void gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int gen6_rcs_emit_breadcrumb_sz = 14; @@ -409,7 +411,7 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) return 0; } -static void gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { *cs++ = GFX_OP_PIPE_CONTROL(4); *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | @@ -427,10 +429,12 @@ static void gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int gen7_rcs_emit_breadcrumb_sz = 6; -static void gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW; *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT; @@ -439,11 +443,13 @@ static void gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int gen6_xcs_emit_breadcrumb_sz = 4; #define GEN7_XCS_WA 32 -static void gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; @@ -466,6 +472,8 @@ static void gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int gen7_xcs_emit_breadcrumb_sz = 8 + GEN7_XCS_WA * 3; #undef GEN7_XCS_WA @@ -861,7 +869,7 @@ static void i9xx_submit_request(struct i915_request *request) intel_ring_set_tail(request->ring, request->tail)); } -static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) { *cs++ = MI_FLUSH; @@ -874,11 +882,13 @@ static void 
i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int i9xx_emit_breadcrumb_sz = 6; #define GEN5_WA_STORES 8 /* must be at least 1! */ -static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; @@ -895,6 +905,8 @@ static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; } static const int gen5_emit_breadcrumb_sz = GEN5_WA_STORES * 3 + 2; #undef GEN5_WA_STORES -- cgit v1.2.3 From 9fa4973e91be3e5cb220f7d607c21bf6e82c52d1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Jan 2019 12:00:04 +0000 Subject: drm/i915: Remove manual breadcumb counting Now that we know we measure the size of the engine->emit_breadcrumb() correctly, we can remove the previous manual counting. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190125120005.25191-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 4 ++-- drivers/gpu/drm/i915/intel_engine_cs.c | 7 +++---- drivers/gpu/drm/i915/intel_lrc.c | 4 ---- drivers/gpu/drm/i915/intel_ringbuffer.c | 28 +++++----------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 5 files changed, 11 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index f941e40fd373..ddc35e9dc0c0 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -650,7 +650,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * around inside i915_request_add() there is sufficient space at * the beginning of the ring as well. */ - rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32); + rq->reserved_space = 2 * engine->emit_breadcrumb_dw * sizeof(u32); /* * Record the position of the start of the request so that @@ -901,7 +901,7 @@ void i915_request_add(struct i915_request *request) * GPU processing the request, we never over-estimate the * position of the ring's HEAD. 
*/ - cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); + cs = intel_ring_begin(request, engine->emit_breadcrumb_dw); GEM_BUG_ON(IS_ERR(cs)); request->postfix = intel_ring_offset(request, cs); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 8f738a7cd117..ef4c8c50a4ba 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -611,7 +611,7 @@ struct measure_breadcrumb { u32 cs[1024]; }; -static int measure_breadcrumb_sz(struct intel_engine_cs *engine) +static int measure_breadcrumb_dw(struct intel_engine_cs *engine) { struct measure_breadcrumb *frame; unsigned int dw; @@ -637,7 +637,6 @@ static int measure_breadcrumb_sz(struct intel_engine_cs *engine) frame->rq.timeline = &frame->timeline; dw = engine->emit_breadcrumb(&frame->rq, frame->cs) - frame->cs; - GEM_BUG_ON(dw != engine->emit_breadcrumb_sz); i915_timeline_fini(&frame->timeline); kfree(frame); @@ -698,11 +697,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) goto err_breadcrumbs; - ret = measure_breadcrumb_sz(engine); + ret = measure_breadcrumb_dw(engine); if (ret < 0) goto err_status_page; - engine->emit_breadcrumb_sz = ret; + engine->emit_breadcrumb_dw = ret; return 0; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d2299425cf2f..5551dd2ec0e6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2075,7 +2075,6 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; static u32 *gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) { @@ -2099,7 +2098,6 @@ static u32 *gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS; static int gen8_init_rcs_context(struct i915_request *rq) { @@ -2192,7 +2190,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen8_emit_flush; engine->emit_breadcrumb = gen8_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; engine->set_default_submission = intel_execlists_set_default_submission; @@ -2298,7 +2295,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) engine->init_context = gen8_init_rcs_context; engine->emit_flush = gen8_emit_flush_render; engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs; - engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz; ret = logical_ring_init(engine); if (ret) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 107c4934e2fa..09c90475168a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -330,7 +330,6 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int gen6_rcs_emit_breadcrumb_sz = 14; static int gen7_render_ring_cs_stall_wa(struct i915_request *rq) @@ -432,7 +431,6 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int gen7_rcs_emit_breadcrumb_sz = 6; static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { @@ -446,7 +444,6 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int gen6_xcs_emit_breadcrumb_sz = 4; #define GEN7_XCS_WA 32 static u32 *gen7_xcs_emit_breadcrumb(struct i915_request 
*rq, u32 *cs) @@ -475,7 +472,6 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int gen7_xcs_emit_breadcrumb_sz = 8 + GEN7_XCS_WA * 3; #undef GEN7_XCS_WA static void set_hwstam(struct intel_engine_cs *engine, u32 mask) @@ -885,7 +881,6 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int i9xx_emit_breadcrumb_sz = 6; #define GEN5_WA_STORES 8 /* must be at least 1! */ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) @@ -908,7 +903,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) return cs; } -static const int gen5_emit_breadcrumb_sz = GEN5_WA_STORES * 3 + 2; #undef GEN5_WA_STORES static void @@ -2206,11 +2200,8 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->request_alloc = ring_request_alloc; engine->emit_breadcrumb = i9xx_emit_breadcrumb; - engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; - if (IS_GEN(dev_priv, 5)) { + if (IS_GEN(dev_priv, 5)) engine->emit_breadcrumb = gen5_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen5_emit_breadcrumb_sz; - } engine->set_default_submission = i9xx_set_default_submission; @@ -2240,12 +2231,10 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen7_rcs_emit_breadcrumb_sz; } else if (IS_GEN(dev_priv, 6)) { engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen6_render_ring_flush; engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen6_rcs_emit_breadcrumb_sz; } else if (IS_GEN(dev_priv, 5)) { engine->emit_flush = gen4_render_ring_flush; } else { @@ -2281,13 +2270,10 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) engine->emit_flush = gen6_bsd_ring_flush; engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; - if (IS_GEN(dev_priv, 6)) { + if (IS_GEN(dev_priv, 6)) engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz; - } else { + else engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz; - } } else { engine->emit_flush = bsd_ring_flush; if (IS_GEN(dev_priv, 5)) @@ -2310,13 +2296,10 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) engine->emit_flush = gen6_ring_flush; engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - if (IS_GEN(dev_priv, 6)) { + if (IS_GEN(dev_priv, 6)) engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz; - } else { + else engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz; - } return intel_init_ring_buffer(engine); } @@ -2335,7 +2318,6 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) engine->irq_disable = hsw_vebox_irq_disable; engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz; return intel_init_ring_buffer(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 479bd53d4ac6..0834e91d4ace 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -471,7 +471,7 @@ struct intel_engine_cs { #define I915_DISPATCH_SECURE BIT(0) #define I915_DISPATCH_PINNED BIT(1) u32 *(*emit_breadcrumb)(struct 
i915_request *rq, u32 *cs); - int emit_breadcrumb_sz; + int emit_breadcrumb_dw; /* Pass the request to the hardware queue (e.g. directly into * the legacy ringbuffer or to the end of an execlist). -- cgit v1.2.3 From eb8d0f5af4ec2d172baf8b4b9a2199cd916b4e54 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Jan 2019 13:22:28 +0000 Subject: drm/i915: Remove GPU reset dependence on struct_mutex Now that the submission backends are controlled via their own spinlocks, with a wave of a magic wand we can lift the struct_mutex requirement around GPU reset. That is we allow the submission frontend (userspace) to keep on submitting while we process the GPU reset as we can suspend the backend independently. The major change is around the backoff/handoff strategy for performing the reset. With no mutex deadlock, we no longer have to coordinate with any waiter, and just perform the reset immediately. Testcase: igt/gem_mmap_gtt/hang # regresses Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190125132230.22221-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 38 +- drivers/gpu/drm/i915/i915_drv.h | 5 - drivers/gpu/drm/i915/i915_gem.c | 18 +- drivers/gpu/drm/i915/i915_gem_fence_reg.h | 1 - drivers/gpu/drm/i915/i915_gem_gtt.h | 1 + drivers/gpu/drm/i915/i915_gpu_error.c | 104 +++--- drivers/gpu/drm/i915/i915_gpu_error.h | 28 +- drivers/gpu/drm/i915/i915_request.c | 47 --- drivers/gpu/drm/i915/i915_reset.c | 392 ++++++++++----------- drivers/gpu/drm/i915/i915_reset.h | 3 + drivers/gpu/drm/i915/intel_engine_cs.c | 6 +- drivers/gpu/drm/i915/intel_guc_submission.c | 5 +- drivers/gpu/drm/i915/intel_hangcheck.c | 28 +- drivers/gpu/drm/i915/intel_lrc.c | 91 ++--- drivers/gpu/drm/i915/intel_overlay.c | 2 - drivers/gpu/drm/i915/intel_ringbuffer.c | 91 +++-- drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 57 +-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 3 - drivers/gpu/drm/i915/selftests/mock_gem_device.c | 4 +- 20 files changed, 404 insertions(+), 537 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9a9e1da496dc..76dea0572f3e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1284,8 +1284,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_puts(m, "Wedged\n"); if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) seq_puts(m, "Reset in progress: struct_mutex backoff\n"); - if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags)) - seq_puts(m, "Reset in progress: reset handoff to waiter\n"); if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) seq_puts(m, "Waiter holding struct mutex\n"); if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) @@ -1321,15 +1319,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) struct rb_node *rb; seq_printf(m, "%s:\n", engine->name); - seq_printf(m, "\tseqno = %x [current %x, last %x]\n", + seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n", engine->hangcheck.seqno, seqno[id], - intel_engine_last_submit(engine)); - seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n", + intel_engine_last_submit(engine), + jiffies_to_msecs(jiffies - + engine->hangcheck.action_timestamp)); + seq_printf(m, "\twaiters? %s, fake irq active? 
%s\n", yesno(intel_engine_has_waiter(engine)), yesno(test_bit(engine->id, - &dev_priv->gpu_error.missed_irq_rings)), - yesno(engine->hangcheck.stalled), - yesno(engine->hangcheck.wedged)); + &dev_priv->gpu_error.missed_irq_rings))); spin_lock_irq(&b->rb_lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { @@ -1343,11 +1341,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); - seq_printf(m, "\taction = %s(%d) %d ms ago\n", - hangcheck_action_to_str(engine->hangcheck.action), - engine->hangcheck.action, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); if (engine->id == RCS) { seq_puts(m, "\tinstdone read =\n"); @@ -3911,8 +3904,6 @@ static int i915_wedged_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - struct intel_engine_cs *engine; - unsigned int tmp; /* * There is no safeguard against this debugfs entry colliding @@ -3925,18 +3916,8 @@ i915_wedged_set(void *data, u64 val) if (i915_reset_backoff(&i915->gpu_error)) return -EAGAIN; - for_each_engine_masked(engine, i915, val, tmp) { - engine->hangcheck.seqno = intel_engine_get_seqno(engine); - engine->hangcheck.stalled = true; - } - i915_handle_error(i915, val, I915_ERROR_CAPTURE, "Manually set wedged engine mask = %llx", val); - - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE); - return 0; } @@ -4091,13 +4072,8 @@ i915_drop_caches_set(void *data, u64 val) mutex_unlock(&i915->drm.struct_mutex); } - if (val & DROP_RESET_ACTIVE && - i915_terminally_wedged(&i915->gpu_error)) { + if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error)) i915_handle_error(i915, ALL_ENGINES, 0, NULL); - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE); - } fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3c111ad09922..0133d1da3d3c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3001,11 +3001,6 @@ static inline bool i915_reset_backoff(struct i915_gpu_error *error) return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); } -static inline bool i915_reset_handoff(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags)); -} - static inline bool i915_terminally_wedged(struct i915_gpu_error *error) { return unlikely(test_bit(I915_WEDGED, &error->flags)); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b359390ba22c..d20b42386c3c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -657,11 +657,6 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj, struct intel_rps_client *rps_client) { might_sleep(); -#if IS_ENABLED(CONFIG_LOCKDEP) - GEM_BUG_ON(debug_locks && - !!lockdep_is_held(&obj->base.dev->struct_mutex) != - !!(flags & I915_WAIT_LOCKED)); -#endif GEM_BUG_ON(timeout < 0); timeout = i915_gem_object_wait_reservation(obj->resv, @@ -4493,8 +4488,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915) GEM_TRACE("\n"); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); @@ -4520,6 +4513,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); intel_runtime_pm_put(i915, wakeref); + mutex_lock(&i915->drm.struct_mutex); 
i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); } @@ -4534,6 +4528,8 @@ int i915_gem_suspend(struct drm_i915_private *i915) wakeref = intel_runtime_pm_get(i915); intel_suspend_gt_powersave(i915); + flush_workqueue(i915->wq); + mutex_lock(&i915->drm.struct_mutex); /* @@ -4563,11 +4559,9 @@ int i915_gem_suspend(struct drm_i915_private *i915) i915_retire_requests(i915); /* ensure we flush after wedging */ mutex_unlock(&i915->drm.struct_mutex); + i915_reset_flush(i915); - intel_uc_suspend(i915); - - cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.retire_work); /* * As the idle_work is rearming if it detects a race, play safe and @@ -4575,6 +4569,8 @@ int i915_gem_suspend(struct drm_i915_private *i915) */ drain_delayed_work(&i915->gt.idle_work); + intel_uc_suspend(i915); + /* * Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index 99a31ded4dfd..09dcaf14121b 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -50,4 +50,3 @@ struct drm_i915_fence_reg { }; #endif - diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 9229b03d629b..a0039ea97cdc 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -39,6 +39,7 @@ #include #include "i915_request.h" +#include "i915_reset.h" #include "i915_selftest.h" #include "i915_timeline.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 1f8e80e31b49..4eef0462489c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -533,10 +533,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, err_printf(m, " waiting: %s\n", yesno(ee->waiting)); err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); - err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled)); - err_printf(m, " hangcheck action: %s\n", - hangcheck_action_to_str(ee->hangcheck_action)); - err_printf(m, " hangcheck action timestamp: %dms (%lu%s)\n", + err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", jiffies_to_msecs(ee->hangcheck_timestamp - epoch), ee->hangcheck_timestamp, ee->hangcheck_timestamp == epoch ? 
"; epoch" : ""); @@ -684,15 +681,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, jiffies_to_msecs(error->capture - error->epoch)); for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - if (error->engine[i].hangcheck_stalled && - error->engine[i].context.pid) { - err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n", - engine_name(m->i915, i), - error->engine[i].context.comm, - error->engine[i].context.pid, - error->engine[i].context.ban_score, - bannable(&error->engine[i].context)); - } + if (!error->engine[i].context.pid) + continue; + + err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n", + engine_name(m->i915, i), + error->engine[i].context.comm, + error->engine[i].context.pid, + error->engine[i].context.ban_score, + bannable(&error->engine[i].context)); } err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); @@ -1144,7 +1141,8 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, return i; } -/* Generate a semi-unique error code. The code is not meant to have meaning, The +/* + * Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by * grossly estimating a GPU error state. * @@ -1153,29 +1151,23 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, * * It's only a small step better than a random number in its current form. */ -static u32 i915_error_generate_code(struct drm_i915_private *dev_priv, - struct i915_gpu_state *error, - int *engine_id) +static u32 i915_error_generate_code(struct i915_gpu_state *error, + unsigned long engine_mask) { - u32 error_code = 0; - int i; - - /* IPEHR would be an ideal way to detect errors, as it's the gross + /* + * IPEHR would be an ideal way to detect errors, as it's the gross * measure of "the command that hung." However, has some very common * synchronization commands which almost always appear in the case * strictly a client bug. Use instdone to differentiate those some. 
*/ - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (error->engine[i].hangcheck_stalled) { - if (engine_id) - *engine_id = i; + if (engine_mask) { + struct drm_i915_error_engine *ee = + &error->engine[ffs(engine_mask)]; - return error->engine[i].ipehr ^ - error->engine[i].instdone.instdone; - } + return ee->ipehr ^ ee->instdone.instdone; } - return error_code; + return 0; } static void gem_record_fences(struct i915_gpu_state *error) @@ -1338,9 +1330,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } ee->idle = intel_engine_is_idle(engine); - ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; - ee->hangcheck_action = engine->hangcheck.action; - ee->hangcheck_stalled = engine->hangcheck.stalled; + if (!ee->idle) + ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, engine); @@ -1783,31 +1774,35 @@ static void capture_reg_state(struct i915_gpu_state *error) error->pgtbl_er = I915_READ(PGTBL_ER); } -static void i915_error_capture_msg(struct drm_i915_private *dev_priv, - struct i915_gpu_state *error, - u32 engine_mask, - const char *error_msg) +static const char * +error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg) { - u32 ecode; - int engine_id = -1, len; + int len; + int i; - ecode = i915_error_generate_code(dev_priv, error, &engine_id); + for (i = 0; i < ARRAY_SIZE(error->engine); i++) + if (!error->engine[i].context.pid) + engines &= ~BIT(i); len = scnprintf(error->error_msg, sizeof(error->error_msg), - "GPU HANG: ecode %d:%d:0x%08x", - INTEL_GEN(dev_priv), engine_id, ecode); - - if (engine_id != -1 && error->engine[engine_id].context.pid) + "GPU HANG: ecode %d:%lx:0x%08x", + INTEL_GEN(error->i915), engines, + i915_error_generate_code(error, engines)); + if (engines) { + /* Just show the first executing process, more is confusing */ + i = ffs(engines); len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", - error->engine[engine_id].context.comm, - error->engine[engine_id].context.pid); + error->engine[i].context.comm, + error->engine[i].context.pid); + } + if (msg) + len += scnprintf(error->error_msg + len, + sizeof(error->error_msg) - len, + ", %s", msg); - scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, - ", reason: %s, action: %s", - error_msg, - engine_mask ? "reset" : "continue"); + return error->error_msg; } static void capture_gen_state(struct i915_gpu_state *error) @@ -1847,7 +1842,7 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error) for (i = 0; i < ARRAY_SIZE(error->engine); i++) { const struct drm_i915_error_engine *ee = &error->engine[i]; - if (ee->hangcheck_stalled && + if (ee->hangcheck_timestamp && time_before(ee->hangcheck_timestamp, epoch)) epoch = ee->hangcheck_timestamp; } @@ -1921,7 +1916,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915) * i915_capture_error_state - capture an error record for later analysis * @i915: i915 device * @engine_mask: the mask of engines triggering the hang - * @error_msg: a message to insert into the error capture header + * @msg: a message to insert into the error capture header * * Should be called when an error is detected (either a hang or an error * interrupt) to capture error state from the time of the error. Fills @@ -1929,8 +1924,8 @@ i915_capture_gpu_state(struct drm_i915_private *i915) * to pick up. 
*/ void i915_capture_error_state(struct drm_i915_private *i915, - u32 engine_mask, - const char *error_msg) + unsigned long engine_mask, + const char *msg) { static bool warned; struct i915_gpu_state *error; @@ -1946,8 +1941,7 @@ void i915_capture_error_state(struct drm_i915_private *i915, if (IS_ERR(error)) return; - i915_error_capture_msg(i915, error, engine_mask, error_msg); - DRM_INFO("%s\n", error->error_msg); + dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg)); if (!error->simulated) { spin_lock_irqsave(&i915->gpu_error.lock, flags); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 604291f7762d..231173786eae 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -85,8 +85,6 @@ struct i915_gpu_state { bool waiting; int num_waiters; unsigned long hangcheck_timestamp; - bool hangcheck_stalled; - enum intel_engine_hangcheck_action hangcheck_action; struct i915_address_space *vm; int num_requests; u32 reset_count; @@ -197,6 +195,8 @@ struct i915_gpu_state { struct scatterlist *sgl, *fit; }; +struct i915_gpu_restart; + struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ @@ -247,15 +247,6 @@ struct i915_gpu_error { * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a * secondary role in preventing two concurrent global reset attempts. * - * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the - * struct_mutex. We try to acquire the struct_mutex in the reset worker, - * but it may be held by some long running waiter (that we cannot - * interrupt without causing trouble). Once we are ready to do the GPU - * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If - * they already hold the struct_mutex and want to participate they can - * inspect the bit and do the reset directly, otherwise the worker - * waits for the struct_mutex. - * * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to * acquire the struct_mutex to reset an engine, we need an explicit * flag to prevent two concurrent reset attempts in the same engine. @@ -269,20 +260,13 @@ struct i915_gpu_error { */ unsigned long flags; #define I915_RESET_BACKOFF 0 -#define I915_RESET_HANDOFF 1 -#define I915_RESET_MODESET 2 -#define I915_RESET_ENGINE 3 +#define I915_RESET_MODESET 1 +#define I915_RESET_ENGINE 2 #define I915_WEDGED (BITS_PER_LONG - 1) /** Number of times an engine has been reset */ u32 reset_engine_count[I915_NUM_ENGINES]; - /** Set of stalled engines with guilty requests, in the current reset */ - u32 stalled_mask; - - /** Reason for the current *global* reset */ - const char *reason; - struct mutex wedge_mutex; /* serialises wedging/unwedging */ /** @@ -299,6 +283,8 @@ struct i915_gpu_error { /* For missed irq/seqno simulation. 
*/ unsigned long test_irq_rings; + + struct i915_gpu_restart *restart; }; struct drm_i915_error_state_buf { @@ -320,7 +306,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); void i915_capture_error_state(struct drm_i915_private *dev_priv, - u32 engine_mask, + unsigned long engine_mask, const char *error_msg); static inline struct i915_gpu_state * diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ddc35e9dc0c0..f4241a17e2ad 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1083,18 +1083,6 @@ static bool __i915_spin_request(const struct i915_request *rq, return false; } -static bool __i915_wait_request_check_and_reset(struct i915_request *request) -{ - struct i915_gpu_error *error = &request->i915->gpu_error; - - if (likely(!i915_reset_handoff(error))) - return false; - - __set_current_state(TASK_RUNNING); - i915_reset(request->i915, error->stalled_mask, error->reason); - return true; -} - /** * i915_request_wait - wait until execution of request has finished * @rq: the request to wait upon @@ -1120,17 +1108,10 @@ long i915_request_wait(struct i915_request *rq, { const int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; - DEFINE_WAIT_FUNC(reset, default_wake_function); DEFINE_WAIT_FUNC(exec, default_wake_function); struct intel_wait wait; might_sleep(); -#if IS_ENABLED(CONFIG_LOCKDEP) - GEM_BUG_ON(debug_locks && - !!lockdep_is_held(&rq->i915->drm.struct_mutex) != - !!(flags & I915_WAIT_LOCKED)); -#endif GEM_BUG_ON(timeout < 0); if (i915_request_completed(rq)) @@ -1140,11 +1121,7 @@ long i915_request_wait(struct i915_request *rq, return -ETIME; trace_i915_request_wait_begin(rq, flags); - add_wait_queue(&rq->execute, &exec); - if (flags & I915_WAIT_LOCKED) - add_wait_queue(errq, &reset); - intel_wait_init(&wait); if (flags & I915_WAIT_PRIORITY) i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); @@ -1155,10 +1132,6 @@ restart: if (intel_wait_update_request(&wait, rq)) break; - if (flags & I915_WAIT_LOCKED && - __i915_wait_request_check_and_reset(rq)) - continue; - if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; goto complete; @@ -1188,9 +1161,6 @@ restart: */ goto wakeup; - if (flags & I915_WAIT_LOCKED) - __i915_wait_request_check_and_reset(rq); - for (;;) { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; @@ -1214,21 +1184,6 @@ wakeup: if (i915_request_completed(rq)) break; - /* - * If the GPU is hung, and we hold the lock, reset the GPU - * and then check for completion. On a full reset, the engine's - * HW seqno will be advanced passed us and we are complete. - * If we do a partial reset, we have to wait for the GPU to - * resume and update the breadcrumb. - * - * If we don't hold the mutex, we can just wait for the worker - * to come along and update the breadcrumb (either directly - * itself, or indirectly by recovering the GPU). 
- */ - if (flags & I915_WAIT_LOCKED && - __i915_wait_request_check_and_reset(rq)) - continue; - /* Only spin if we know the GPU is processing this request */ if (__i915_spin_request(rq, wait.seqno, state, 2)) break; @@ -1242,8 +1197,6 @@ wakeup: intel_engine_remove_wait(rq->engine, &wait); complete: __set_current_state(TASK_RUNNING); - if (flags & I915_WAIT_LOCKED) - remove_wait_queue(errq, &reset); remove_wait_queue(&rq->execute, &exec); trace_i915_request_wait_end(rq); diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c index 33408c4e6358..68af017ee548 100644 --- a/drivers/gpu/drm/i915/i915_reset.c +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -5,6 +5,7 @@ */ #include +#include #include "i915_drv.h" #include "i915_gpu_error.h" @@ -14,27 +15,31 @@ #define RESET_MAX_RETRIES 3 +/* XXX How to handle concurrent GGTT updates using tiling registers? */ +#define RESET_UNDER_STOP_MACHINE 0 + static void engine_skip_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; struct i915_gem_context *hung_ctx = rq->gem_context; struct i915_timeline *timeline = rq->timeline; - unsigned long flags; + lockdep_assert_held(&engine->timeline.lock); GEM_BUG_ON(timeline == &engine->timeline); - spin_lock_irqsave(&engine->timeline.lock, flags); spin_lock(&timeline->lock); - list_for_each_entry_continue(rq, &engine->timeline.requests, link) - if (rq->gem_context == hung_ctx) - i915_request_skip(rq, -EIO); + if (rq->global_seqno) { + list_for_each_entry_continue(rq, + &engine->timeline.requests, link) + if (rq->gem_context == hung_ctx) + i915_request_skip(rq, -EIO); + } list_for_each_entry(rq, &timeline->requests, link) i915_request_skip(rq, -EIO); spin_unlock(&timeline->lock); - spin_unlock_irqrestore(&engine->timeline.lock, flags); } static void client_mark_guilty(struct drm_i915_file_private *file_priv, @@ -61,7 +66,7 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv, } } -static void context_mark_guilty(struct i915_gem_context *ctx) +static bool context_mark_guilty(struct i915_gem_context *ctx) { unsigned int score; bool banned, bannable; @@ -74,7 +79,7 @@ static void context_mark_guilty(struct i915_gem_context *ctx) /* Cool contexts don't accumulate client ban score */ if (!bannable) - return; + return false; if (banned) { DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", @@ -85,6 +90,8 @@ static void context_mark_guilty(struct i915_gem_context *ctx) if (!IS_ERR_OR_NULL(ctx->file_priv)) client_mark_guilty(ctx->file_priv, ctx); + + return banned; } static void context_mark_innocent(struct i915_gem_context *ctx) @@ -92,6 +99,21 @@ static void context_mark_innocent(struct i915_gem_context *ctx) atomic_inc(&ctx->active_count); } +void i915_reset_request(struct i915_request *rq, bool guilty) +{ + lockdep_assert_held(&rq->engine->timeline.lock); + GEM_BUG_ON(i915_request_completed(rq)); + + if (guilty) { + i915_request_skip(rq, -EIO); + if (context_mark_guilty(rq->gem_context)) + engine_skip_context(rq); + } else { + dma_fence_set_error(&rq->fence, -EAGAIN); + context_mark_innocent(rq->gem_context); + } +} + static void gen3_stop_engine(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -604,11 +626,8 @@ int intel_reset_guc(struct drm_i915_private *i915) * Ensure irq handler finishes, and not run again. * Also return the active request so that we only search for it once. 
*/ -static struct i915_request * -reset_prepare_engine(struct intel_engine_cs *engine) +static void reset_prepare_engine(struct intel_engine_cs *engine) { - struct i915_request *rq; - /* * During the reset sequence, we must prevent the engine from * entering RC6. As the context state is undefined until we restart @@ -617,162 +636,85 @@ reset_prepare_engine(struct intel_engine_cs *engine) * GPU state upon resume, i.e. fail to restart after a reset. */ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); - - rq = engine->reset.prepare(engine); - if (rq && rq->fence.error == -EIO) - rq = ERR_PTR(-EIO); /* Previous reset failed! */ - - return rq; + engine->reset.prepare(engine); } -static int reset_prepare(struct drm_i915_private *i915) +static void reset_prepare(struct drm_i915_private *i915) { struct intel_engine_cs *engine; - struct i915_request *rq; enum intel_engine_id id; - int err = 0; - for_each_engine(engine, i915, id) { - rq = reset_prepare_engine(engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - continue; - } - - engine->hangcheck.active_request = rq; - } + for_each_engine(engine, i915, id) + reset_prepare_engine(engine); - i915_gem_revoke_fences(i915); intel_uc_sanitize(i915); - - return err; } -/* Returns the request if it was guilty of the hang */ -static struct i915_request * -reset_request(struct intel_engine_cs *engine, - struct i915_request *rq, - bool stalled) +static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + /* - * The guilty request will get skipped on a hung engine. - * - * Users of client default contexts do not rely on logical - * state preserved between batches so it is safe to execute - * queued requests following the hang. Non default contexts - * rely on preserved state, so skipping a batch loses the - * evolution of the state and it needs to be considered corrupted. - * Executing more queued batches on top of corrupted state is - * risky. But we take the risk by trying to advance through - * the queued requests in order to make the client behaviour - * more predictable around resets, by not throwing away random - * amount of batches it has prepared for execution. Sophisticated - * clients can use gem_reset_stats_ioctl and dma fence status - * (exported via sync_file info ioctl on explicit fences) to observe - * when it loses the context state and should rebuild accordingly. - * - * The context ban, and ultimately the client ban, mechanism are safety - * valves if client submission ends up resulting in nothing more than - * subsequent hangs. + * Everything depends on having the GTT running, so we need to start + * there. */ + err = i915_ggtt_enable_hw(i915); + if (err) + return err; - if (i915_request_completed(rq)) { - GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n", - engine->name, rq->global_seqno, - rq->fence.context, rq->fence.seqno, - intel_engine_get_seqno(engine)); - stalled = false; - } - - if (stalled) { - context_mark_guilty(rq->gem_context); - i915_request_skip(rq, -EIO); + for_each_engine(engine, i915, id) + intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id)); - /* If this context is now banned, skip all pending requests. */ - if (i915_gem_context_is_banned(rq->gem_context)) - engine_skip_context(rq); - } else { - /* - * Since this is not the hung engine, it may have advanced - * since the hang declaration. Double check by refinding - * the active request at the time of the reset. 
- */ - rq = i915_gem_find_active_request(engine); - if (rq) { - unsigned long flags; - - context_mark_innocent(rq->gem_context); - dma_fence_set_error(&rq->fence, -EAGAIN); - - /* Rewind the engine to replay the incomplete rq */ - spin_lock_irqsave(&engine->timeline.lock, flags); - rq = list_prev_entry(rq, link); - if (&rq->link == &engine->timeline.requests) - rq = NULL; - spin_unlock_irqrestore(&engine->timeline.lock, flags); - } - } + i915_gem_restore_fences(i915); - return rq; + return err; } -static void reset_engine(struct intel_engine_cs *engine, - struct i915_request *rq, - bool stalled) +static void reset_finish_engine(struct intel_engine_cs *engine) { - if (rq) - rq = reset_request(engine, rq, stalled); - - /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset.reset(engine, rq); + engine->reset.finish(engine); + intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); } -static void gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) +struct i915_gpu_restart { + struct work_struct work; + struct drm_i915_private *i915; +}; + +static void restart_work(struct work_struct *work) { + struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work); + struct drm_i915_private *i915 = arg->i915; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; - lockdep_assert_held(&i915->drm.struct_mutex); - - i915_retire_requests(i915); + wakeref = intel_runtime_pm_get(i915); + mutex_lock(&i915->drm.struct_mutex); + WRITE_ONCE(i915->gpu_error.restart, NULL); for_each_engine(engine, i915, id) { - struct intel_context *ce; - - reset_engine(engine, - engine->hangcheck.active_request, - stalled_mask & ENGINE_MASK(id)); - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); + struct i915_request *rq; /* * Ostensibily, we always want a context loaded for powersaving, * so if the engine is idle after the reset, send a request * to load our scratch kernel_context. - * - * More mysteriously, if we leave the engine idle after a reset, - * the next userspace batch may hang, with what appears to be - * an incoherent read by the CS (presumably stale TLB). An - * empty request appears sufficient to paper over the glitch. */ - if (intel_engine_is_idle(engine)) { - struct i915_request *rq; + if (!intel_engine_is_idle(engine)) + continue; - rq = i915_request_alloc(engine, i915->kernel_context); - if (!IS_ERR(rq)) - i915_request_add(rq); - } + rq = i915_request_alloc(engine, i915->kernel_context); + if (!IS_ERR(rq)) + i915_request_add(rq); } - i915_gem_restore_fences(i915); -} - -static void reset_finish_engine(struct intel_engine_cs *engine) -{ - engine->reset.finish(engine); + mutex_unlock(&i915->drm.struct_mutex); + intel_runtime_pm_put(i915, wakeref); - intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); + kfree(arg); } static void reset_finish(struct drm_i915_private *i915) @@ -780,11 +722,30 @@ static void reset_finish(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; - lockdep_assert_held(&i915->drm.struct_mutex); - - for_each_engine(engine, i915, id) { - engine->hangcheck.active_request = NULL; + for_each_engine(engine, i915, id) reset_finish_engine(engine); +} + +static void reset_restart(struct drm_i915_private *i915) +{ + struct i915_gpu_restart *arg; + + /* + * Following the reset, ensure that we always reload context for + * powersaving, and to correct engine->last_retired_context. 
Since + * this requires us to submit a request, queue a worker to do that + * task for us to evade any locking here. + */ + if (READ_ONCE(i915->gpu_error.restart)) + return; + + arg = kmalloc(sizeof(*arg), GFP_KERNEL); + if (arg) { + arg->i915 = i915; + INIT_WORK(&arg->work, restart_work); + + WRITE_ONCE(i915->gpu_error.restart, arg); + queue_work(i915->wq, &arg->work); } } @@ -873,8 +834,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) struct i915_timeline *tl; bool ret = false; - lockdep_assert_held(&i915->drm.struct_mutex); - if (!test_bit(I915_WEDGED, &error->flags)) return true; @@ -897,9 +856,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) */ list_for_each_entry(tl, &i915->gt.timelines, link) { struct i915_request *rq; + long timeout; - rq = i915_gem_active_peek(&tl->last_request, - &i915->drm.struct_mutex); + rq = i915_gem_active_get_unlocked(&tl->last_request); if (!rq) continue; @@ -914,12 +873,12 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) * and when the seqno passes the fence, the signaler * then signals the fence waking us up). */ - if (dma_fence_default_wait(&rq->fence, true, - MAX_SCHEDULE_TIMEOUT) < 0) + timeout = dma_fence_default_wait(&rq->fence, true, + MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + if (timeout < 0) goto unlock; } - i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); intel_engines_sanitize(i915, false); @@ -933,7 +892,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) * context and do not require stop_machine(). */ intel_engines_reset_default_submission(i915); - i915_gem_contexts_lost(i915); GEM_TRACE("end\n"); @@ -946,6 +904,52 @@ unlock: return ret; } +struct __i915_reset { + struct drm_i915_private *i915; + unsigned int stalled_mask; +}; + +static int __i915_reset__BKL(void *data) +{ + struct __i915_reset *arg = data; + int err; + + err = intel_gpu_reset(arg->i915, ALL_ENGINES); + if (err) + return err; + + return gt_reset(arg->i915, arg->stalled_mask); +} + +#if RESET_UNDER_STOP_MACHINE +/* + * XXX An alternative to using stop_machine would be to park only the + * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP) + * we should be able to prevent their memmory accesses via the lost fence + * registers over the course of the reset without the potential recursive + * of mutexes between the pagefault handler and reset. + * + * See igt/gem_mmap_gtt/hang + */ +#define __do_reset(fn, arg) stop_machine(fn, arg, NULL) +#else +#define __do_reset(fn, arg) fn(arg) +#endif + +static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask) +{ + struct __i915_reset arg = { i915, stalled_mask }; + int err, i; + + err = __do_reset(__i915_reset__BKL, &arg); + for (i = 0; err && i < RESET_MAX_RETRIES; i++) { + msleep(100); + err = __do_reset(__i915_reset__BKL, &arg); + } + + return err; +} + /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset @@ -971,31 +975,22 @@ void i915_reset(struct drm_i915_private *i915, { struct i915_gpu_error *error = &i915->gpu_error; int ret; - int i; GEM_TRACE("flags=%lx\n", error->flags); might_sleep(); - lockdep_assert_held(&i915->drm.struct_mutex); assert_rpm_wakelock_held(i915); GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - if (!test_bit(I915_RESET_HANDOFF, &error->flags)) - return; - /* Clear any previous failed attempts at recovery. Time to try again. 
*/ if (!i915_gem_unset_wedged(i915)) - goto wakeup; + return; if (reason) dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - ret = reset_prepare(i915); - if (ret) { - dev_err(i915->drm.dev, "GPU recovery failed\n"); - goto taint; - } + reset_prepare(i915); if (!intel_has_gpu_reset(i915)) { if (i915_modparams.reset) @@ -1005,32 +1000,11 @@ void i915_reset(struct drm_i915_private *i915, goto error; } - for (i = 0; i < RESET_MAX_RETRIES; i++) { - ret = intel_gpu_reset(i915, ALL_ENGINES); - if (ret == 0) - break; - - msleep(100); - } - if (ret) { + if (do_reset(i915, stalled_mask)) { dev_err(i915->drm.dev, "Failed to reset chip\n"); goto taint; } - /* Ok, now get things going again... */ - - /* - * Everything depends on having the GTT running, so we need to start - * there. - */ - ret = i915_ggtt_enable_hw(i915); - if (ret) { - DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", - ret); - goto error; - } - - gt_reset(i915, stalled_mask); intel_overlay_reset(i915); /* @@ -1052,9 +1026,8 @@ void i915_reset(struct drm_i915_private *i915, finish: reset_finish(i915); -wakeup: - clear_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_bit(&error->flags, I915_RESET_HANDOFF); + if (!i915_terminally_wedged(error)) + reset_restart(i915); return; taint: @@ -1073,7 +1046,6 @@ taint: add_taint(TAINT_WARN, LOCKDEP_STILL_OK); error: i915_gem_set_wedged(i915); - i915_retire_requests(i915); goto finish; } @@ -1099,18 +1071,16 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915, int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) { struct i915_gpu_error *error = &engine->i915->gpu_error; - struct i915_request *active_request; int ret; GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - active_request = reset_prepare_engine(engine); - if (IS_ERR_OR_NULL(active_request)) { - /* Either the previous reset failed, or we pardon the reset. */ - ret = PTR_ERR(active_request); - goto out; - } + if (i915_seqno_passed(intel_engine_get_seqno(engine), + intel_engine_last_submit(engine))) + return 0; + + reset_prepare_engine(engine); if (msg) dev_notice(engine->i915->drm.dev, @@ -1134,7 +1104,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * active request and can drop it, adjust head to skip the offending * request to resume executing remaining requests in the queue. */ - reset_engine(engine, active_request, true); + intel_engine_reset(engine, true); /* * The engine and its registers (and workarounds in case of render) @@ -1171,30 +1141,7 @@ static void i915_reset_device(struct drm_i915_private *i915, i915_wedge_on_timeout(&w, i915, 5 * HZ) { intel_prepare_reset(i915); - error->reason = reason; - error->stalled_mask = engine_mask; - - /* Signal that locked waiters should reset the GPU */ - smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_all(&error->wait_queue); - - /* - * Wait for anyone holding the lock to wakeup, without - * blocking indefinitely on struct_mutex. 
- */ - do { - if (mutex_trylock(&i915->drm.struct_mutex)) { - i915_reset(i915, engine_mask, reason); - mutex_unlock(&i915->drm.struct_mutex); - } - } while (wait_on_bit_timeout(&error->flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE, - 1)); - - error->stalled_mask = 0; - error->reason = NULL; + i915_reset(i915, engine_mask, reason); intel_finish_reset(i915); } @@ -1350,6 +1297,25 @@ out: intel_runtime_pm_put(i915, wakeref); } +bool i915_reset_flush(struct drm_i915_private *i915) +{ + int err; + + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + + flush_workqueue(i915->wq); + GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart)); + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + MAX_SCHEDULE_TIMEOUT); + mutex_unlock(&i915->drm.struct_mutex); + + return !err; +} + static void i915_wedge_me(struct work_struct *work) { struct i915_wedge_me *w = container_of(work, typeof(*w), work.work); diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h index b6a519bde67d..f2d347f319df 100644 --- a/drivers/gpu/drm/i915/i915_reset.h +++ b/drivers/gpu/drm/i915/i915_reset.h @@ -29,6 +29,9 @@ void i915_reset(struct drm_i915_private *i915, int i915_reset_engine(struct intel_engine_cs *engine, const char *reason); +void i915_reset_request(struct i915_request *rq, bool guilty); +bool i915_reset_flush(struct drm_i915_private *i915); + bool intel_has_gpu_reset(struct drm_i915_private *i915); bool intel_has_reset_engine(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index ef4c8c50a4ba..1a5c163b98d6 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1119,10 +1119,8 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force) if (!reset_engines(i915) && !force) return; - for_each_engine(engine, i915, id) { - if (engine->reset.reset) - engine->reset.reset(engine, NULL); - } + for_each_engine(engine, i915, id) + intel_engine_reset(engine, false); } /** diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 349ae5844f24..45e2db683fe5 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -834,8 +834,7 @@ static void guc_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->timeline.lock, flags); } -static struct i915_request * -guc_reset_prepare(struct intel_engine_cs *engine) +static void guc_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -861,8 +860,6 @@ guc_reset_prepare(struct intel_engine_cs *engine) */ if (engine->i915->guc.preempt_wq) flush_workqueue(engine->i915->guc.preempt_wq); - - return i915_gem_find_active_request(engine); } /* diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 741441daae32..5662d6fed523 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -25,6 +25,17 @@ #include "i915_drv.h" #include "i915_reset.h" +struct hangcheck { + u64 acthd; + u32 seqno; + enum intel_engine_hangcheck_action action; + unsigned long action_timestamp; + int deadlock; + struct intel_instdone instdone; + bool wedged:1; + bool stalled:1; +}; + static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) { u32 tmp = current_instdone | *old_instdone; @@ -119,25 +130,22 @@ 
engine_stuck(struct intel_engine_cs *engine, u64 acthd) } static void hangcheck_load_sample(struct intel_engine_cs *engine, - struct intel_engine_hangcheck *hc) + struct hangcheck *hc) { hc->acthd = intel_engine_get_active_head(engine); hc->seqno = intel_engine_get_seqno(engine); } static void hangcheck_store_sample(struct intel_engine_cs *engine, - const struct intel_engine_hangcheck *hc) + const struct hangcheck *hc) { engine->hangcheck.acthd = hc->acthd; engine->hangcheck.seqno = hc->seqno; - engine->hangcheck.action = hc->action; - engine->hangcheck.stalled = hc->stalled; - engine->hangcheck.wedged = hc->wedged; } static enum intel_engine_hangcheck_action hangcheck_get_action(struct intel_engine_cs *engine, - const struct intel_engine_hangcheck *hc) + const struct hangcheck *hc) { if (engine->hangcheck.seqno != hc->seqno) return ENGINE_ACTIVE_SEQNO; @@ -149,7 +157,7 @@ hangcheck_get_action(struct intel_engine_cs *engine, } static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, - struct intel_engine_hangcheck *hc) + struct hangcheck *hc) { unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; @@ -265,19 +273,19 @@ static void i915_hangcheck_elapsed(struct work_struct *work) intel_uncore_arm_unclaimed_mmio_detection(dev_priv); for_each_engine(engine, dev_priv, id) { - struct intel_engine_hangcheck hc; + struct hangcheck hc; hangcheck_load_sample(engine, &hc); hangcheck_accumulate_sample(engine, &hc); hangcheck_store_sample(engine, &hc); - if (engine->hangcheck.stalled) { + if (hc.stalled) { hung |= intel_engine_flag(engine); if (hc.action != ENGINE_DEAD) stuck |= intel_engine_flag(engine); } - if (engine->hangcheck.wedged) + if (hc.wedged) wedged |= intel_engine_flag(engine); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 5551dd2ec0e6..185867106c14 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -136,6 +136,7 @@ #include #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_reset.h" #include "i915_vgpu.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" @@ -264,7 +265,8 @@ static void unwind_wa_tail(struct i915_request *rq) assert_ring_tail_valid(rq->ring, rq->tail); } -static void __unwind_incomplete_requests(struct intel_engine_cs *engine) +static struct i915_request * +__unwind_incomplete_requests(struct intel_engine_cs *engine) { struct i915_request *rq, *rn, *active = NULL; struct list_head *uninitialized_var(pl); @@ -306,6 +308,8 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move_tail(&active->sched.link, i915_sched_lookup_priolist(engine, prio)); } + + return active; } void @@ -1732,11 +1736,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) return 0; } -static struct i915_request * -execlists_reset_prepare(struct intel_engine_cs *engine) +static void execlists_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *request, *active; unsigned long flags; GEM_TRACE("%s: depth<-%d\n", engine->name, @@ -1752,59 +1754,21 @@ execlists_reset_prepare(struct intel_engine_cs *engine) * prevents the race. */ __tasklet_disable_sync_once(&execlists->tasklet); + GEM_BUG_ON(!reset_in_progress(execlists)); + /* And flush any current direct submission. 
*/ spin_lock_irqsave(&engine->timeline.lock, flags); - - /* - * We want to flush the pending context switches, having disabled - * the tasklet above, we can assume exclusive access to the execlists. - * For this allows us to catch up with an inflight preemption event, - * and avoid blaming an innocent request if the stall was due to the - * preemption itself. - */ - process_csb(engine); - - /* - * The last active request can then be no later than the last request - * now in ELSP[0]. So search backwards from there, so that if the GPU - * has advanced beyond the last CSB update, it will be pardoned. - */ - active = NULL; - request = port_request(execlists->port); - if (request) { - /* - * Prevent the breadcrumb from advancing before we decide - * which request is currently active. - */ - intel_engine_stop_cs(engine); - - list_for_each_entry_from_reverse(request, - &engine->timeline.requests, - link) { - if (__i915_request_completed(request, - request->global_seqno)) - break; - - active = request; - } - } - + process_csb(engine); /* drain preemption events */ spin_unlock_irqrestore(&engine->timeline.lock, flags); - - return active; } -static void execlists_reset(struct intel_engine_cs *engine, - struct i915_request *request) +static void execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq; unsigned long flags; u32 *regs; - GEM_TRACE("%s request global=%d, current=%d\n", - engine->name, request ? request->global_seqno : 0, - intel_engine_get_seqno(engine)); - spin_lock_irqsave(&engine->timeline.lock, flags); /* @@ -1819,12 +1783,18 @@ static void execlists_reset(struct intel_engine_cs *engine, execlists_cancel_port_requests(execlists); /* Push back any incomplete requests for replay after the reset. */ - __unwind_incomplete_requests(engine); + rq = __unwind_incomplete_requests(engine); /* Following the reset, we need to reload the CSB read/write pointers */ reset_csb_pointers(&engine->execlists); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n", + engine->name, + rq ? rq->global_seqno : 0, + intel_engine_get_seqno(engine), + yesno(stalled)); + if (!rq) + goto out_unlock; /* * If the request was innocent, we leave the request in the ELSP @@ -1837,8 +1807,9 @@ static void execlists_reset(struct intel_engine_cs *engine, * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - if (!request || request->fence.error != -EIO) - return; + i915_reset_request(rq, stalled); + if (!stalled) + goto out_unlock; /* * We want a simple context + ring to execute the breadcrumb update. @@ -1848,7 +1819,7 @@ static void execlists_reset(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. 
*/ - regs = request->hw_context->lrc_reg_state; + regs = rq->hw_context->lrc_reg_state; if (engine->pinned_default_state) { memcpy(regs, /* skip restoring the vanilla PPHWSP */ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, @@ -1856,17 +1827,14 @@ static void execlists_reset(struct intel_engine_cs *engine, } /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ - request->ring->head = intel_ring_wrap(request->ring, request->postfix); + rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix); + intel_ring_update_space(rq->ring); - execlists_init_reg_state(regs, request->gem_context, engine, - request->ring); + execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring); + __execlists_update_reg_state(engine, rq->hw_context); - __execlists_update_reg_state(engine, request->hw_context); - - intel_ring_update_space(request->ring); - - /* Reset WaIdleLiteRestore:bdw,skl as well */ - unwind_wa_tail(request); +out_unlock: + spin_unlock_irqrestore(&engine->timeline.lock, flags); } static void execlists_reset_finish(struct intel_engine_cs *engine) @@ -1879,6 +1847,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) * to sleep before we restart and reload a context. * */ + GEM_BUG_ON(!reset_in_progress(execlists)); if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) execlists->tasklet.func(execlists->tasklet.data); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c81db81e4416..f68c7975006c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -478,8 +478,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv) if (!overlay) return; - intel_overlay_release_old_vid(overlay); - overlay->old_xscale = 0; overlay->old_yscale = 0; overlay->crtc = NULL; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 09c90475168a..a9efc8c71254 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -33,6 +33,7 @@ #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_reset.h" #include "i915_trace.h" #include "intel_drv.h" #include "intel_workarounds.h" @@ -711,52 +712,80 @@ out: return ret; } -static struct i915_request *reset_prepare(struct intel_engine_cs *engine) +static void reset_prepare(struct intel_engine_cs *engine) { intel_engine_stop_cs(engine); - return i915_gem_find_active_request(engine); } -static void skip_request(struct i915_request *rq) +static void reset_ring(struct intel_engine_cs *engine, bool stalled) { - void *vaddr = rq->ring->vaddr; + struct i915_timeline *tl = &engine->timeline; + struct i915_request *pos, *rq; + unsigned long flags; u32 head; - head = rq->infix; - if (rq->postfix < head) { - memset32(vaddr + head, MI_NOOP, - (rq->ring->size - head) / sizeof(u32)); - head = 0; + rq = NULL; + spin_lock_irqsave(&tl->lock, flags); + list_for_each_entry(pos, &tl->requests, link) { + if (!__i915_request_completed(pos, pos->global_seqno)) { + rq = pos; + break; + } } - memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32)); -} - -static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) -{ - GEM_TRACE("%s request global=%d, current=%d\n", - engine->name, rq ? rq->global_seqno : 0, - intel_engine_get_seqno(engine)); + GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n", + engine->name, + rq ? 
rq->global_seqno : 0, + intel_engine_get_seqno(engine), + yesno(stalled)); /* - * Try to restore the logical GPU state to match the continuation - * of the request queue. If we skip the context/PD restore, then - * the next request may try to execute assuming that its context - * is valid and loaded on the GPU and so may try to access invalid - * memory, prompting repeated GPU hangs. + * The guilty request will get skipped on a hung engine. * - * If the request was guilty, we still restore the logical state - * in case the next request requires it (e.g. the aliasing ppgtt), - * but skip over the hung batch. + * Users of client default contexts do not rely on logical + * state preserved between batches so it is safe to execute + * queued requests following the hang. Non default contexts + * rely on preserved state, so skipping a batch loses the + * evolution of the state and it needs to be considered corrupted. + * Executing more queued batches on top of corrupted state is + * risky. But we take the risk by trying to advance through + * the queued requests in order to make the client behaviour + * more predictable around resets, by not throwing away random + * amount of batches it has prepared for execution. Sophisticated + * clients can use gem_reset_stats_ioctl and dma fence status + * (exported via sync_file info ioctl on explicit fences) to observe + * when it loses the context state and should rebuild accordingly. * - * If the request was innocent, we try to replay the request with - * the restored context. + * The context ban, and ultimately the client ban, mechanism are safety + * valves if client submission ends up resulting in nothing more than + * subsequent hangs. */ + if (rq) { - /* If the rq hung, jump to its breadcrumb and skip the batch */ - rq->ring->head = intel_ring_wrap(rq->ring, rq->head); - if (rq->fence.error == -EIO) - skip_request(rq); + /* + * Try to restore the logical GPU state to match the + * continuation of the request queue. If we skip the + * context/PD restore, then the next request may try to execute + * assuming that its context is valid and loaded on the GPU and + * so may try to access invalid memory, prompting repeated GPU + * hangs. + * + * If the request was guilty, we still restore the logical + * state in case the next request requires it (e.g. the + * aliasing ppgtt), but skip over the hung batch. + * + * If the request was innocent, we try to replay the request + * with the restored context. 
+ */ + i915_reset_request(rq, stalled); + + GEM_BUG_ON(rq->ring != engine->buffer); + head = rq->head; + } else { + head = engine->buffer->tail; } + engine->buffer->head = intel_ring_wrap(engine->buffer, head); + + spin_unlock_irqrestore(&tl->lock, flags); } static void reset_finish(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5ad46c2fbc0f..f2effd001540 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -120,13 +120,8 @@ struct intel_instdone { struct intel_engine_hangcheck { u64 acthd; u32 seqno; - enum intel_engine_hangcheck_action action; unsigned long action_timestamp; - int deadlock; struct intel_instdone instdone; - struct i915_request *active_request; - bool stalled:1; - bool wedged:1; }; struct intel_ring { @@ -444,9 +439,8 @@ struct intel_engine_cs { int (*init_hw)(struct intel_engine_cs *engine); struct { - struct i915_request *(*prepare)(struct intel_engine_cs *engine); - void (*reset)(struct intel_engine_cs *engine, - struct i915_request *rq); + void (*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, bool stalled); void (*finish)(struct intel_engine_cs *engine); } reset; @@ -1018,6 +1012,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset) return cs; } +static inline void intel_engine_reset(struct intel_engine_cs *engine, + bool stalled) +{ + if (engine->reset.reset) + engine->reset.reset(engine, stalled); +} + void intel_engines_sanitize(struct drm_i915_private *i915, bool force); bool intel_engine_is_idle(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 12550b55c42f..67431355cd6e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -363,9 +363,7 @@ static int igt_global_reset(void *arg) /* Check that we can issue a global GPU reset */ igt_global_reset_lock(i915); - set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); - mutex_lock(&i915->drm.struct_mutex); reset_count = i915_reset_count(&i915->gpu_error); i915_reset(i915, ALL_ENGINES, NULL); @@ -374,9 +372,7 @@ static int igt_global_reset(void *arg) pr_err("No GPU reset recorded!\n"); err = -EINVAL; } - mutex_unlock(&i915->drm.struct_mutex); - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) @@ -399,9 +395,7 @@ static int igt_wedged_reset(void *arg) i915_gem_set_wedged(i915); GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error)); - set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); i915_reset(i915, ALL_ENGINES, NULL); - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); @@ -511,7 +505,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - if (!wait_for_idle(engine)) { + if (!i915_reset_flush(i915)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -903,20 +897,13 @@ static int igt_reset_engines(void *arg) return 0; } -static u32 fake_hangcheck(struct i915_request *rq, u32 mask) +static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask) { - struct i915_gpu_error *error = &rq->i915->gpu_error; - u32 reset_count = i915_reset_count(error); - - error->stalled_mask = mask; - - /* set_bit() must be after we have setup the backchannel (mask) */ 
- smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); + u32 count = i915_reset_count(&i915->gpu_error); - wake_up_all(&error->wait_queue); + i915_reset(i915, mask, NULL); - return reset_count; + return count; } static int igt_reset_wait(void *arg) @@ -962,7 +949,7 @@ static int igt_reset_wait(void *arg) goto out_rq; } - reset_count = fake_hangcheck(rq, ALL_ENGINES); + reset_count = fake_hangcheck(i915, ALL_ENGINES); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); if (timeout < 0) { @@ -972,7 +959,6 @@ static int igt_reset_wait(void *arg) goto out_rq; } - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); if (i915_reset_count(&i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; @@ -1162,7 +1148,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } out_reset: - fake_hangcheck(rq, intel_engine_flag(rq->engine)); + fake_hangcheck(rq->i915, intel_engine_flag(rq->engine)); if (tsk) { struct igt_wedge_me w; @@ -1341,12 +1327,7 @@ static int igt_reset_queue(void *arg) goto fini; } - reset_count = fake_hangcheck(prev, ENGINE_MASK(id)); - - i915_reset(i915, ENGINE_MASK(id), NULL); - - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, - &i915->gpu_error.flags)); + reset_count = fake_hangcheck(i915, ENGINE_MASK(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1565,6 +1546,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, rq->fence.seqno, hws_seqno(&h, rq)); + i915_gem_set_wedged(i915); err = -EIO; } @@ -1588,7 +1570,6 @@ out: static void force_reset(struct drm_i915_private *i915) { i915_gem_set_wedged(i915); - set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); i915_reset(i915, 0, NULL); } @@ -1618,6 +1599,26 @@ static int igt_atomic_reset(void *arg) if (i915_terminally_wedged(&i915->gpu_error)) goto unlock; + if (intel_has_gpu_reset(i915)) { + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + GEM_TRACE("intel_gpu_reset under %s\n", p->name); + + p->critical_section_begin(); + err = intel_gpu_reset(i915, ALL_ENGINES); + p->critical_section_end(); + + if (err) { + pr_err("intel_gpu_reset failed under %s\n", + p->name); + goto out; + } + } + + force_reset(i915); + } + if (intel_has_reset_engine(i915)) { struct intel_engine_cs *engine; enum intel_engine_id id; diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index a8cac56be835..b15c4f26c593 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -214,7 +214,6 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags); i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); return 0; } @@ -394,7 +393,6 @@ static int live_gpu_reset_gt_engine_workarounds(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_gpu_error *error = &i915->gpu_error; intel_wakeref_t wakeref; struct wa_lists lists; bool ok; @@ -413,7 +411,6 @@ live_gpu_reset_gt_engine_workarounds(void *arg) if (!ok) goto out; - set_bit(I915_RESET_HANDOFF, &error->flags); i915_reset(i915, ALL_ENGINES, "live_workarounds"); ok = verify_gt_engine_wa(i915, &lists, "after reset"); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 
5477ad4a7e7d..8ab5a2688a0c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -58,8 +58,8 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); - cancel_delayed_work_sync(&i915->gt.retire_work); - cancel_delayed_work_sync(&i915->gt.idle_work); + drain_delayed_work(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.idle_work); i915_gem_drain_workqueue(i915); mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From 0ca88ba0d6347cf8c4ea9f264c384594f8fefa11 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 28 Jan 2019 10:23:55 +0000 Subject: drm/i915: Always allocate an object/vma for the HWSP Currently we only allocate an object and vma if we are using a GGTT virtual HWSP, and a plain struct page for a physical HWSP. For convenience later on with global timelines, it will be useful to always have the status page being tracked by a struct i915_vma. Make it so. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190128102356.15037-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_engine_cs.c | 109 ++++++++++++++------------- drivers/gpu/drm/i915/intel_guc_submission.c | 6 ++ drivers/gpu/drm/i915/intel_lrc.c | 12 ++- drivers/gpu/drm/i915/intel_ringbuffer.c | 21 ++++-- drivers/gpu/drm/i915/intel_ringbuffer.h | 23 ++---- drivers/gpu/drm/i915/selftests/mock_engine.c | 2 +- 6 files changed, 93 insertions(+), 80 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 1a5c163b98d6..2657eb6fd914 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -506,27 +506,61 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs *engine) { + struct i915_vma *vma; + /* Prevent writes into HWSP after returning the page to the system */ intel_engine_set_hwsp_writemask(engine, ~0u); - if (HWS_NEEDS_PHYSICAL(engine->i915)) { - void *addr = fetch_and_zero(&engine->status_page.page_addr); + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) + return; - __free_page(virt_to_page(addr)); - } + if (!HWS_NEEDS_PHYSICAL(engine->i915)) + i915_vma_unpin(vma); + + i915_gem_object_unpin_map(vma->obj); + __i915_gem_object_release_unless_active(vma->obj); +} + +static int pin_ggtt_status_page(struct intel_engine_cs *engine, + struct i915_vma *vma) +{ + unsigned int flags; + + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* + * On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). 
+ */ + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; - i915_vma_unpin_and_release(&engine->status_page.vma, - I915_VMA_RELEASE_MAP); + return i915_vma_pin(vma, 0, 0, flags); } static int init_status_page(struct intel_engine_cs *engine) { struct drm_i915_gem_object *obj; struct i915_vma *vma; - unsigned int flags; void *vaddr; int ret; + /* + * Though the HWS register does support 36bit addresses, historically + * we have had hangs and corruption reported due to wild writes if + * the HWS is placed above 4G. We only allow objects to be allocated + * in GFP_DMA32 for i965, and no earlier physical address users had + * access to more than 4G. + */ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); @@ -543,61 +577,30 @@ static int init_status_page(struct intel_engine_cs *engine) goto err; } - flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actually map it). - */ - flags |= PIN_MAPPABLE; - else - flags |= PIN_HIGH; - ret = i915_vma_pin(vma, 0, 0, flags); - if (ret) - goto err; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - goto err_unpin; + goto err; } + engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); engine->status_page.vma = vma; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma); - engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); + + if (!HWS_NEEDS_PHYSICAL(engine->i915)) { + ret = pin_ggtt_status_page(engine, vma); + if (ret) + goto err_unpin; + } + return 0; err_unpin: - i915_vma_unpin(vma); + i915_gem_object_unpin_map(obj); err: i915_gem_object_put(obj); return ret; } -static int init_phys_status_page(struct intel_engine_cs *engine) -{ - struct page *page; - - /* - * Though the HWS register does support 36bit addresses, historically - * we have had hangs and corruption reported due to wild writes if - * the HWS is placed above 4G. - */ - page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO); - if (!page) - return -ENOMEM; - - engine->status_page.page_addr = page_address(page); - - return 0; -} - static void __intel_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { @@ -690,10 +693,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) goto err_unpin_preempt; - if (HWS_NEEDS_PHYSICAL(i915)) - ret = init_phys_status_page(engine); - else - ret = init_status_page(engine); + ret = init_status_page(engine); if (ret) goto err_breadcrumbs; @@ -1366,7 +1366,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, } if (HAS_EXECLISTS(dev_priv)) { - const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + const u32 *hws = + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; unsigned int idx; u8 read, write; @@ -1549,7 +1550,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, spin_unlock_irqrestore(&b->rb_lock, flags); drm_printf(m, "HWSP:\n"); - hexdump(m, engine->status_page.page_addr, PAGE_SIZE); + hexdump(m, engine->status_page.addr, PAGE_SIZE); drm_printf(m, "Idle? 
%s\n", yesno(intel_engine_is_idle(engine))); } diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 45e2db683fe5..4295ade0d613 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -81,6 +81,12 @@ * */ +static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 185867106c14..2cf99c436658 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -172,6 +172,12 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_engine_cs *engine, struct intel_ring *ring); +static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_INDEX_ADDR); +} + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -1699,7 +1705,7 @@ static void enable_execlists(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(STOP_RING)); I915_WRITE(RING_HWS_PGA(engine->mmio_base), - engine->status_page.ggtt_offset); + i915_ggtt_offset(engine->status_page.vma)); POSTING_READ(RING_HWS_PGA(engine->mmio_base)); } @@ -2244,10 +2250,10 @@ static int logical_ring_init(struct intel_engine_cs *engine) } execlists->csb_status = - &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; execlists->csb_write = - &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; + &engine->status_page.addr[intel_hws_csb_write_index(i915)]; reset_csb_pointers(execlists); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a9efc8c71254..cb6d2aa2a829 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -43,6 +43,12 @@ */ #define LEGACY_REQUEST_SIZE 200 +static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_INDEX_ADDR); +} + static unsigned int __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) @@ -503,12 +509,17 @@ static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) I915_WRITE(HWS_PGA, addr); } -static void ring_setup_phys_status_page(struct intel_engine_cs *engine) +static struct page *status_page(struct intel_engine_cs *engine) { - struct page *page = virt_to_page(engine->status_page.page_addr); - phys_addr_t phys = PFN_PHYS(page_to_pfn(page)); + struct drm_i915_gem_object *obj = engine->status_page.vma->obj; - set_hws_pga(engine, phys); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static void ring_setup_phys_status_page(struct intel_engine_cs *engine) +{ + set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); set_hwstam(engine, ~0u); } @@ -575,7 +586,7 @@ static void flush_cs_tlb(struct intel_engine_cs *engine) static void ring_setup_status_page(struct intel_engine_cs *engine) { - set_hwsp(engine, engine->status_page.ggtt_offset); + set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); set_hwstam(engine, ~0u); flush_cs_tlb(engine); diff --git 
a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index f2effd001540..32371ae67f24 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -32,8 +32,7 @@ struct i915_sched_attr; struct intel_hw_status_page { struct i915_vma *vma; - u32 *page_addr; - u32 ggtt_offset; + u32 *addr; }; #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base)) @@ -671,7 +670,7 @@ static inline u32 intel_read_status_page(const struct intel_engine_cs *engine, int reg) { /* Ensure that the compiler doesn't optimize away the load. */ - return READ_ONCE(engine->status_page.page_addr[reg]); + return READ_ONCE(engine->status_page.addr[reg]); } static inline void @@ -684,12 +683,12 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) */ if (static_cpu_has(X86_FEATURE_CLFLUSH)) { mb(); - clflush(&engine->status_page.page_addr[reg]); - engine->status_page.page_addr[reg] = value; - clflush(&engine->status_page.page_addr[reg]); + clflush(&engine->status_page.addr[reg]); + engine->status_page.addr[reg] = value; + clflush(&engine->status_page.addr[reg]); mb(); } else { - WRITE_ONCE(engine->status_page.page_addr[reg], value); + WRITE_ONCE(engine->status_page.addr[reg], value); } } @@ -877,16 +876,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine, void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); -static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) -{ - return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR; -} - -static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) -{ - return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR; -} - /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 905318b7ae18..4e5b4dc6df0f 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.i915 = i915; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; - engine->base.status_page.page_addr = (void *)(engine + 1); + engine->base.status_page.addr = (void *)(engine + 1); engine->base.context_pin = mock_context_pin; engine->base.request_alloc = mock_request_alloc; -- cgit v1.2.3 From 52954edd1f7030f753a63093c16826ef50805098 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 28 Jan 2019 18:18:09 +0000 Subject: drm/i915: Allocate a status page for each timeline Allocate a page for use as a status page by a group of timelines, as we only need a dword of storage for each (rounded up to the cacheline for safety) we can pack multiple timelines into the same page. Each timeline will then be able to track its own HW seqno. v2: Reuse the common per-engine HWSP for the solitary ringbuffer timeline, so that we do not have to emit (using per-gen specialised vfuncs) the breadcrumb into the distinct timeline HWSP and instead can keep on using the common MI_STORE_DWORD_INDEX. However, to maintain the sleight-of-hand for the global/per-context seqno switchover, we will store both temporarily (and so use a custom offset for the shared timeline HWSP until the switch over). 
v3: Keep things simple and allocate a page for each timeline, page sharing comes next. v4: I was caught repeating the same MI_STORE_DWORD_IMM over and over again in selftests. v5: And caught red handed copying create timeline + check. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190128181812.22804-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_timeline.c | 121 +++++++- drivers/gpu/drm/i915/i915_timeline.h | 21 +- drivers/gpu/drm/i915/intel_engine_cs.c | 76 +++-- drivers/gpu/drm/i915/intel_lrc.c | 22 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 10 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +- .../gpu/drm/i915/selftests/i915_live_selftests.h | 1 + .../gpu/drm/i915/selftests/i915_mock_selftests.h | 2 +- drivers/gpu/drm/i915/selftests/i915_timeline.c | 326 ++++++++++++++++++++- drivers/gpu/drm/i915/selftests/mock_engine.c | 14 +- 10 files changed, 543 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 84550f17d3df..8d5792311a8f 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -9,28 +9,78 @@ #include "i915_timeline.h" #include "i915_syncmap.h" -void i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *timeline, - const char *name) +static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) + i915_gem_object_put(obj); + + return vma; +} + +static int hwsp_alloc(struct i915_timeline *timeline) +{ + struct i915_vma *vma; + + vma = __hwsp_alloc(timeline->i915); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + timeline->hwsp_ggtt = vma; + timeline->hwsp_offset = 0; + + return 0; +} + +int i915_timeline_init(struct drm_i915_private *i915, + struct i915_timeline *timeline, + const char *name, + struct i915_vma *global_hwsp) { struct i915_gt_timelines *gt = &i915->gt.timelines; + void *vaddr; + int err; /* * Ideally we want a set of engines on a single leaf as we expect * to mostly be tracking synchronisation between engines. It is not * a huge issue if this is not the case, but we may want to mitigate * any page crossing penalties if they become an issue. + * + * Called during early_init before we know how many engines there are. 
*/ BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); timeline->i915 = i915; timeline->name = name; + timeline->pin_count = 0; + + if (global_hwsp) { + timeline->hwsp_ggtt = i915_vma_get(global_hwsp); + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + } else { + err = hwsp_alloc(timeline); + if (err) + return err; + } - mutex_lock(>->mutex); - list_add(&timeline->link, >->list); - mutex_unlock(>->mutex); + vaddr = i915_gem_object_pin_map(timeline->hwsp_ggtt->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_put(timeline->hwsp_ggtt); + return PTR_ERR(vaddr); + } - /* Called during early_init before we know how many engines there are */ + timeline->hwsp_seqno = + memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); timeline->fence_context = dma_fence_context_alloc(1); @@ -40,6 +90,12 @@ void i915_timeline_init(struct drm_i915_private *i915, INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); + + mutex_lock(>->mutex); + list_add(&timeline->link, >->list); + mutex_unlock(>->mutex); + + return 0; } void i915_timelines_init(struct drm_i915_private *i915) @@ -85,6 +141,7 @@ void i915_timeline_fini(struct i915_timeline *timeline) { struct i915_gt_timelines *gt = &timeline->i915->gt.timelines; + GEM_BUG_ON(timeline->pin_count); GEM_BUG_ON(!list_empty(&timeline->requests)); i915_syncmap_free(&timeline->sync); @@ -92,23 +149,69 @@ void i915_timeline_fini(struct i915_timeline *timeline) mutex_lock(>->mutex); list_del(&timeline->link); mutex_unlock(>->mutex); + + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + i915_vma_put(timeline->hwsp_ggtt); } struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, const char *name) +i915_timeline_create(struct drm_i915_private *i915, + const char *name, + struct i915_vma *global_hwsp) { struct i915_timeline *timeline; + int err; timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); if (!timeline) return ERR_PTR(-ENOMEM); - i915_timeline_init(i915, timeline, name); + err = i915_timeline_init(i915, timeline, name, global_hwsp); + if (err) { + kfree(timeline); + return ERR_PTR(err); + } + kref_init(&timeline->kref); return timeline; } +int i915_timeline_pin(struct i915_timeline *tl) +{ + int err; + + if (tl->pin_count++) + return 0; + GEM_BUG_ON(!tl->pin_count); + + err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + return 0; + +unpin: + tl->pin_count = 0; + return err; +} + +void i915_timeline_unpin(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + if (--tl->pin_count) + return; + + /* + * Since this timeline is idle, all bariers upon which we were waiting + * must also be complete and so we can discard the last used barriers + * without loss of information. + */ + i915_syncmap_free(&tl->sync); + + __i915_vma_unpin(tl->hwsp_ggtt); +} + void __i915_timeline_free(struct kref *kref) { struct i915_timeline *timeline = diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index 87ad2dd31c20..0c3739d53d79 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -32,6 +32,8 @@ #include "i915_syncmap.h" #include "i915_utils.h" +struct i915_vma; + struct i915_timeline { u64 fence_context; u32 seqno; @@ -40,6 +42,11 @@ struct i915_timeline { #define TIMELINE_CLIENT 0 /* default subclass */ #define TIMELINE_ENGINE 1 + unsigned int pin_count; + const u32 *hwsp_seqno; + struct i915_vma *hwsp_ggtt; + u32 hwsp_offset; + /** * List of breadcrumbs associated with GPU requests currently * outstanding. 
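A rough usage sketch of the new timeline HWSP API follows (illustrative only, not part of the diff): pin the timeline so its backing page is resident in the GGTT, emit a dword write to the timeline's private slot, then drop the pin. emit_ggtt_store_dw() stands in for the selftest helper added later in this series.

/*
 * Illustrative sketch: write a value into a timeline's private HWSP
 * slot from a request, using i915_timeline_pin()/unpin() as introduced
 * above. Error handling is trimmed to the essentials.
 */
static int sketch_write_timeline_seqno(struct i915_timeline *tl,
				       struct i915_request *rq,
				       u32 value)
{
	u32 addr;
	int err;

	err = i915_timeline_pin(tl); /* pins tl->hwsp_ggtt into the GGTT */
	if (err)
		return err;

	/* Each timeline owns a cacheline within the page, at hwsp_offset */
	addr = i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset;

	err = emit_ggtt_store_dw(rq, addr, value); /* selftest helper, see below */

	i915_timeline_unpin(tl);
	return err;
}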
@@ -71,9 +78,10 @@ struct i915_timeline { struct kref kref; }; -void i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *tl, - const char *name); +int i915_timeline_init(struct drm_i915_private *i915, + struct i915_timeline *tl, + const char *name, + struct i915_vma *hwsp); void i915_timeline_fini(struct i915_timeline *tl); static inline void @@ -96,7 +104,9 @@ i915_timeline_set_subclass(struct i915_timeline *timeline, } struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, const char *name); +i915_timeline_create(struct drm_i915_private *i915, + const char *name, + struct i915_vma *global_hwsp); static inline struct i915_timeline * i915_timeline_get(struct i915_timeline *timeline) @@ -135,6 +145,9 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno); } +int i915_timeline_pin(struct i915_timeline *tl); +void i915_timeline_unpin(struct i915_timeline *tl); + void i915_timelines_init(struct drm_i915_private *i915); void i915_timelines_park(struct drm_i915_private *i915); void i915_timelines_fini(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2657eb6fd914..515e87846afd 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -484,26 +484,6 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) execlists->queue = RB_ROOT_CACHED; } -/** - * intel_engines_setup_common - setup engine state not requiring hw access - * @engine: Engine to setup. - * - * Initializes @engine@ structure members shared between legacy and execlists - * submission modes which do not require hardware access. - * - * Typically done early in the submission mode specific engine setup stage. - */ -void intel_engine_setup_common(struct intel_engine_cs *engine) -{ - i915_timeline_init(engine->i915, &engine->timeline, engine->name); - i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); - - intel_engine_init_execlist(engine); - intel_engine_init_hangcheck(engine); - intel_engine_init_batch_pool(engine); - intel_engine_init_cmd_parser(engine); -} - static void cleanup_status_page(struct intel_engine_cs *engine) { struct i915_vma *vma; @@ -601,6 +581,44 @@ err: return ret; } +/** + * intel_engines_setup_common - setup engine state not requiring hw access + * @engine: Engine to setup. + * + * Initializes @engine@ structure members shared between legacy and execlists + * submission modes which do not require hardware access. + * + * Typically done early in the submission mode specific engine setup stage. 
+ */ +int intel_engine_setup_common(struct intel_engine_cs *engine) +{ + int err; + + err = init_status_page(engine); + if (err) + return err; + + err = i915_timeline_init(engine->i915, + &engine->timeline, + engine->name, + engine->status_page.vma); + if (err) + goto err_hwsp; + + i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); + + intel_engine_init_execlist(engine); + intel_engine_init_hangcheck(engine); + intel_engine_init_batch_pool(engine); + intel_engine_init_cmd_parser(engine); + + return 0; + +err_hwsp: + cleanup_status_page(engine); + return err; +} + static void __intel_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { @@ -617,7 +635,7 @@ struct measure_breadcrumb { static int measure_breadcrumb_dw(struct intel_engine_cs *engine) { struct measure_breadcrumb *frame; - unsigned int dw; + int dw = -ENOMEM; GEM_BUG_ON(!engine->i915->gt.scratch); @@ -625,7 +643,10 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) if (!frame) return -ENOMEM; - i915_timeline_init(engine->i915, &frame->timeline, "measure"); + if (i915_timeline_init(engine->i915, + &frame->timeline, "measure", + engine->status_page.vma)) + goto out_frame; INIT_LIST_HEAD(&frame->ring.request_list); frame->ring.timeline = &frame->timeline; @@ -642,8 +663,9 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) dw = engine->emit_breadcrumb(&frame->rq, frame->cs) - frame->cs; i915_timeline_fini(&frame->timeline); - kfree(frame); +out_frame: + kfree(frame); return dw; } @@ -693,20 +715,14 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) goto err_unpin_preempt; - ret = init_status_page(engine); - if (ret) - goto err_breadcrumbs; - ret = measure_breadcrumb_dw(engine); if (ret < 0) - goto err_status_page; + goto err_breadcrumbs; engine->emit_breadcrumb_dw = ret; return 0; -err_status_page: - cleanup_status_page(engine); err_breadcrumbs: intel_engine_fini_breadcrumbs(engine); err_unpin_preempt: diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 9ae7f77293a0..e388f37743a2 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2206,10 +2206,14 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } -static void +static int logical_ring_setup(struct intel_engine_cs *engine) { - intel_engine_setup_common(engine); + int err; + + err = intel_engine_setup_common(engine); + if (err) + return err; /* Intentionally left blank. */ engine->buffer = NULL; @@ -2219,6 +2223,8 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + return 0; } static int logical_ring_init(struct intel_engine_cs *engine) @@ -2267,7 +2273,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine) { int ret; - logical_ring_setup(engine); + ret = logical_ring_setup(engine); + if (ret) + return ret; /* Override some for render ring. 
*/ engine->init_context = gen8_init_rcs_context; @@ -2296,7 +2304,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine) int logical_xcs_ring_init(struct intel_engine_cs *engine) { - logical_ring_setup(engine); + int err; + + err = logical_ring_setup(engine); + if (err) + return err; return logical_ring_init(engine); } @@ -2629,7 +2641,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, goto error_deref_obj; } - timeline = i915_timeline_create(ctx->i915, ctx->name); + timeline = i915_timeline_create(ctx->i915, ctx->name, NULL); if (IS_ERR(timeline)) { ret = PTR_ERR(timeline); goto error_deref_obj; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cb6d2aa2a829..174795622eb1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1545,9 +1545,13 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) struct intel_ring *ring; int err; - intel_engine_setup_common(engine); + err = intel_engine_setup_common(engine); + if (err) + return err; - timeline = i915_timeline_create(engine->i915, engine->name); + timeline = i915_timeline_create(engine->i915, + engine->name, + engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; @@ -1571,6 +1575,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) goto err_unpin; + GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma); + return 0; err_unpin: diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 32371ae67f24..2927b712b973 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -712,7 +712,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32)) #define I915_GEM_HWS_PREEMPT 0x32 #define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32)) -#define I915_GEM_HWS_SCRATCH 0x40 +#define I915_GEM_HWS_SEQNO 0x40 +#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) +#define I915_GEM_HWS_SCRATCH 0x80 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32)) #define I915_HWS_CSB_BUF0_INDEX 0x10 @@ -818,7 +820,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno); -void intel_engine_setup_common(struct intel_engine_cs *engine); +int intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a15713cae3b3..76b4f87fc853 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -13,6 +13,7 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ selftest(uncore, intel_uncore_live_selftests) selftest(workarounds, intel_workarounds_live_selftests) selftest(requests, i915_request_live_selftests) +selftest(timelines, i915_timeline_live_selftests) selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h 
b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 1b70208eeea7..4a83a1c6c406 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -16,7 +16,7 @@ selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) -selftest(timelines, i915_gem_timeline_mock_selftests) +selftest(timelines, i915_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) selftest(dmabuf, i915_gem_dmabuf_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 19f1c6a5c8fb..1585b614510d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -7,6 +7,7 @@ #include "../i915_selftest.h" #include "i915_random.h" +#include "igt_flush_test.h" #include "mock_gem_device.h" #include "mock_timeline.h" @@ -256,7 +257,7 @@ static int bench_sync(void *arg) return 0; } -int i915_gem_timeline_mock_selftests(void) +int i915_timeline_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_sync), @@ -265,3 +266,326 @@ int i915_gem_timeline_mock_selftests(void) return i915_subtests(tests, NULL); } + +static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = 0; + *cs++ = value; + } else if (INTEL_GEN(rq->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = addr; + *cs++ = value; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = addr; + *cs++ = value; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static u32 hwsp_address(const struct i915_timeline *tl) +{ + return i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset; +} + +static struct i915_request * +tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */ + + err = i915_timeline_pin(tl); + if (err) { + rq = ERR_PTR(err); + goto out; + } + + rq = i915_request_alloc(engine, engine->i915->kernel_context); + if (IS_ERR(rq)) + goto out_unpin; + + err = emit_ggtt_store_dw(rq, hwsp_address(tl), value); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_unpin: + i915_timeline_unpin(tl); +out: + if (IS_ERR(rq)) + pr_err("Failed to write to timeline!\n"); + return rq; +} + +static struct i915_timeline * +checked_i915_timeline_create(struct drm_i915_private *i915) +{ + struct i915_timeline *tl; + + tl = i915_timeline_create(i915, "live", NULL); + if (IS_ERR(tl)) + return tl; + + if (*tl->hwsp_seqno != tl->seqno) { + pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", + *tl->hwsp_seqno, tl->seqno); + i915_timeline_put(tl); + return ERR_PTR(-EINVAL); + } + + return tl; +} + +static int live_hwsp_engine(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write 
+ * independently to each of their breadcrumb slots. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + for (n = 0; n < NUM_TIMELINES; n++) { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_alternate(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots with adjacent + * engines. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for (n = 0; n < NUM_TIMELINES; n++) { + for_each_engine(engine, i915, id) { + struct i915_timeline *tl; + struct i915_request *rq; + + if (!intel_engine_can_store_dword(engine)) + continue; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_recycle(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count; + int err = 0; + + /* + * Check seqno writes into one timeline at a time. We expect to + * recycle the breadcrumb slot between iterations and neither + * want to confuse ourselves or the GPU. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + + if (!intel_engine_can_store_dword(engine)) + continue; + + do { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + if (i915_request_wait(rq, + I915_WAIT_LOCKED, + HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + i915_timeline_put(tl); + err = -EIO; + goto out; + } + + if (*tl->hwsp_seqno != count) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + count, *tl->hwsp_seqno); + err = -EINVAL; + } + + i915_timeline_put(tl); + count++; + + if (err) + goto out; + + i915_timelines_park(i915); /* Encourage recycling! */ + } while (!__igt_timeout(end_time, NULL)); + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +int i915_timeline_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_hwsp_recycle), + SUBTEST(live_hwsp_engine), + SUBTEST(live_hwsp_alternate), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 4e5b4dc6df0f..919c89fd6ee5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -39,7 +39,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) if (!ring) return NULL; - i915_timeline_init(engine->i915, &ring->timeline, engine->name); + if (i915_timeline_init(engine->i915, + &ring->timeline, engine->name, + NULL)) { + kfree(ring); + return NULL; + } ring->base.size = sz; ring->base.effective_size = sz; @@ -208,7 +213,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.emit_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; - i915_timeline_init(i915, &engine->base.timeline, engine->base.name); + if (i915_timeline_init(i915, + &engine->base.timeline, + engine->base.name, + NULL)) + goto err_free; i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); intel_engine_init_breadcrumbs(&engine->base); @@ -226,6 +235,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, err_breadcrumbs: intel_engine_fini_breadcrumbs(&engine->base); i915_timeline_fini(&engine->base.timeline); +err_free: kfree(engine); return NULL; } -- cgit v1.2.3 From 5013eb8cd601c31e6d7d1b9d3291b24e933b77b2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 28 Jan 2019 18:18:11 +0000 Subject: drm/i915: Track the context's seqno in its own timeline HWSP Now that we have allocated ourselves a cacheline to store a breadcrumb, we can emit a write from the GPU into the timeline's HWSP of the per-context seqno as we complete each request. This drops the mirroring of the per-engine HWSP and allows each context to operate independently. We do not need to unwind the per-context timeline, and so requests are always consistent with the timeline breadcrumb, greatly simplifying the completion checks as we no longer need to be concerned about the global_seqno changing mid check. 
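In outline, the simplified completion check this enables compares the breadcrumb the GPU last wrote into the timeline's HWSP cacheline against the request's own fence seqno; a minimal sketch (assuming i915_seqno_passed() is the usual wrap-safe serial-number comparison, roughly (s32)(a - b) >= 0) looks like:

/*
 * Sketch only: per-context completion test against the timeline HWSP.
 * Mirrors the i915_request_completed() change in the patch below.
 */
static inline bool sketch_request_completed(const struct i915_request *rq)
{
	/* Signalled requests may already be decoupled from their HWSP */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return true;

	/* Has the GPU's breadcrumb caught up with this request's seqno? */
	return i915_seqno_passed(READ_ONCE(*rq->hwsp_seqno), rq->fence.seqno);
}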
One complication though is that we have to be wary that the request may outlive the HWSP and so avoid touching the potentially dangling pointer after we have retired the fence. We also have to guard our access of the HWSP with RCU; the release of the obj->mm.pages should already be RCU-safe. At this point, we are emitting both per-context and global seqno and still using the single per-engine execution timeline for resolving interrupts. v2: s/fake_complete/mark_complete/ Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190128181812.22804-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_request.c | 3 +- drivers/gpu/drm/i915/i915_request.h | 30 ++++----- drivers/gpu/drm/i915/i915_reset.c | 1 + drivers/gpu/drm/i915/i915_timeline.c | 4 ++ drivers/gpu/drm/i915/intel_engine_cs.c | 15 ++++- drivers/gpu/drm/i915/intel_lrc.c | 31 +++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 87 ++++++++++++++++++++------ drivers/gpu/drm/i915/selftests/i915_timeline.c | 7 +-- drivers/gpu/drm/i915/selftests/mock_engine.c | 19 +++++- 10 files changed, 139 insertions(+), 60 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 761714448ff3..4e0de22f0166 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2890,7 +2890,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) */ spin_lock_irqsave(&engine->timeline.lock, flags); list_for_each_entry(request, &engine->timeline.requests, link) { - if (__i915_request_completed(request, request->global_seqno)) + if (i915_request_completed(request)) continue; active = request; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a076fd0b7ba6..4d58770e6a8c 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -199,6 +199,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, spin_unlock(&engine->timeline.lock); spin_lock(&rq->lock); + i915_request_mark_complete(rq); if (!i915_request_signaled(rq)) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) @@ -621,7 +622,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) rq->ring = ce->ring; rq->timeline = ce->ring->timeline; GEM_BUG_ON(rq->timeline == &engine->timeline); - rq->hwsp_seqno = &engine->status_page.addr[I915_GEM_HWS_INDEX]; + rq->hwsp_seqno = rq->timeline->hwsp_seqno; spin_lock_init(&rq->lock); dma_fence_init(&rq->fence, diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index ade010fe6e26..96c586d6ff4d 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -289,6 +289,7 @@ long i915_request_wait(struct i915_request *rq, static inline bool i915_request_signaled(const struct i915_request *rq) { + /* The request may live longer than its HWSP, so check flags first!
*/ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags); } @@ -340,32 +341,23 @@ static inline u32 hwsp_seqno(const struct i915_request *rq) */ static inline bool i915_request_started(const struct i915_request *rq) { - u32 seqno; - - seqno = i915_request_global_seqno(rq); - if (!seqno) /* not yet submitted to HW */ - return false; + if (i915_request_signaled(rq)) + return true; - return i915_seqno_passed(hwsp_seqno(rq), seqno - 1); -} - -static inline bool -__i915_request_completed(const struct i915_request *rq, u32 seqno) -{ - GEM_BUG_ON(!seqno); - return i915_seqno_passed(hwsp_seqno(rq), seqno) && - seqno == i915_request_global_seqno(rq); + return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); } static inline bool i915_request_completed(const struct i915_request *rq) { - u32 seqno; + if (i915_request_signaled(rq)) + return true; - seqno = i915_request_global_seqno(rq); - if (!seqno) - return false; + return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno); +} - return __i915_request_completed(rq, seqno); +static inline void i915_request_mark_complete(struct i915_request *rq) +{ + rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */ } void i915_retire_requests(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c index d2dca85a543d..bd82f9b1043f 100644 --- a/drivers/gpu/drm/i915/i915_reset.c +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -760,6 +760,7 @@ static void nop_submit_request(struct i915_request *request) spin_lock_irqsave(&request->engine->timeline.lock, flags); __i915_request_submit(request); + i915_request_mark_complete(request); intel_engine_write_global_seqno(request->engine, request->global_seqno); spin_unlock_irqrestore(&request->engine->timeline.lock, flags); } diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index add8fc33cf6e..e4c11414a824 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -270,6 +270,10 @@ int i915_timeline_pin(struct i915_timeline *tl) if (err) goto unpin; + tl->hwsp_offset = + i915_ggtt_offset(tl->hwsp_ggtt) + + offset_in_page(tl->hwsp_offset); + return 0; unpin: diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 515e87846afd..ead9c4371fe1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -660,10 +660,16 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) frame->rq.ring = &frame->ring; frame->rq.timeline = &frame->timeline; + dw = i915_timeline_pin(&frame->timeline); + if (dw < 0) + goto out_timeline; + dw = engine->emit_breadcrumb(&frame->rq, frame->cs) - frame->cs; - i915_timeline_fini(&frame->timeline); + i915_timeline_unpin(&frame->timeline); +out_timeline: + i915_timeline_fini(&frame->timeline); out_frame: kfree(frame); return dw; @@ -1426,9 +1432,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, char hdr[80]; snprintf(hdr, sizeof(hdr), - "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ", + "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ", idx, count, - i915_ggtt_offset(rq->ring->vma)); + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset); print_request(m, rq, hdr); } else { drm_printf(m, "\t\tELSP[%d] idle\n", idx); @@ -1538,6 +1545,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->ring->emit); drm_printf(m, "\t\tring->space: 0x%08x\n", rq->ring->space); + drm_printf(m, "\t\tring->hwsp: 
0x%08x\n", + rq->timeline->hwsp_offset); print_request_ring(m, rq); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e388f37743a2..fdbb3fe8eac9 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -832,10 +832,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) list_for_each_entry(rq, &engine->timeline.requests, link) { GEM_BUG_ON(!rq->global_seqno); - if (i915_request_signaled(rq)) - continue; + if (!i915_request_signaled(rq)) + dma_fence_set_error(&rq->fence, -EIO); - dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); } /* Flush the queued requests to the timeline list (for retiring). */ @@ -845,9 +845,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) priolist_for_each_request_consume(rq, rn, p, i) { list_del_init(&rq->sched.link); - - dma_fence_set_error(&rq->fence, -EIO); __i915_request_submit(rq); + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); } rb_erase_cached(&p->node, &execlists->queue); @@ -2044,10 +2044,17 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - cs = gen8_emit_ggtt_write(cs, request->global_seqno, + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + request->timeline->hwsp_offset); + + cs = gen8_emit_ggtt_write(cs, + request->global_seqno, intel_hws_seqno_address(request->engine)); + *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2056,18 +2063,20 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) static u32 *gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) { - /* We're using qword write, seqno should be aligned to 8 bytes. 
*/ - BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); - cs = gen8_emit_ggtt_write_rcs(cs, - request->global_seqno, - intel_hws_seqno_address(request->engine), + request->fence.seqno, + request->timeline->hwsp_offset, PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | PIPE_CONTROL_DEPTH_CACHE_FLUSH | PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL); + cs = gen8_emit_ggtt_write_rcs(cs, + request->global_seqno, + intel_hws_seqno_address(request->engine), + PIPE_CONTROL_CS_STALL); + *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 174795622eb1..ee3719324e2d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -326,6 +326,11 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL); + *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT; *cs++ = rq->global_seqno; @@ -427,6 +432,13 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL); + *cs++ = rq->timeline->hwsp_offset; + *cs++ = rq->fence.seqno; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL); *cs++ = intel_hws_seqno_address(rq->engine); *cs++ = rq->global_seqno; @@ -441,10 +453,19 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW; - *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT; + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = rq->global_seqno; + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); @@ -457,14 +478,21 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; - *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW; - *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT; + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = rq->global_seqno; for (i = 0; i < GEN7_XCS_WA; i++) { *cs++ = MI_STORE_DWORD_INDEX; - *cs++ = I915_GEM_HWS_INDEX_ADDR; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; } *cs++ = MI_FLUSH_DW; @@ -472,7 +500,6 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 
*cs) *cs++ = 0; *cs++ = MI_USER_INTERRUPT; - *cs++ = MI_NOOP; rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); @@ -738,7 +765,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled) rq = NULL; spin_lock_irqsave(&tl->lock, flags); list_for_each_entry(pos, &tl->requests, link) { - if (!__i915_request_completed(pos, pos->global_seqno)) { + if (!i915_request_completed(pos)) { rq = pos; break; } @@ -880,10 +907,10 @@ static void cancel_requests(struct intel_engine_cs *engine) list_for_each_entry(request, &engine->timeline.requests, link) { GEM_BUG_ON(!request->global_seqno); - if (i915_request_signaled(request)) - continue; + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); - dma_fence_set_error(&request->fence, -EIO); + i915_request_mark_complete(request); } intel_write_status_page(engine, @@ -907,14 +934,20 @@ static void i9xx_submit_request(struct i915_request *request) static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) { + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + *cs++ = MI_FLUSH; + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + *cs++ = MI_STORE_DWORD_INDEX; *cs++ = I915_GEM_HWS_INDEX_ADDR; *cs++ = rq->global_seqno; *cs++ = MI_USER_INTERRUPT; - *cs++ = MI_NOOP; rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); @@ -927,8 +960,15 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + *cs++ = MI_FLUSH; + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + BUILD_BUG_ON(GEN5_WA_STORES < 1); for (i = 0; i < GEN5_WA_STORES; i++) { *cs++ = MI_STORE_DWORD_INDEX; @@ -937,6 +977,7 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) } *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); @@ -1169,6 +1210,10 @@ int intel_ring_pin(struct intel_ring *ring) GEM_BUG_ON(ring->vaddr); + ret = i915_timeline_pin(ring->timeline); + if (ret) + return ret; + flags = PIN_GLOBAL; /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
*/ @@ -1185,28 +1230,32 @@ int intel_ring_pin(struct intel_ring *ring) else ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); if (unlikely(ret)) - return ret; + goto unpin_timeline; } ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) - return ret; + goto unpin_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else addr = i915_gem_object_pin_map(vma->obj, map); - if (IS_ERR(addr)) - goto err; + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto unpin_ring; + } vma->obj->pin_global++; ring->vaddr = addr; return 0; -err: +unpin_ring: i915_vma_unpin(vma); - return PTR_ERR(addr); +unpin_timeline: + i915_timeline_unpin(ring->timeline); + return ret; } void intel_ring_reset(struct intel_ring *ring, u32 tail) @@ -1235,6 +1284,8 @@ void intel_ring_unpin(struct intel_ring *ring) ring->vma->obj->pin_global--; i915_vma_unpin(ring->vma); + + i915_timeline_unpin(ring->timeline); } static struct i915_vma * diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index c34340f074cf..12ea69b1a1e5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -440,11 +440,6 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) return 0; } -static u32 hwsp_address(const struct i915_timeline *tl) -{ - return i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset; -} - static struct i915_request * tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) { @@ -463,7 +458,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) if (IS_ERR(rq)) goto out_unpin; - err = emit_ggtt_store_dw(rq, hwsp_address(tl), value); + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); i915_request_add(rq); if (err) rq = ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 919c89fd6ee5..95e890d7f58b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -30,6 +30,17 @@ struct mock_ring { struct i915_timeline timeline; }; +static void mock_timeline_pin(struct i915_timeline *tl) +{ + tl->pin_count++; +} + +static void mock_timeline_unpin(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + tl->pin_count--; +} + static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { const unsigned long sz = PAGE_SIZE / 2; @@ -76,6 +87,8 @@ static void advance(struct mock_request *request) { list_del_init(&request->link); mock_seqno_advance(request->base.engine, request->base.global_seqno); + i915_request_mark_complete(&request->base); + GEM_BUG_ON(!i915_request_completed(&request->base)); } static void hw_delay_complete(struct timer_list *t) @@ -108,6 +121,7 @@ static void hw_delay_complete(struct timer_list *t) static void mock_context_unpin(struct intel_context *ce) { + mock_timeline_unpin(ce->ring->timeline); i915_gem_context_put(ce->gem_context); } @@ -129,6 +143,7 @@ mock_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); + int err = -ENOMEM; if (ce->pin_count++) return ce; @@ -139,13 +154,15 @@ mock_context_pin(struct intel_engine_cs *engine, goto err; } + mock_timeline_pin(ce->ring->timeline); + ce->ops = &mock_context_ops; i915_gem_context_get(ctx); return ce; err: ce->pin_count = 0; - return ERR_PTR(-ENOMEM); + return ERR_PTR(err); } static int mock_request_alloc(struct 
i915_request *request) -- cgit v1.2.3 From 8547444137ec6138ce52fc1938980b737a0d4d9e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Jan 2019 18:54:50 +0000 Subject: drm/i915: Identify active requests To allow requests to forgo a common execution timeline, one question we need to be able to answer is "is this request running?". To track whether a request has started on HW, we can emit a breadcrumb at the beginning of the request and check its timeline's HWSP to see if the breadcrumb has advanced past the start of this request. (This is in contrast to the global timeline where we need only ask if we are on the global timeline and if the timeline has advanced past the end of the previous request.) There is still confusion from a preempted request, which has already started but relinquished the HW to a high priority request. For the common case, this discrepancy should be negligible. However, for identification of hung requests, knowing which one was running at the time of the hang will be much more important. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 15 +++++++++++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 +++++++++ drivers/gpu/drm/i915/i915_request.c | 10 +++---- drivers/gpu/drm/i915/i915_request.h | 1 + drivers/gpu/drm/i915/i915_timeline.c | 1 + drivers/gpu/drm/i915/i915_timeline.h | 2 ++ drivers/gpu/drm/i915/intel_engine_cs.c | 8 +++--- drivers/gpu/drm/i915/intel_lrc.c | 39 ++++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 25 +++++++++++------- drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +++-- drivers/gpu/drm/i915/selftests/mock_engine.c | 2 +- 11 files changed, 96 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9c499edb4c13..d92e7ab0005e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2871,6 +2871,14 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, return 0; } +static bool match_ring(struct i915_request *rq) +{ + struct drm_i915_private *dev_priv = rq->i915; + u32 ring = I915_READ(RING_START(rq->engine->mmio_base)); + + return ring == i915_ggtt_offset(rq->ring->vma); +} + struct i915_request * i915_gem_find_active_request(struct intel_engine_cs *engine) { @@ -2893,6 +2901,13 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) if (i915_request_completed(request)) continue; + if (!i915_request_started(request)) + break; + + /* More than one preemptible request may match! */ + if (!match_ring(request)) + break; + active = request; break; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f250109e1f66..8eedf7cac493 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1976,6 +1976,18 @@ static int eb_submit(struct i915_execbuffer *eb) return err; } + /* + * After we completed waiting for other engines (using HW semaphores) + * then we can signal that this request/batch is ready to run. This + * allows us to determine if the batch is still waiting on the GPU + * or actually running by checking the breadcrumb. 
+ */ + if (eb->engine->emit_init_breadcrumb) { + err = eb->engine->emit_init_breadcrumb(eb->request); + if (err) + return err; + } + err = eb->engine->emit_bb_start(eb->request, eb->batch->node.start + eb->batch_start_offset, diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 4d58770e6a8c..7db15b7b3de8 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -333,7 +333,7 @@ void i915_request_retire_upto(struct i915_request *rq) static u32 timeline_get_seqno(struct i915_timeline *tl) { - return ++tl->seqno; + return tl->seqno += 1 + tl->has_initial_breadcrumb; } static void move_to_timeline(struct i915_request *request, @@ -382,8 +382,8 @@ void __i915_request_submit(struct i915_request *request) intel_engine_enable_signaling(request, false); spin_unlock(&request->lock); - engine->emit_breadcrumb(request, - request->ring->vaddr + request->postfix); + engine->emit_fini_breadcrumb(request, + request->ring->vaddr + request->postfix); /* Transfer from per-context onto the global per-engine timeline */ move_to_timeline(request, &engine->timeline); @@ -657,7 +657,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * around inside i915_request_add() there is sufficient space at * the beginning of the ring as well. */ - rq->reserved_space = 2 * engine->emit_breadcrumb_dw * sizeof(u32); + rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32); /* * Record the position of the start of the request so that @@ -908,7 +908,7 @@ void i915_request_add(struct i915_request *request) * GPU processing the request, we never over-estimate the * position of the ring's HEAD. */ - cs = intel_ring_begin(request, engine->emit_breadcrumb_dw); + cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw); GEM_BUG_ON(IS_ERR(cs)); request->postfix = intel_ring_offset(request, cs); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 96c586d6ff4d..340d6216791c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -344,6 +344,7 @@ static inline bool i915_request_started(const struct i915_request *rq) if (i915_request_signaled(rq)) return true; + /* Remember: started but may have since been preempted! */ return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); } diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 79838d89bdb9..5ea3af393ffe 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -135,6 +135,7 @@ int i915_timeline_init(struct drm_i915_private *i915, timeline->i915 = i915; timeline->name = name; timeline->pin_count = 0; + timeline->has_initial_breadcrumb = !hwsp; timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; if (!hwsp) { diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index ab736e2e5707..8caeb66d1cd5 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -48,6 +48,8 @@ struct i915_timeline { struct i915_vma *hwsp_ggtt; u32 hwsp_offset; + bool has_initial_breadcrumb; + /** * List of breadcrumbs associated with GPU requests currently * outstanding. 
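For reference, a minimal sketch (with assumed helper names, not the driver's exact code) of the "has this request started?" test described above: the initial breadcrumb stores seqno - 1 in the context's HWSP before the payload runs, so started-ness is again a wrapping seqno comparison.

#include <stdbool.h>
#include <stdint.h>

/* true if seq1 is at or after seq2, tolerating u32 wraparound */
static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

/* hwsp is the value currently stored in the context's HWSP cacheline */
static inline bool sketch_request_started(uint32_t hwsp, uint32_t fence_seqno)
{
        /* the init breadcrumb wrote fence_seqno - 1 ahead of the batch */
        return seqno_passed(hwsp, fence_seqno - 1);
}

Note that a preempted request still reports started here, which is why the hung-request search above additionally compares RING_START against the request's ring (match_ring()) before declaring a request active.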
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index ead9c4371fe1..8dca76f6315d 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -664,7 +664,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) if (dw < 0) goto out_timeline; - dw = engine->emit_breadcrumb(&frame->rq, frame->cs) - frame->cs; + dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; i915_timeline_unpin(&frame->timeline); @@ -725,7 +725,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret < 0) goto err_breadcrumbs; - engine->emit_breadcrumb_dw = ret; + engine->emit_fini_breadcrumb_dw = ret; return 0; @@ -1297,7 +1297,9 @@ static void print_request(struct drm_printer *m, drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", prefix, rq->global_seqno, - i915_request_completed(rq) ? "!" : "", + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", rq->fence.context, rq->fence.seqno, buf, jiffies_to_msecs(jiffies - rq->emitted_jiffies), diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index fdbb3fe8eac9..5db16dd8e844 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -624,7 +624,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * WaIdleLiteRestore:bdw,skl * Apply the wa NOOPs to prevent * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_breadcrumb() for + * request. See gen8_emit_fini_breadcrumb() for * where we prepare the padding after the * end of the request. */ @@ -1283,6 +1283,34 @@ execlists_context_pin(struct intel_engine_cs *engine, return __execlists_context_pin(engine, ctx, ce); } +static int gen8_emit_init_breadcrumb(struct i915_request *rq) +{ + u32 *cs; + + GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Check if we have been preempted before we even get started. + * + * After this point i915_request_started() reports true, even if + * we get preempted and so are no longer running. + */ + *cs++ = MI_ARB_CHECK; + *cs++ = MI_NOOP; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = rq->timeline->hwsp_offset; + *cs++ = 0; + *cs++ = rq->fence.seqno - 1; + + intel_ring_advance(rq, cs); + return 0; +} + static int emit_pdps(struct i915_request *rq) { const struct intel_engine_cs * const engine = rq->engine; @@ -2039,7 +2067,7 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) return cs; } -static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) +static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); @@ -2061,7 +2089,7 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static u32 *gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) +static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, @@ -2176,7 +2204,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->request_alloc = execlists_request_alloc; engine->emit_flush = gen8_emit_flush; - engine->emit_breadcrumb = gen8_emit_breadcrumb; + engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; engine->set_default_submission = intel_execlists_set_default_submission; @@ -2289,7 +2318,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine) /* Override some for render ring. */ engine->init_context = gen8_init_rcs_context; engine->emit_flush = gen8_emit_flush_render; - engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; ret = logical_ring_init(engine); if (ret) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ee3719324e2d..668ed67336a2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1607,6 +1607,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) err = PTR_ERR(timeline); goto err; } + GEM_BUG_ON(timeline->has_initial_breadcrumb); ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); i915_timeline_put(timeline); @@ -1960,6 +1961,7 @@ static int ring_request_alloc(struct i915_request *request) int ret; GEM_BUG_ON(!request->hw_context->pin_count); + GEM_BUG_ON(request->timeline->has_initial_breadcrumb); /* * Flush enough space to reduce the likelihood of waiting after @@ -2296,9 +2298,14 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->context_pin = intel_ring_context_pin; engine->request_alloc = ring_request_alloc; - engine->emit_breadcrumb = i9xx_emit_breadcrumb; + /* + * Using a global execution timeline; the previous final breadcrumb is + * equivalent to our next initial breadcrumb so we can elide + * engine->emit_init_breadcrumb().
+ */ + engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb; if (IS_GEN(dev_priv, 5)) - engine->emit_breadcrumb = gen5_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; engine->set_default_submission = i9xx_set_default_submission; @@ -2327,11 +2334,11 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) >= 7) { engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; - engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; } else if (IS_GEN(dev_priv, 6)) { engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen6_render_ring_flush; - engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; } else if (IS_GEN(dev_priv, 5)) { engine->emit_flush = gen4_render_ring_flush; } else { @@ -2368,9 +2375,9 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; if (IS_GEN(dev_priv, 6)) - engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; else - engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; } else { engine->emit_flush = bsd_ring_flush; if (IS_GEN(dev_priv, 5)) @@ -2394,9 +2401,9 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; if (IS_GEN(dev_priv, 6)) - engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; else - engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; return intel_init_ring_buffer(engine); } @@ -2414,7 +2421,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) engine->irq_enable = hsw_vebox_irq_enable; engine->irq_disable = hsw_vebox_irq_disable; - engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; return intel_init_ring_buffer(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2927b712b973..1f30ffb84936 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -463,8 +463,10 @@ struct intel_engine_cs { unsigned int dispatch_flags); #define I915_DISPATCH_SECURE BIT(0) #define I915_DISPATCH_PINNED BIT(1) - u32 *(*emit_breadcrumb)(struct i915_request *rq, u32 *cs); - int emit_breadcrumb_dw; + int (*emit_init_breadcrumb)(struct i915_request *rq); + u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, + u32 *cs); + unsigned int emit_fini_breadcrumb_dw; /* Pass the request to the hardware queue (e.g. directly into * the legacy ringbuffer or to the end of an execlist). 
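To summarise how the two hooks added above fit together, a hedged sketch (assumed names, not the real vfunc table): execlists install both callbacks, while the legacy ring path leaves emit_init_breadcrumb NULL because, on a global execution timeline, the previous request's final breadcrumb already marks the next request's start.

#include <stdint.h>

struct sketch_request;

struct sketch_engine {
        /* optional; NULL when a global execution timeline makes it redundant */
        int (*emit_init_breadcrumb)(struct sketch_request *rq);
        /* writes the closing seqno + interrupt; returns the advanced pointer */
        uint32_t *(*emit_fini_breadcrumb)(struct sketch_request *rq, uint32_t *cs);
        unsigned int emit_fini_breadcrumb_dw; /* measured once at engine init */
};

static int sketch_submit_payload(struct sketch_engine *engine,
                                 struct sketch_request *rq)
{
        if (engine->emit_init_breadcrumb) {
                int err = engine->emit_init_breadcrumb(rq);
                if (err)
                        return err;
        }
        /* ... emit the batch; the fini breadcrumb is reserved and written later ... */
        return 0;
}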
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 95e890d7f58b..3b226ebc6bc4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -227,7 +227,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.context_pin = mock_context_pin; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; - engine->base.emit_breadcrumb = mock_emit_breadcrumb; + engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; if (i915_timeline_init(i915, -- cgit v1.2.3 From 52c0fdb25c7c919334b97976d05096b441a3eada Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Jan 2019 20:52:29 +0000 Subject: drm/i915: Replace global breadcrumbs with per-context interrupt tracking A few years ago, see commit 688e6c725816 ("drm/i915: Slaughter the thundering i915_wait_request herd"), the issue of handling multiple clients waiting in parallel was brought to our attention. The requirement was that every client should be woken immediately upon its request being signaled, without incurring any cpu overhead. Handling certain fragility of our hw meant that we could not do a simple check inside the irq handler (some generations required almost unbounded delays before we could be sure of seqno coherency) and so request completion checking required delegation. Before commit 688e6c725816, the solution was simple. Every client waiting on a request would be woken on every interrupt and each would do a heavyweight check to see if their request was complete. Commit 688e6c725816 introduced an rbtree so that only the earliest waiter on the global timeline would be woken, and would wake the next and so on. (Along with various complications to handle requests being reordered along the global timeline, and also a requirement for kthread to provide a delegate for fence signaling that had no process context.) The global rbtree depends on knowing the execution timeline (and global seqno). Without knowing that order, we must instead check all contexts queued to the HW to see which may have advanced. We trim that list by only checking queued contexts that are being waited on, but still we keep a list of all active contexts and their active signalers that we inspect from inside the irq handler. By moving the waiters onto the fence signal list, we can combine the client wakeup with the dma_fence signaling (a dramatic reduction in complexity, but does require the HW being coherent, the seqno must be visible from the cpu before the interrupt is raised - we keep a timer backup just in case). Having previously fixed all the issues with irq-seqno serialisation (by inserting delays onto the GPU after each request instead of random delays on the CPU after each interrupt), we can rely on the seqno state to perform direct wakeups from the interrupt handler. This allows us to preserve our single context switch behaviour of the current routine, with the only downside that we lose the RT priority sorting of wakeups. In general, direct wakeup latency of multiple clients is about the same (about 10% better in most cases) with a reduction in total CPU time spent in the waiter (about 20-50% depending on gen). Average herd behaviour is improved, but at the cost of not delegating wakeups on task_prio. v2: Capture fence signaling state for error state and add comments to warm even the most cold of hearts.
v3: Check if the request is still active before busywaiting v4: Reduce the amount of pointer misdirection with list_for_each_safe and using a local i915_request variable inside the loops v5: Add a missing pluralisation to a purely informative selftest message. References: 688e6c725816 ("drm/i915: Slaughter the thundering i915_wait_request herd") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190129205230.19056-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 28 +- drivers/gpu/drm/i915/i915_gem_context.c | 3 + drivers/gpu/drm/i915/i915_gem_context.h | 2 + drivers/gpu/drm/i915/i915_gpu_error.c | 83 +-- drivers/gpu/drm/i915/i915_gpu_error.h | 9 +- drivers/gpu/drm/i915/i915_irq.c | 82 +-- drivers/gpu/drm/i915/i915_request.c | 142 ++-- drivers/gpu/drm/i915/i915_request.h | 72 +- drivers/gpu/drm/i915/i915_reset.c | 16 +- drivers/gpu/drm/i915/i915_scheduler.c | 2 +- drivers/gpu/drm/i915/intel_breadcrumbs.c | 818 ++++++--------------- drivers/gpu/drm/i915/intel_engine_cs.c | 35 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 94 +-- .../gpu/drm/i915/selftests/i915_mock_selftests.h | 1 - drivers/gpu/drm/i915/selftests/i915_request.c | 425 +++++++++++ drivers/gpu/drm/i915/selftests/igt_spinner.c | 5 - drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 470 ------------ drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 2 +- drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 54 ++ drivers/gpu/drm/i915/selftests/lib_sw_fence.h | 3 + drivers/gpu/drm/i915/selftests/mock_engine.c | 17 +- drivers/gpu/drm/i915/selftests/mock_engine.h | 6 - 23 files changed, 890 insertions(+), 1481 deletions(-) delete mode 100644 drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c9c230499420..29d52304c189 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1315,29 +1315,16 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); for_each_engine(engine, dev_priv, id) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node *rb; - seq_printf(m, "%s:\n", engine->name); seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n", engine->hangcheck.seqno, seqno[id], intel_engine_last_submit(engine), jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); - seq_printf(m, "\twaiters? %s, fake irq active? %s\n", - yesno(intel_engine_has_waiter(engine)), + seq_printf(m, "\tfake irq active? 
%s\n", yesno(test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings))); - spin_lock_irq(&b->rb_lock); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - seq_printf(m, "\t%s [%d] waiting for %x\n", - w->tsk->comm, w->tsk->pid, w->seqno); - } - spin_unlock_irq(&b->rb_lock); - seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); @@ -2021,18 +2008,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data) return 0; } -static int count_irq_waiters(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int count = 0; - - for_each_engine(engine, i915, id) - count += intel_engine_has_waiter(engine); - - return count; -} - static const char *rps_power_to_str(unsigned int power) { static const char * const strings[] = { @@ -2072,7 +2047,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "RPS enabled? %d\n", rps->enabled); seq_printf(m, "GPU busy? %s [%d requests]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); - seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 93e84751370f..6faf1f6faab5 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -327,6 +327,9 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) { ce->gem_context = ctx; + + INIT_LIST_HEAD(&ce->signal_link); + INIT_LIST_HEAD(&ce->signals); } static struct i915_gem_context * diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 3769438228f6..6ba40ff6b91f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -164,6 +164,8 @@ struct i915_gem_context { struct intel_context { struct i915_gem_context *gem_context; struct intel_engine_cs *active; + struct list_head signal_link; + struct list_head signals; struct i915_vma *state; struct intel_ring *ring; u32 *lrc_reg_state; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 898e06014295..304a7ef7f7fb 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -447,9 +447,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", prefix, erq->pid, erq->ban_score, - erq->context, erq->seqno, erq->sched_attr.priority, + erq->context, erq->seqno, + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &erq->flags) ? "!" : "", + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &erq->flags) ? 
"+" : "", + erq->sched_attr.priority, jiffies_to_msecs(erq->jiffies - epoch), erq->start, erq->head, erq->tail); } @@ -530,7 +535,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, } err_printf(m, " seqno: 0x%08x\n", ee->seqno); err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno); - err_printf(m, " waiting: %s\n", yesno(ee->waiting)); err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", @@ -804,21 +808,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, error->epoch); } - if (IS_ERR(ee->waiters)) { - err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n", - m->i915->engine[i]->name); - } else if (ee->num_waiters) { - err_printf(m, "%s --- %d waiters\n", - m->i915->engine[i]->name, - ee->num_waiters); - for (j = 0; j < ee->num_waiters; j++) { - err_printf(m, " seqno 0x%08x for %s [%d]\n", - ee->waiters[j].seqno, - ee->waiters[j].comm, - ee->waiters[j].pid); - } - } - print_error_obj(m, m->i915->engine[i], "ringbuffer", ee->ringbuffer); @@ -1000,8 +989,6 @@ void __i915_gpu_state_free(struct kref *error_ref) i915_error_object_free(ee->wa_ctx); kfree(ee->requests); - if (!IS_ERR_OR_NULL(ee->waiters)) - kfree(ee->waiters); } for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) @@ -1205,59 +1192,6 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine, I915_READ(RING_SYNC_2(engine->mmio_base)); } -static void error_record_engine_waiters(struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct drm_i915_error_waiter *waiter; - struct rb_node *rb; - int count; - - ee->num_waiters = 0; - ee->waiters = NULL; - - if (RB_EMPTY_ROOT(&b->waiters)) - return; - - if (!spin_trylock_irq(&b->rb_lock)) { - ee->waiters = ERR_PTR(-EDEADLK); - return; - } - - count = 0; - for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb)) - count++; - spin_unlock_irq(&b->rb_lock); - - waiter = NULL; - if (count) - waiter = kmalloc_array(count, - sizeof(struct drm_i915_error_waiter), - GFP_ATOMIC); - if (!waiter) - return; - - if (!spin_trylock_irq(&b->rb_lock)) { - kfree(waiter); - ee->waiters = ERR_PTR(-EDEADLK); - return; - } - - ee->waiters = waiter; - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - strcpy(waiter->comm, w->tsk->comm); - waiter->pid = w->tsk->pid; - waiter->seqno = w->seqno; - waiter++; - - if (++ee->num_waiters == count) - break; - } - spin_unlock_irq(&b->rb_lock); -} - static void error_record_engine_registers(struct i915_gpu_state *error, struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) @@ -1293,7 +1227,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error, intel_engine_get_instdone(engine, &ee->instdone); - ee->waiting = intel_engine_has_waiter(engine); ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); ee->acthd = intel_engine_get_active_head(engine); ee->seqno = intel_engine_get_seqno(engine); @@ -1367,6 +1300,7 @@ static void record_request(struct i915_request *request, { struct i915_gem_context *ctx = request->gem_context; + erq->flags = request->fence.flags; erq->context = ctx->hw_id; erq->sched_attr = request->sched.attr; erq->ban_score = atomic_read(&ctx->ban_score); @@ -1542,7 +1476,6 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->engine_id = i; error_record_engine_registers(error, engine, ee); - 
error_record_engine_waiters(engine, ee); error_record_engine_execlists(engine, ee); request = i915_gem_find_active_request(engine); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 231173786eae..74757c424aab 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -82,8 +82,6 @@ struct i915_gpu_state { int engine_id; /* Software tracked state */ bool idle; - bool waiting; - int num_waiters; unsigned long hangcheck_timestamp; struct i915_address_space *vm; int num_requests; @@ -147,6 +145,7 @@ struct i915_gpu_state { struct drm_i915_error_object *default_state; struct drm_i915_error_request { + unsigned long flags; long jiffies; pid_t pid; u32 context; @@ -159,12 +158,6 @@ struct i915_gpu_state { } *requests, execlist[EXECLIST_MAX_PORTS]; unsigned int num_ports; - struct drm_i915_error_waiter { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 seqno; - } *waiters; - struct { u32 gfx_mode; union { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0fcdb14c50f4..eab085686a2a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1169,66 +1169,6 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) return; } -static void notify_ring(struct intel_engine_cs *engine) -{ - const u32 seqno = intel_engine_get_seqno(engine); - struct i915_request *rq = NULL; - struct task_struct *tsk = NULL; - struct intel_wait *wait; - - if (unlikely(!engine->breadcrumbs.irq_armed)) - return; - - rcu_read_lock(); - - spin_lock(&engine->breadcrumbs.irq_lock); - wait = engine->breadcrumbs.irq_wait; - if (wait) { - /* - * We use a callback from the dma-fence to submit - * requests after waiting on our own requests. To - * ensure minimum delay in queuing the next request to - * hardware, signal the fence now rather than wait for - * the signaler to be woken up. We still wake up the - * waiter in order to handle the irq-seqno coherency - * issues (we may receive the interrupt before the - * seqno is written, see __i915_request_irq_complete()) - * and to handle coalescing of multiple seqno updates - * and many waiters. 
- */ - if (i915_seqno_passed(seqno, wait->seqno)) { - struct i915_request *waiter = wait->request; - - if (waiter && - !i915_request_signaled(waiter) && - intel_wait_check_request(wait, waiter)) - rq = i915_request_get(waiter); - - tsk = wait->tsk; - } - - engine->breadcrumbs.irq_count++; - } else { - if (engine->breadcrumbs.irq_armed) - __intel_engine_disarm_breadcrumbs(engine); - } - spin_unlock(&engine->breadcrumbs.irq_lock); - - if (rq) { - spin_lock(&rq->lock); - dma_fence_signal_locked(&rq->fence); - GEM_BUG_ON(!i915_request_completed(rq)); - spin_unlock(&rq->lock); - - i915_request_put(rq); - } - - if (tsk && tsk->state & TASK_NORMAL) - wake_up_process(tsk); - - rcu_read_unlock(); -} - static void vlv_c0_read(struct drm_i915_private *dev_priv, struct intel_rps_ei *ei) { @@ -1473,20 +1413,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); } static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); if (gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev_priv->engine[BCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -1506,7 +1446,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) tasklet = true; if (iir & GT_RENDER_USER_INTERRUPT) { - notify_ring(engine); + intel_engine_breadcrumbs_irq(engine); tasklet |= USES_GUC_SUBMISSION(engine->i915); } @@ -1852,7 +1792,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (HAS_VEBOX(dev_priv)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(dev_priv->engine[VECS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]); if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); @@ -4276,7 +4216,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_WRITE16(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4384,7 +4324,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4529,10 +4469,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 7db15b7b3de8..9ed5baf157a3 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ 
b/drivers/gpu/drm/i915/i915_request.c @@ -60,7 +60,7 @@ static bool i915_fence_signaled(struct dma_fence *fence) static bool i915_fence_enable_signaling(struct dma_fence *fence) { - return intel_engine_enable_signaling(to_request(fence), true); + return i915_request_enable_breadcrumb(to_request(fence)); } static signed long i915_fence_wait(struct dma_fence *fence, @@ -203,7 +203,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, if (!i915_request_signaled(rq)) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) - intel_engine_cancel_signaling(rq); + i915_request_cancel_breadcrumb(rq); if (rq->waitboost) { GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); @@ -377,9 +377,12 @@ void __i915_request_submit(struct i915_request *request) /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); + set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); request->global_seqno = seqno; - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) - intel_engine_enable_signaling(request, false); + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && + !i915_request_enable_breadcrumb(request)) + intel_engine_queue_breadcrumbs(engine); spin_unlock(&request->lock); engine->emit_fini_breadcrumb(request, @@ -389,8 +392,6 @@ void __i915_request_submit(struct i915_request *request) move_to_timeline(request, &engine->timeline); trace_i915_request_execute(request); - - wake_up_all(&request->execute); } void i915_request_submit(struct i915_request *request) @@ -433,7 +434,9 @@ void __i915_request_unsubmit(struct i915_request *request) spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); request->global_seqno = 0; if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) - intel_engine_cancel_signaling(request); + i915_request_cancel_breadcrumb(request); + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); + clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); spin_unlock(&request->lock); /* Transfer back from the global per-engine timeline to per-context */ @@ -633,13 +636,11 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* We bump the ref for the fence chain */ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); - init_waitqueue_head(&rq->execute); i915_sched_node_init(&rq->sched); /* No zalloc, must clear what we need by hand */ rq->global_seqno = 0; - rq->signaling.wait.seqno = 0; rq->file_priv = NULL; rq->batch = NULL; rq->capture_list = NULL; @@ -1030,13 +1031,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu) return this_cpu != cpu; } -static bool __i915_spin_request(const struct i915_request *rq, - u32 seqno, int state, unsigned long timeout_us) +static bool __i915_spin_request(const struct i915_request * const rq, + int state, unsigned long timeout_us) { - struct intel_engine_cs *engine = rq->engine; - unsigned int irq, cpu; - - GEM_BUG_ON(!seqno); + unsigned int cpu; /* * Only wait for the request if we know it is likely to complete. @@ -1044,12 +1042,12 @@ static bool __i915_spin_request(const struct i915_request *rq, * We don't track the timestamps around requests, nor the average * request length, so we do not have a good indicator that this * request will complete within the timeout. 
What we do know is the - * order in which requests are executed by the engine and so we can - * tell if the request has started. If the request hasn't started yet, - * it is a fair assumption that it will not complete within our - * relatively short timeout. + * order in which requests are executed by the context and so we can + * tell if the request has been started. If the request is not even + * running yet, it is a fair assumption that it will not complete + * within our relatively short timeout. */ - if (!intel_engine_has_started(engine, seqno)) + if (!i915_request_is_running(rq)) return false; /* @@ -1063,20 +1061,10 @@ static bool __i915_spin_request(const struct i915_request *rq, * takes to sleep on a request, on the order of a microsecond. */ - irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { - if (intel_engine_has_completed(engine, seqno)) - return seqno == i915_request_global_seqno(rq); - - /* - * Seqno are meant to be ordered *before* the interrupt. If - * we see an interrupt without a corresponding seqno advance, - * assume we won't see one in the near future but require - * the engine->seqno_barrier() to fixup coherency. - */ - if (READ_ONCE(engine->breadcrumbs.irq_count) != irq) - break; + if (i915_request_completed(rq)) + return true; if (signal_pending_state(state, current)) break; @@ -1090,6 +1078,18 @@ static bool __i915_spin_request(const struct i915_request *rq, return false; } +struct request_wait { + struct dma_fence_cb cb; + struct task_struct *tsk; +}; + +static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct request_wait *wait = container_of(cb, typeof(*wait), cb); + + wake_up_process(wait->tsk); +} + /** * i915_request_wait - wait until execution of request has finished * @rq: the request to wait upon @@ -1115,8 +1115,7 @@ long i915_request_wait(struct i915_request *rq, { const int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - DEFINE_WAIT_FUNC(exec, default_wake_function); - struct intel_wait wait; + struct request_wait wait; might_sleep(); GEM_BUG_ON(timeout < 0); @@ -1128,47 +1127,24 @@ long i915_request_wait(struct i915_request *rq, return -ETIME; trace_i915_request_wait_begin(rq, flags); - add_wait_queue(&rq->execute, &exec); - intel_wait_init(&wait); - if (flags & I915_WAIT_PRIORITY) - i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); - -restart: - do { - set_current_state(state); - if (intel_wait_update_request(&wait, rq)) - break; - - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - goto complete; - } - if (!timeout) { - timeout = -ETIME; - goto complete; - } + /* Optimistic short spin before touching IRQs */ + if (__i915_spin_request(rq, state, 5)) + goto out; - timeout = io_schedule_timeout(timeout); - } while (1); + if (flags & I915_WAIT_PRIORITY) + i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); - GEM_BUG_ON(!intel_wait_has_seqno(&wait)); - GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); + wait.tsk = current; + if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) + goto out; - /* Optimistic short spin before touching IRQs */ - if (__i915_spin_request(rq, wait.seqno, state, 5)) - goto complete; + for (;;) { + set_current_state(state); - set_current_state(state); - if (intel_engine_add_wait(rq->engine, &wait)) - /* - * In order to check that we haven't missed the interrupt - * as we enabled it, we need to kick ourselves to do a - * coherent check on the seqno before we sleep. 
- */ - goto wakeup; + if (i915_request_completed(rq)) + break; - for (;;) { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; @@ -1180,33 +1156,13 @@ restart: } timeout = io_schedule_timeout(timeout); - - if (intel_wait_complete(&wait) && - intel_wait_check_request(&wait, rq)) - break; - - set_current_state(state); - -wakeup: - if (i915_request_completed(rq)) - break; - - /* Only spin if we know the GPU is processing this request */ - if (__i915_spin_request(rq, wait.seqno, state, 2)) - break; - - if (!intel_wait_check_request(&wait, rq)) { - intel_engine_remove_wait(rq->engine, &wait); - goto restart; - } } - - intel_engine_remove_wait(rq->engine, &wait); -complete: __set_current_state(TASK_RUNNING); - remove_wait_queue(&rq->execute, &exec); - trace_i915_request_wait_end(rq); + dma_fence_remove_callback(&rq->fence, &wait.cb); + +out: + trace_i915_request_wait_end(rq); return timeout; } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 340d6216791c..3cffb96203b9 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -38,23 +38,34 @@ struct drm_i915_gem_object; struct i915_request; struct i915_timeline; -struct intel_wait { - struct rb_node node; - struct task_struct *tsk; - struct i915_request *request; - u32 seqno; -}; - -struct intel_signal_node { - struct intel_wait wait; - struct list_head link; -}; - struct i915_capture_list { struct i915_capture_list *next; struct i915_vma *vma; }; +enum { + /* + * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW. + * + * Set by __i915_request_submit() on handing over to HW, and cleared + * by __i915_request_unsubmit() if we preempt this request. + * + * Finally cleared for consistency on retiring the request, when + * we know the HW is no longer running this request. + * + * See i915_request_is_active() + */ + I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS, + + /* + * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list + * + * Internal bookkeeping used by the breadcrumb code to track when + * a request is on the various signal_list. + */ + I915_FENCE_FLAG_SIGNAL, +}; + /** * Request queue structure. * @@ -97,7 +108,7 @@ struct i915_request { struct intel_context *hw_context; struct intel_ring *ring; struct i915_timeline *timeline; - struct intel_signal_node signaling; + struct list_head signal_link; /* * The rcu epoch of when this request was allocated. Used to judiciously @@ -116,7 +127,6 @@ struct i915_request { */ struct i915_sw_fence submit; wait_queue_entry_t submitq; - wait_queue_head_t execute; /* * A list of everyone we wait upon, and everyone who waits upon us. @@ -255,7 +265,7 @@ i915_request_put(struct i915_request *rq) * that it has passed the global seqno and the global seqno is unchanged * after the read, it is indeed complete). 
*/ -static u32 +static inline u32 i915_request_global_seqno(const struct i915_request *request) { return READ_ONCE(request->global_seqno); @@ -277,6 +287,10 @@ void i915_request_skip(struct i915_request *request, int error); void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); +/* Note: part of the intel_breadcrumbs family */ +bool i915_request_enable_breadcrumb(struct i915_request *request); +void i915_request_cancel_breadcrumb(struct i915_request *request); + long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) @@ -293,6 +307,11 @@ static inline bool i915_request_signaled(const struct i915_request *rq) return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags); } +static inline bool i915_request_is_active(const struct i915_request *rq) +{ + return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); +} + /** * Returns true if seq1 is later than seq2. */ @@ -330,6 +349,11 @@ static inline u32 hwsp_seqno(const struct i915_request *rq) return seqno; } +static inline bool __i915_request_has_started(const struct i915_request *rq) +{ + return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); +} + /** * i915_request_started - check if the request has begun being executed * @rq: the request @@ -345,7 +369,23 @@ static inline bool i915_request_started(const struct i915_request *rq) return true; /* Remember: started but may have since been preempted! */ - return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); + return __i915_request_has_started(rq); +} + +/** + * i915_request_is_running - check if the request may actually be executing + * @rq: the request + * + * Returns true if the request is currently submitted to hardware, has passed + * its start point (i.e. the context is setup and not busywaiting). Note that + * it may no longer be running by the time the function returns! 
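
All of the started/completed checks above funnel through i915_seqno_passed(), a wrap-safe u32 comparison. A short sketch of the idiom it relies on; seqno_passed() below is a restatement for illustration, not a new helper:

#include <linux/types.h>

/*
 * Interpret the unsigned difference as signed, so that e.g. 0x00000002
 * counts as "after" 0xfffffffe once the 32-bit counter has wrapped.
 */
static inline bool seqno_passed(u32 current_seqno, u32 target)
{
        return (s32)(current_seqno - target) >= 0;
}
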
+ */ +static inline bool i915_request_is_running(const struct i915_request *rq) +{ + if (!i915_request_is_active(rq)) + return false; + + return __i915_request_has_started(rq); } static inline bool i915_request_completed(const struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c index acf3c777e49d..4462007a681c 100644 --- a/drivers/gpu/drm/i915/i915_reset.c +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -29,7 +29,7 @@ static void engine_skip_context(struct i915_request *rq) spin_lock(&timeline->lock); - if (rq->global_seqno) { + if (i915_request_is_active(rq)) { list_for_each_entry_continue(rq, &engine->timeline.requests, link) if (rq->gem_context == hung_ctx) @@ -751,18 +751,20 @@ static void reset_restart(struct drm_i915_private *i915) static void nop_submit_request(struct i915_request *request) { + struct intel_engine_cs *engine = request->engine; unsigned long flags; GEM_TRACE("%s fence %llx:%lld -> -EIO\n", - request->engine->name, - request->fence.context, request->fence.seqno); + engine->name, request->fence.context, request->fence.seqno); dma_fence_set_error(&request->fence, -EIO); - spin_lock_irqsave(&request->engine->timeline.lock, flags); + spin_lock_irqsave(&engine->timeline.lock, flags); __i915_request_submit(request); i915_request_mark_complete(request); - intel_engine_write_global_seqno(request->engine, request->global_seqno); - spin_unlock_irqrestore(&request->engine->timeline.lock, flags); + intel_engine_write_global_seqno(engine, request->global_seqno); + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + intel_engine_queue_breadcrumbs(engine); } void i915_gem_set_wedged(struct drm_i915_private *i915) @@ -817,7 +819,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { reset_finish_engine(engine); - intel_engine_wakeup(engine); + intel_engine_signal_breadcrumbs(engine); } smp_mb__before_atomic(); diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 2d172991024f..d01683167c77 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -243,7 +243,7 @@ static bool inflight(const struct i915_request *rq, { const struct i915_request *active; - if (!rq->global_seqno) + if (!i915_request_is_active(rq)) return false; active = port_request(engine->execlists.port); diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index b58915b8708b..b0795b0ad227 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -29,48 +29,149 @@ #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq) -static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) +static void irq_enable(struct intel_engine_cs *engine) +{ + if (!engine->irq_enable) + return; + + /* Caller disables interrupts */ + spin_lock(&engine->i915->irq_lock); + engine->irq_enable(engine); + spin_unlock(&engine->i915->irq_lock); +} + +static void irq_disable(struct intel_engine_cs *engine) { - struct intel_wait *wait; - unsigned int result = 0; + if (!engine->irq_disable) + return; + + /* Caller disables interrupts */ + spin_lock(&engine->i915->irq_lock); + engine->irq_disable(engine); + spin_unlock(&engine->i915->irq_lock); +} +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ lockdep_assert_held(&b->irq_lock); - wait = b->irq_wait; - if (wait) { + GEM_BUG_ON(!b->irq_enabled); + if (!--b->irq_enabled) + 
irq_disable(container_of(b, + struct intel_engine_cs, + breadcrumbs)); + + b->irq_armed = false; +} + +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + if (!b->irq_armed) + return; + + spin_lock_irq(&b->irq_lock); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock_irq(&b->irq_lock); +} + +static inline bool __request_completed(const struct i915_request *rq) +{ + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); +} + +bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_context *ce, *cn; + struct list_head *pos, *next; + LIST_HEAD(signal); + + spin_lock(&b->irq_lock); + + b->irq_fired = true; + if (b->irq_armed && list_empty(&b->signalers)) + __intel_breadcrumbs_disarm_irq(b); + + list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { + GEM_BUG_ON(list_empty(&ce->signals)); + + list_for_each_safe(pos, next, &ce->signals) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); + + if (!__request_completed(rq)) + break; + + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)); + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + + /* + * We may race with direct invocation of + * dma_fence_signal(), e.g. i915_request_retire(), + * in which case we can skip processing it ourselves. + */ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &rq->fence.flags)) + continue; + + /* + * Queue for execution after dropping the signaling + * spinlock as the callback chain may end up adding + * more signalers to the same context or engine. + */ + i915_request_get(rq); + list_add_tail(&rq->signal_link, &signal); + } + /* - * N.B. Since task_asleep() and ttwu are not atomic, the - * waiter may actually go to sleep after the check, causing - * us to suppress a valid wakeup. We prefer to reduce the - * number of false positive missed_breadcrumb() warnings - * at the expense of a few false negatives, as it it easy - * to trigger a false positive under heavy load. Enough - * signal should remain from genuine missed_breadcrumb() - * for us to detect in CI. + * We process the list deletion in bulk, only using a list_add + * (not list_move) above but keeping the status of + * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. 
*/ - bool was_asleep = task_asleep(wait->tsk); + if (!list_is_first(pos, &ce->signals)) { + /* Advance the list to the first incomplete request */ + __list_del_many(&ce->signals, pos); + if (&ce->signals == pos) /* now empty */ + list_del_init(&ce->signal_link); + } + } + + spin_unlock(&b->irq_lock); + + list_for_each_safe(pos, next, &signal) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); - result = ENGINE_WAKEUP_WAITER; - if (wake_up_process(wait->tsk) && was_asleep) - result |= ENGINE_WAKEUP_ASLEEP; + dma_fence_signal(&rq->fence); + i915_request_put(rq); } - return result; + return !list_empty(&signal); } -unsigned int intel_engine_wakeup(struct intel_engine_cs *engine) +bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned long flags; - unsigned int result; + bool result; - spin_lock_irqsave(&b->irq_lock, flags); - result = __intel_breadcrumbs_wakeup(b); - spin_unlock_irqrestore(&b->irq_lock, flags); + local_irq_disable(); + result = intel_engine_breadcrumbs_irq(engine); + local_irq_enable(); return result; } +static void signal_irq_work(struct irq_work *work) +{ + struct intel_engine_cs *engine = + container_of(work, typeof(*engine), breadcrumbs.irq_work); + + intel_engine_breadcrumbs_irq(engine); +} + static unsigned long wait_timeout(void) { return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES); @@ -94,19 +195,15 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t) struct intel_engine_cs *engine = from_timer(engine, t, breadcrumbs.hangcheck); struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned int irq_count; if (!b->irq_armed) return; - irq_count = READ_ONCE(b->irq_count); - if (b->hangcheck_interrupts != irq_count) { - b->hangcheck_interrupts = irq_count; - mod_timer(&b->hangcheck, wait_timeout()); - return; - } + if (b->irq_fired) + goto rearm; - /* We keep the hangcheck timer alive until we disarm the irq, even + /* + * We keep the hangcheck timer alive until we disarm the irq, even * if there are no waiters at present. * * If the waiter was currently running, assume it hasn't had a chance @@ -118,10 +215,13 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t) * but we still have a waiter. Assuming all batches complete within * DRM_I915_HANGCHECK_JIFFIES [1.5s]! */ - if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) { + synchronize_hardirq(engine->i915->drm.irq); + if (intel_engine_signal_breadcrumbs(engine)) { missed_breadcrumb(engine); mod_timer(&b->fake_irq, jiffies + 1); } else { +rearm: + b->irq_fired = false; mod_timer(&b->hangcheck, wait_timeout()); } } @@ -140,11 +240,7 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t) * oldest waiter to do the coherent seqno check. 
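
signal_irq_work() above is what keeps interrupt context cheap: callers such as intel_engine_queue_breadcrumbs() only queue an irq_work from the hard irq, and the walk of the signalers list plus dma_fence_signal() run shortly afterwards in the irq_work callback, where taking the signaling locks is safe. A minimal sketch of the pattern with a made-up struct foo; only the irq_work API usage mirrors the patch:

#include <linux/irq_work.h>
#include <linux/kernel.h>

struct foo {
        struct irq_work irq_work;
};

static void foo_irq_work(struct irq_work *work)
{
        struct foo *f = container_of(work, struct foo, irq_work);

        /* Safe point to take locks and call dma_fence_signal() etc. */
        (void)f;
}

static void foo_init(struct foo *f)
{
        init_irq_work(&f->irq_work, foo_irq_work);
}

/* Called from the user-interrupt handler: a cheap, hardirq-safe deferral */
static void foo_user_interrupt(struct foo *f)
{
        irq_work_queue(&f->irq_work);
}
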
*/ - spin_lock_irq(&b->irq_lock); - if (b->irq_armed && !__intel_breadcrumbs_wakeup(b)) - __intel_engine_disarm_breadcrumbs(engine); - spin_unlock_irq(&b->irq_lock); - if (!b->irq_armed) + if (!intel_engine_signal_breadcrumbs(engine) && !b->irq_armed) return; /* If the user has disabled the fake-irq, restore the hangchecking */ @@ -156,43 +252,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t) mod_timer(&b->fake_irq, jiffies + 1); } -static void irq_enable(struct intel_engine_cs *engine) -{ - if (!engine->irq_enable) - return; - - /* Caller disables interrupts */ - spin_lock(&engine->i915->irq_lock); - engine->irq_enable(engine); - spin_unlock(&engine->i915->irq_lock); -} - -static void irq_disable(struct intel_engine_cs *engine) -{ - if (!engine->irq_disable) - return; - - /* Caller disables interrupts */ - spin_lock(&engine->i915->irq_lock); - engine->irq_disable(engine); - spin_unlock(&engine->i915->irq_lock); -} - -void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - lockdep_assert_held(&b->irq_lock); - GEM_BUG_ON(b->irq_wait); - GEM_BUG_ON(!b->irq_armed); - - GEM_BUG_ON(!b->irq_enabled); - if (!--b->irq_enabled) - irq_disable(engine); - - b->irq_armed = false; -} - void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; @@ -215,40 +274,6 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine) spin_unlock_irq(&b->irq_lock); } -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct intel_wait *wait, *n; - - if (!b->irq_armed) - return; - - /* - * We only disarm the irq when we are idle (all requests completed), - * so if the bottom-half remains asleep, it missed the request - * completion. - */ - if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) - missed_breadcrumb(engine); - - spin_lock_irq(&b->rb_lock); - - spin_lock(&b->irq_lock); - b->irq_wait = NULL; - if (b->irq_armed) - __intel_engine_disarm_breadcrumbs(engine); - spin_unlock(&b->irq_lock); - - rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) { - GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno)); - RB_CLEAR_NODE(&wait->node); - wake_up_process(wait->tsk); - } - b->waiters = RB_ROOT; - - spin_unlock_irq(&b->rb_lock); -} - static bool use_fake_irq(const struct intel_breadcrumbs *b) { const struct intel_engine_cs *engine = @@ -264,7 +289,7 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b) * engine->seqno_barrier(), a timing error that should be transient * and unlikely to reoccur. 
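
The fake-irq and hangcheck paths keep the same timer_list idiom as before the rewrite: timer_setup() once at init, mod_timer() to (re-)arm, del_timer_sync() at teardown. A hedged sketch of that idiom with a hypothetical struct poller; nothing here is i915-specific:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct poller {
        struct timer_list timer;
};

static void poller_tick(struct timer_list *t)
{
        struct poller *p = from_timer(p, t, timer);

        /* ... periodic check goes here ... */

        mod_timer(&p->timer, jiffies + HZ);     /* re-arm for the next tick */
}

static void poller_start(struct poller *p)
{
        timer_setup(&p->timer, poller_tick, 0);
        mod_timer(&p->timer, jiffies + HZ);
}

static void poller_stop(struct poller *p)
{
        del_timer_sync(&p->timer);      /* also waits for a running callback */
}
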
*/ - return READ_ONCE(b->irq_count) == b->hangcheck_interrupts; + return !b->irq_fired; } static void enable_fake_irq(struct intel_breadcrumbs *b) @@ -276,7 +301,7 @@ static void enable_fake_irq(struct intel_breadcrumbs *b) mod_timer(&b->hangcheck, wait_timeout()); } -static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) +static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { struct intel_engine_cs *engine = container_of(b, struct intel_engine_cs, breadcrumbs); @@ -315,536 +340,149 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) return enabled; } -static inline struct intel_wait *to_wait(struct rb_node *node) +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) { - return rb_entry(node, struct intel_wait, node); -} + struct intel_breadcrumbs *b = &engine->breadcrumbs; -static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, - struct intel_wait *wait) -{ - lockdep_assert_held(&b->rb_lock); - GEM_BUG_ON(b->irq_wait == wait); + spin_lock_init(&b->irq_lock); + INIT_LIST_HEAD(&b->signalers); - /* - * This request is completed, so remove it from the tree, mark it as - * complete, and *then* wake up the associated task. N.B. when the - * task wakes up, it will find the empty rb_node, discern that it - * has already been removed from the tree and skip the serialisation - * of the b->rb_lock and b->irq_lock. This means that the destruction - * of the intel_wait is not serialised with the interrupt handler - * by the waiter - it must instead be serialised by the caller. - */ - rb_erase(&wait->node, &b->waiters); - RB_CLEAR_NODE(&wait->node); + init_irq_work(&b->irq_work, signal_irq_work); - if (wait->tsk->state != TASK_RUNNING) - wake_up_process(wait->tsk); /* implicit smp_wmb() */ + timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0); + timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0); } -static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine, - struct rb_node *next) +static void cancel_fake_irq(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; - spin_lock(&b->irq_lock); - GEM_BUG_ON(!b->irq_armed); - GEM_BUG_ON(!b->irq_wait); - b->irq_wait = to_wait(next); - spin_unlock(&b->irq_lock); - - /* We always wake up the next waiter that takes over as the bottom-half - * as we may delegate not only the irq-seqno barrier to the next waiter - * but also the task of waking up concurrent waiters. - */ - if (next) - wake_up_process(to_wait(next)->tsk); + del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */ + del_timer_sync(&b->hangcheck); + clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); } -static bool __intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) +void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node **p, *parent, *completed; - bool first, armed; - u32 seqno; + unsigned long flags; - GEM_BUG_ON(!wait->seqno); + spin_lock_irqsave(&b->irq_lock, flags); - /* Insert the request into the retirement ordered list - * of waiters by walking the rbtree. If we are the oldest - * seqno in the tree (the first to be retired), then - * set ourselves as the bottom-half. 
- * - * As we descend the tree, prune completed branches since we hold the - * spinlock we know that the first_waiter must be delayed and can - * reduce some of the sequential wake up latency if we take action - * ourselves and wake up the completed tasks in parallel. Also, by - * removing stale elements in the tree, we may be able to reduce the - * ping-pong between the old bottom-half and ourselves as first-waiter. + /* + * Leave the fake_irq timer enabled (if it is running), but clear the + * bit so that it turns itself off on its next wake up and goes back + * to the long hangcheck interval if still required. */ - armed = false; - first = true; - parent = NULL; - completed = NULL; - seqno = intel_engine_get_seqno(engine); - - /* If the request completed before we managed to grab the spinlock, - * return now before adding ourselves to the rbtree. We let the - * current bottom-half handle any pending wakeups and instead - * try and get out of the way quickly. - */ - if (i915_seqno_passed(seqno, wait->seqno)) { - RB_CLEAR_NODE(&wait->node); - return first; - } - - p = &b->waiters.rb_node; - while (*p) { - parent = *p; - if (wait->seqno == to_wait(parent)->seqno) { - /* We have multiple waiters on the same seqno, select - * the highest priority task (that with the smallest - * task->prio) to serve as the bottom-half for this - * group. - */ - if (wait->tsk->prio > to_wait(parent)->tsk->prio) { - p = &parent->rb_right; - first = false; - } else { - p = &parent->rb_left; - } - } else if (i915_seqno_passed(wait->seqno, - to_wait(parent)->seqno)) { - p = &parent->rb_right; - if (i915_seqno_passed(seqno, to_wait(parent)->seqno)) - completed = parent; - else - first = false; - } else { - p = &parent->rb_left; - } - } - rb_link_node(&wait->node, parent, p); - rb_insert_color(&wait->node, &b->waiters); - - if (first) { - spin_lock(&b->irq_lock); - b->irq_wait = wait; - /* After assigning ourselves as the new bottom-half, we must - * perform a cursory check to prevent a missed interrupt. - * Either we miss the interrupt whilst programming the hardware, - * or if there was a previous waiter (for a later seqno) they - * may be woken instead of us (due to the inherent race - * in the unlocked read of b->irq_seqno_bh in the irq handler) - * and so we miss the wake up. - */ - armed = __intel_breadcrumbs_enable_irq(b); - spin_unlock(&b->irq_lock); - } - - if (completed) { - /* Advance the bottom-half (b->irq_wait) before we wake up - * the waiters who may scribble over their intel_wait - * just as the interrupt handler is dereferencing it via - * b->irq_wait. - */ - if (!first) { - struct rb_node *next = rb_next(completed); - GEM_BUG_ON(next == &wait->node); - __intel_breadcrumbs_next(engine, next); - } - - do { - struct intel_wait *crumb = to_wait(completed); - completed = rb_prev(completed); - __intel_breadcrumbs_finish(b, crumb); - } while (completed); - } - - GEM_BUG_ON(!b->irq_wait); - GEM_BUG_ON(!b->irq_armed); - GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node); - - return armed; -} - -bool intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - bool armed; - - spin_lock_irq(&b->rb_lock); - armed = __intel_engine_add_wait(engine, wait); - spin_unlock_irq(&b->rb_lock); - if (armed) - return armed; - - /* Make the caller recheck if its request has already started. 
*/ - return intel_engine_has_started(engine, wait->seqno); -} - -static inline bool chain_wakeup(struct rb_node *rb, int priority) -{ - return rb && to_wait(rb)->tsk->prio <= priority; -} + clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); -static inline int wakeup_priority(struct intel_breadcrumbs *b, - struct task_struct *tsk) -{ - if (tsk == b->signaler) - return INT_MIN; + if (b->irq_enabled) + irq_enable(engine); else - return tsk->prio; -} - -static void __intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - lockdep_assert_held(&b->rb_lock); - - if (RB_EMPTY_NODE(&wait->node)) - goto out; - - if (b->irq_wait == wait) { - const int priority = wakeup_priority(b, wait->tsk); - struct rb_node *next; - - /* We are the current bottom-half. Find the next candidate, - * the first waiter in the queue on the remaining oldest - * request. As multiple seqnos may complete in the time it - * takes us to wake up and find the next waiter, we have to - * wake up that waiter for it to perform its own coherent - * completion check. - */ - next = rb_next(&wait->node); - if (chain_wakeup(next, priority)) { - /* If the next waiter is already complete, - * wake it up and continue onto the next waiter. So - * if have a small herd, they will wake up in parallel - * rather than sequentially, which should reduce - * the overall latency in waking all the completed - * clients. - * - * However, waking up a chain adds extra latency to - * the first_waiter. This is undesirable if that - * waiter is a high priority task. - */ - u32 seqno = intel_engine_get_seqno(engine); - - while (i915_seqno_passed(seqno, to_wait(next)->seqno)) { - struct rb_node *n = rb_next(next); - - __intel_breadcrumbs_finish(b, to_wait(next)); - next = n; - if (!chain_wakeup(next, priority)) - break; - } - } - - __intel_breadcrumbs_next(engine, next); - } else { - GEM_BUG_ON(rb_first(&b->waiters) == &wait->node); - } - - GEM_BUG_ON(RB_EMPTY_NODE(&wait->node)); - rb_erase(&wait->node, &b->waiters); - RB_CLEAR_NODE(&wait->node); + irq_disable(engine); -out: - GEM_BUG_ON(b->irq_wait == wait); - GEM_BUG_ON(rb_first(&b->waiters) != - (b->irq_wait ? &b->irq_wait->node : NULL)); + spin_unlock_irqrestore(&b->irq_lock, flags); } -void intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - /* Quick check to see if this waiter was already decoupled from - * the tree by the bottom-half to avoid contention on the spinlock - * by the herd. 
- */ - if (RB_EMPTY_NODE(&wait->node)) { - GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait); - return; - } - - spin_lock_irq(&b->rb_lock); - __intel_engine_remove_wait(engine, wait); - spin_unlock_irq(&b->rb_lock); + cancel_fake_irq(engine); } -static void signaler_set_rtpriority(void) +bool i915_request_enable_breadcrumb(struct i915_request *rq) { - struct sched_param param = { .sched_priority = 1 }; - - sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); -} + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; -static int intel_breadcrumbs_signaler(void *arg) -{ - struct intel_engine_cs *engine = arg; - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct i915_request *rq, *n; + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - /* Install ourselves with high priority to reduce signalling latency */ - signaler_set_rtpriority(); + if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) + return true; - do { - bool do_schedule = true; - LIST_HEAD(list); - u32 seqno; + spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) && + !__request_completed(rq)) { + struct intel_context *ce = rq->hw_context; + struct list_head *pos; - set_current_state(TASK_INTERRUPTIBLE); - if (list_empty(&b->signals)) - goto sleep; + __intel_breadcrumbs_arm_irq(b); /* - * We are either woken up by the interrupt bottom-half, - * or by a client adding a new signaller. In both cases, - * the GPU seqno may have advanced beyond our oldest signal. - * If it has, propagate the signal, remove the waiter and - * check again with the next oldest signal. Otherwise we - * need to wait for a new interrupt from the GPU or for - * a new client. + * We keep the seqno in retirement order, so we can break + * inside intel_engine_breadcrumbs_irq as soon as we've passed + * the last completed request (or seen a request that hasn't + * event started). We could iterate the timeline->requests list, + * but keeping a separate signalers_list has the advantage of + * hopefully being much smaller than the full list and so + * provides faster iteration and detection when there are no + * more interrupts required for this context. + * + * We typically expect to add new signalers in order, so we + * start looking for our insertion point from the tail of + * the list. */ - seqno = intel_engine_get_seqno(engine); - - spin_lock_irq(&b->rb_lock); - list_for_each_entry_safe(rq, n, &b->signals, signaling.link) { - u32 this = rq->signaling.wait.seqno; + list_for_each_prev(pos, &ce->signals) { + struct i915_request *it = + list_entry(pos, typeof(*it), signal_link); - GEM_BUG_ON(!rq->signaling.wait.seqno); - - if (!i915_seqno_passed(seqno, this)) + if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno)) break; - - if (likely(this == i915_request_global_seqno(rq))) { - __intel_engine_remove_wait(engine, - &rq->signaling.wait); - - rq->signaling.wait.seqno = 0; - __list_del_entry(&rq->signaling.link); - - if (!i915_request_signaled(rq)) { - list_add_tail(&rq->signaling.link, - &list); - i915_request_get(rq); - } - } } - spin_unlock_irq(&b->rb_lock); - - if (!list_empty(&list)) { - local_bh_disable(); - list_for_each_entry_safe(rq, n, &list, signaling.link) { - dma_fence_signal(&rq->fence); - GEM_BUG_ON(!i915_request_completed(rq)); - i915_request_put(rq); - } - local_bh_enable(); /* kick start the tasklets */ - - /* - * If the engine is saturated we may be continually - * processing completed requests. This angers the - * NMI watchdog if we never let anything else - * have access to the CPU. 
Let's pretend to be nice - * and relinquish the CPU if we burn through the - * entire RT timeslice! - */ - do_schedule = need_resched(); - } - - if (unlikely(do_schedule)) { -sleep: - if (kthread_should_park()) - kthread_parkme(); - - if (unlikely(kthread_should_stop())) - break; - - schedule(); - } - } while (1); - __set_current_state(TASK_RUNNING); - - return 0; -} + list_add(&rq->signal_link, pos); + if (pos == &ce->signals) /* catch transitions from empty list */ + list_move_tail(&ce->signal_link, &b->signalers); -static void insert_signal(struct intel_breadcrumbs *b, - struct i915_request *request, - const u32 seqno) -{ - struct i915_request *iter; - - lockdep_assert_held(&b->rb_lock); - - /* - * A reasonable assumption is that we are called to add signals - * in sequence, as the requests are submitted for execution and - * assigned a global_seqno. This will be the case for the majority - * of internally generated signals (inter-engine signaling). - * - * Out of order waiters triggering random signaling enabling will - * be more problematic, but hopefully rare enough and the list - * small enough that the O(N) insertion sort is not an issue. - */ - - list_for_each_entry_reverse(iter, &b->signals, signaling.link) - if (i915_seqno_passed(seqno, iter->signaling.wait.seqno)) - break; - - list_add(&request->signaling.link, &iter->signaling.link); -} - -bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup) -{ - struct intel_engine_cs *engine = request->engine; - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct intel_wait *wait = &request->signaling.wait; - u32 seqno; - - /* - * Note that we may be called from an interrupt handler on another - * device (e.g. nouveau signaling a fence completion causing us - * to submit a request, and so enable signaling). As such, - * we need to make sure that all other users of b->rb_lock protect - * against interrupts, i.e. use spin_lock_irqsave. - */ - - /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */ - GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&request->lock); - - seqno = i915_request_global_seqno(request); - if (!seqno) /* will be enabled later upon execution */ - return true; - - GEM_BUG_ON(wait->seqno); - wait->tsk = b->signaler; - wait->request = request; - wait->seqno = seqno; - - /* - * Add ourselves into the list of waiters, but registering our - * bottom-half as the signaller thread. As per usual, only the oldest - * waiter (not just signaller) is tasked as the bottom-half waking - * up all completed waiters after the user interrupt. - * - * If we are the oldest waiter, enable the irq (after which we - * must double check that the seqno did not complete). 
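
i915_request_enable_breadcrumb() above keeps each context's ce->signals list in fence-seqno order by searching backwards from the tail, so the common in-order producer finds its slot on the first comparison. The same idiom in isolation, with a hypothetical struct item; the list helpers and i915_seqno_passed() (borrowed from i915_request.h) are the real ones:

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head link;
        u32 seqno;
};

static void insert_in_seqno_order(struct list_head *head, struct item *item)
{
        struct list_head *pos;

        /* Walk backwards: an in-order producer matches on the first test. */
        list_for_each_prev(pos, head) {
                struct item *prev = list_entry(pos, struct item, link);

                if (i915_seqno_passed(item->seqno, prev->seqno))
                        break;
        }

        /* pos is the list head itself if the list was empty or item is oldest */
        list_add(&item->link, pos);
}
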
- */ - spin_lock(&b->rb_lock); - insert_signal(b, request, seqno); - wakeup &= __intel_engine_add_wait(engine, wait); - spin_unlock(&b->rb_lock); - - if (wakeup) { - wake_up_process(b->signaler); - return !intel_wait_complete(wait); + set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); } + spin_unlock(&b->irq_lock); - return true; + return !__request_completed(rq); } -void intel_engine_cancel_signaling(struct i915_request *request) +void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_engine_cs *engine = request->engine; - struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; - GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&request->lock); - - if (!READ_ONCE(request->signaling.wait.seqno)) + if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) return; - spin_lock(&b->rb_lock); - __intel_engine_remove_wait(engine, &request->signaling.wait); - if (fetch_and_zero(&request->signaling.wait.seqno)) - __list_del_entry(&request->signaling.link); - spin_unlock(&b->rb_lock); -} - -int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct task_struct *tsk; - - spin_lock_init(&b->rb_lock); - spin_lock_init(&b->irq_lock); - - timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0); - timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0); - - INIT_LIST_HEAD(&b->signals); - - /* Spawn a thread to provide a common bottom-half for all signals. - * As this is an asynchronous interface we cannot steal the current - * task for handling the bottom-half to the user interrupt, therefore - * we create a thread to do the coherent seqno dance after the - * interrupt and then signal the waitqueue (via the dma-buf/fence). - */ - tsk = kthread_run(intel_breadcrumbs_signaler, engine, - "i915/signal:%d", engine->id); - if (IS_ERR(tsk)) - return PTR_ERR(tsk); - - b->signaler = tsk; - - return 0; -} + spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { + struct intel_context *ce = rq->hw_context; -static void cancel_fake_irq(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; + list_del(&rq->signal_link); + if (list_empty(&ce->signals)) + list_del_init(&ce->signal_link); - del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */ - del_timer_sync(&b->hangcheck); - clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + } + spin_unlock(&b->irq_lock); } -void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p) { struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned long flags; + struct intel_context *ce; + struct i915_request *rq; - spin_lock_irqsave(&b->irq_lock, flags); - - /* - * Leave the fake_irq timer enabled (if it is running), but clear the - * bit so that it turns itself off on its next wake up and goes back - * to the long hangcheck interval if still required. - */ - clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); - - if (b->irq_enabled) - irq_enable(engine); - else - irq_disable(engine); - - spin_unlock_irqrestore(&b->irq_lock, flags); -} - -void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; + if (list_empty(&b->signalers)) + return; - /* The engines should be idle and all requests accounted for! 
*/ - WARN_ON(READ_ONCE(b->irq_wait)); - WARN_ON(!RB_EMPTY_ROOT(&b->waiters)); - WARN_ON(!list_empty(&b->signals)); + drm_printf(p, "Signals:\n"); - if (!IS_ERR_OR_NULL(b->signaler)) - kthread_stop(b->signaler); + spin_lock_irq(&b->irq_lock); + list_for_each_entry(ce, &b->signalers, signal_link) { + list_for_each_entry(rq, &ce->signals, signal_link) { + drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", + rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + } + } + spin_unlock_irq(&b->irq_lock); - cancel_fake_irq(engine); + if (test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) + drm_printf(p, "Fake irq active\n"); } - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/intel_breadcrumbs.c" -#endif diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 0a610c9691fd..71c01eb13af1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -458,12 +458,6 @@ cleanup: void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno) { intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - - /* After manually advancing the seqno, fake the interrupt in case - * there are any waiters for that seqno. - */ - intel_engine_wakeup(engine); - GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno); } @@ -607,6 +601,7 @@ int intel_engine_setup_common(struct intel_engine_cs *engine) i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); + intel_engine_init_breadcrumbs(engine); intel_engine_init_execlist(engine); intel_engine_init_hangcheck(engine); intel_engine_init_batch_pool(engine); @@ -717,20 +712,14 @@ int intel_engine_init_common(struct intel_engine_cs *engine) } } - ret = intel_engine_init_breadcrumbs(engine); - if (ret) - goto err_unpin_preempt; - ret = measure_breadcrumb_dw(engine); if (ret < 0) - goto err_breadcrumbs; + goto err_unpin_preempt; engine->emit_fini_breadcrumb_dw = ret; return 0; -err_breadcrumbs: - intel_engine_fini_breadcrumbs(engine); err_unpin_preempt: if (i915->preempt_context) __intel_context_unpin(i915->preempt_context, engine); @@ -1294,12 +1283,14 @@ static void print_request(struct drm_printer *m, x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); - drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", + drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n", prefix, rq->global_seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? "*" : "", + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags) ? "+" : "", rq->fence.context, rq->fence.seqno, buf, jiffies_to_msecs(jiffies - rq->emitted_jiffies), @@ -1492,12 +1483,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...) 
{ - struct intel_breadcrumbs * const b = &engine->breadcrumbs; struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq; intel_wakeref_t wakeref; - unsigned long flags; - struct rb_node *rb; if (header) { va_list ap; @@ -1565,21 +1553,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_execlists_show_requests(engine, m, print_request, 8); - spin_lock_irqsave(&b->rb_lock, flags); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - drm_printf(m, "\t%s [%d:%c] waiting for %x\n", - w->tsk->comm, w->tsk->pid, - task_state_to_char(w->tsk), - w->seqno); - } - spin_unlock_irqrestore(&b->rb_lock, flags); - drm_printf(m, "HWSP:\n"); hexdump(m, engine->status_page.addr, PAGE_SIZE); drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); + + intel_engine_print_breadcrumbs(engine, m); } static u8 user_class_map[] = { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 668ed67336a2..b889b27f8aeb 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -743,7 +743,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } /* Papering over lost _interrupts_ immediately following the restart */ - intel_engine_wakeup(engine); + intel_engine_queue_breadcrumbs(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 953ccc2617ff..71f8ceb937ff 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -5,6 +5,7 @@ #include #include +#include #include #include "i915_gem_batch_pool.h" @@ -381,22 +382,19 @@ struct intel_engine_cs { * the overhead of waking that client is much preferred. 
*/ struct intel_breadcrumbs { - spinlock_t irq_lock; /* protects irq_*; irqsafe */ - struct intel_wait *irq_wait; /* oldest waiter by retirement */ + spinlock_t irq_lock; + struct list_head signalers; - spinlock_t rb_lock; /* protects the rb and wraps irq_lock */ - struct rb_root waiters; /* sorted by retirement, priority */ - struct list_head signals; /* sorted by retirement */ - struct task_struct *signaler; /* used for fence signalling */ + struct irq_work irq_work; /* for use from inside irq_lock */ struct timer_list fake_irq; /* used after a missed interrupt */ struct timer_list hangcheck; /* detect missed interrupts */ unsigned int hangcheck_interrupts; unsigned int irq_enabled; - unsigned int irq_count; - bool irq_armed : 1; + bool irq_armed; + bool irq_fired; } breadcrumbs; struct { @@ -885,83 +883,29 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine, void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); -/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ -int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); - -static inline void intel_wait_init(struct intel_wait *wait) -{ - wait->tsk = current; - wait->request = NULL; -} - -static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) -{ - wait->tsk = current; - wait->seqno = seqno; -} - -static inline bool intel_wait_has_seqno(const struct intel_wait *wait) -{ - return wait->seqno; -} - -static inline bool -intel_wait_update_seqno(struct intel_wait *wait, u32 seqno) -{ - wait->seqno = seqno; - return intel_wait_has_seqno(wait); -} - -static inline bool -intel_wait_update_request(struct intel_wait *wait, - const struct i915_request *rq) -{ - return intel_wait_update_seqno(wait, i915_request_global_seqno(rq)); -} - -static inline bool -intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno) -{ - return wait->seqno == seqno; -} - -static inline bool -intel_wait_check_request(const struct intel_wait *wait, - const struct i915_request *rq) -{ - return intel_wait_check_seqno(wait, i915_request_global_seqno(rq)); -} +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); -static inline bool intel_wait_complete(const struct intel_wait *wait) -{ - return RB_EMPTY_NODE(&wait->node); -} +void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine); +void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine); -bool intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait); -void intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait); -bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup); -void intel_engine_cancel_signaling(struct i915_request *request); +bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); -static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) +static inline void +intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine) { - return READ_ONCE(engine->breadcrumbs.irq_wait); + irq_work_queue(&engine->breadcrumbs.irq_work); } -unsigned int intel_engine_wakeup(struct intel_engine_cs *engine); -#define ENGINE_WAKEUP_WAITER BIT(0) -#define ENGINE_WAKEUP_ASLEEP BIT(1) - -void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine); -void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs 
*engine); - -void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); +bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine); void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p); + static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 4a83a1c6c406..88e5ab586337 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -15,7 +15,6 @@ selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) -selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) selftest(timelines, i915_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 4d4b86b5fa11..6733dc5b6b4c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -25,9 +25,12 @@ #include #include "../i915_selftest.h" +#include "i915_random.h" #include "igt_live_test.h" +#include "lib_sw_fence.h" #include "mock_context.h" +#include "mock_drm.h" #include "mock_gem_device.h" static int igt_add_request(void *arg) @@ -247,6 +250,254 @@ err_context_0: return err; } +struct smoketest { + struct intel_engine_cs *engine; + struct i915_gem_context **contexts; + atomic_long_t num_waits, num_fences; + int ncontexts, max_batch; + struct i915_request *(*request_alloc)(struct i915_gem_context *, + struct intel_engine_cs *); +}; + +static struct i915_request * +__mock_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return mock_request(engine, ctx, 0); +} + +static struct i915_request * +__live_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return i915_request_alloc(engine, ctx); +} + +static int __igt_breadcrumbs_smoketest(void *arg) +{ + struct smoketest *t = arg; + struct mutex * const BKL = &t->engine->i915->drm.struct_mutex; + const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1; + const unsigned int total = 4 * t->ncontexts + 1; + unsigned int num_waits = 0, num_fences = 0; + struct i915_request **requests; + I915_RND_STATE(prng); + unsigned int *order; + int err = 0; + + /* + * A very simple test to catch the most egregious of list handling bugs. + * + * At its heart, we simply create oodles of requests running across + * multiple kthreads and enable signaling on them, for the sole purpose + * of stressing our breadcrumb handling. The only inspection we do is + * that the fences were marked as signaled. 
+ */ + + requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL); + if (!requests) + return -ENOMEM; + + order = i915_random_order(total, &prng); + if (!order) { + err = -ENOMEM; + goto out_requests; + } + + while (!kthread_should_stop()) { + struct i915_sw_fence *submit, *wait; + unsigned int n, count; + + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + err = -ENOMEM; + break; + } + + wait = heap_fence_create(GFP_KERNEL); + if (!wait) { + i915_sw_fence_commit(submit); + heap_fence_put(submit); + err = ENOMEM; + break; + } + + i915_random_reorder(order, total, &prng); + count = 1 + i915_prandom_u32_max_state(max_batch, &prng); + + for (n = 0; n < count; n++) { + struct i915_gem_context *ctx = + t->contexts[order[n] % t->ncontexts]; + struct i915_request *rq; + + mutex_lock(BKL); + + rq = t->request_alloc(ctx, t->engine); + if (IS_ERR(rq)) { + mutex_unlock(BKL); + err = PTR_ERR(rq); + count = n; + break; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + + requests[n] = i915_request_get(rq); + i915_request_add(rq); + + mutex_unlock(BKL); + + if (err >= 0) + err = i915_sw_fence_await_dma_fence(wait, + &rq->fence, + 0, + GFP_KERNEL); + + if (err < 0) { + i915_request_put(rq); + count = n; + break; + } + } + + i915_sw_fence_commit(submit); + i915_sw_fence_commit(wait); + + if (!wait_event_timeout(wait->wait, + i915_sw_fence_done(wait), + HZ / 2)) { + struct i915_request *rq = requests[count - 1]; + + pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n", + count, + rq->fence.context, rq->fence.seqno, + t->engine->name); + i915_gem_set_wedged(t->engine->i915); + GEM_BUG_ON(!i915_request_completed(rq)); + i915_sw_fence_wait(wait); + err = -EIO; + } + + for (n = 0; n < count; n++) { + struct i915_request *rq = requests[n]; + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &rq->fence.flags)) { + pr_err("%llu:%llu was not signaled!\n", + rq->fence.context, rq->fence.seqno); + err = -EINVAL; + } + + i915_request_put(rq); + } + + heap_fence_put(wait); + heap_fence_put(submit); + + if (err < 0) + break; + + num_fences += count; + num_waits++; + + cond_resched(); + } + + atomic_long_add(num_fences, &t->num_fences); + atomic_long_add(num_waits, &t->num_waits); + + kfree(order); +out_requests: + kfree(requests); + return err; +} + +static int mock_breadcrumbs_smoketest(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct smoketest t = { + .engine = i915->engine[RCS], + .ncontexts = 1024, + .max_batch = 1024, + .request_alloc = __mock_request_alloc + }; + unsigned int ncpus = num_online_cpus(); + struct task_struct **threads; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. 
+ * See __igt_breadcrumbs_smoketest(); + */ + + threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); + if (!threads) + return -ENOMEM; + + t.contexts = + kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); + if (!t.contexts) { + ret = -ENOMEM; + goto out_threads; + } + + mutex_lock(&t.engine->i915->drm.struct_mutex); + for (n = 0; n < t.ncontexts; n++) { + t.contexts[n] = mock_context(t.engine->i915, "mock"); + if (!t.contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } + } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + + for (n = 0; n < ncpus; n++) { + threads[n] = kthread_run(__igt_breadcrumbs_smoketest, + &t, "igt/%d", n); + if (IS_ERR(threads[n])) { + ret = PTR_ERR(threads[n]); + ncpus = n; + break; + } + + get_task_struct(threads[n]); + } + + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + + for (n = 0; n < ncpus; n++) { + int err; + + err = kthread_stop(threads[n]); + if (err < 0 && !ret) + ret = err; + + put_task_struct(threads[n]); + } + pr_info("Completed %lu waits for %lu fence across %d cpus\n", + atomic_long_read(&t.num_waits), + atomic_long_read(&t.num_fences), + ncpus); + + mutex_lock(&t.engine->i915->drm.struct_mutex); +out_contexts: + for (n = 0; n < t.ncontexts; n++) { + if (!t.contexts[n]) + break; + mock_context_close(t.contexts[n]); + } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + kfree(t.contexts); +out_threads: + kfree(threads); + + return ret; +} + int i915_request_mock_selftests(void) { static const struct i915_subtest tests[] = { @@ -254,6 +505,7 @@ int i915_request_mock_selftests(void) SUBTEST(igt_wait_request), SUBTEST(igt_fence_wait), SUBTEST(igt_request_rewind), + SUBTEST(mock_breadcrumbs_smoketest), }; struct drm_i915_private *i915; intel_wakeref_t wakeref; @@ -812,6 +1064,178 @@ out_unlock: return err; } +static int +max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + struct i915_request *rq; + int ret; + + /* + * Before execlists, all contexts share the same ringbuffer. With + * execlists, each context/engine has a separate ringbuffer and + * for the purposes of this test, inexhaustible. + * + * For the global ringbuffer though, we have to be very careful + * that we do not wrap while preventing the execution of requests + * with a unsignaled fence. + */ + if (HAS_EXECLISTS(ctx->i915)) + return INT_MAX; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + } else { + int sz; + + ret = rq->ring->size - rq->reserved_space; + i915_request_add(rq); + + sz = rq->ring->emit - rq->head; + if (sz < 0) + sz += rq->ring->size; + ret /= sz; + ret /= 2; /* leave half spare, in case of emergency! */ + } + + return ret; +} + +static int live_breadcrumbs_smoketest(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct smoketest t[I915_NUM_ENGINES]; + unsigned int ncpus = num_online_cpus(); + unsigned long num_waits, num_fences; + struct intel_engine_cs *engine; + struct task_struct **threads; + struct igt_live_test live; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. + * See __igt_breadcrumbs_smoketest(); + * + * On real hardware this time. 
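
Both smoketests drive __igt_breadcrumbs_smoketest() from one kthread per online CPU for the selftest timeout and then reap the workers. Stripped of the i915 plumbing, the fan-out reduces to the sketch below; run_workers() and its arguments are placeholders, and thread_fn is expected to loop until kthread_should_stop(), as __igt_breadcrumbs_smoketest() does.

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

static int run_workers(int (*thread_fn)(void *), void *arg, long timeout_jiffies)
{
        unsigned int ncpus = num_online_cpus();
        struct task_struct **threads;
        unsigned int n;
        int ret = 0;

        threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
        if (!threads)
                return -ENOMEM;

        for (n = 0; n < ncpus; n++) {
                threads[n] = kthread_run(thread_fn, arg, "igt/%d", n);
                if (IS_ERR(threads[n])) {
                        ret = PTR_ERR(threads[n]);
                        ncpus = n;
                        break;
                }
                get_task_struct(threads[n]);    /* keep the task across stop */
        }

        msleep(jiffies_to_msecs(timeout_jiffies));

        for (n = 0; n < ncpus; n++) {
                int err = kthread_stop(threads[n]);

                if (err < 0 && !ret)
                        ret = err;
                put_task_struct(threads[n]);
        }

        kfree(threads);
        return ret;
}
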
+ */ + + wakeref = intel_runtime_pm_get(i915); + + file = mock_file(i915); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto out_rpm; + } + + threads = kcalloc(ncpus * I915_NUM_ENGINES, + sizeof(*threads), + GFP_KERNEL); + if (!threads) { + ret = -ENOMEM; + goto out_file; + } + + memset(&t[0], 0, sizeof(t[0])); + t[0].request_alloc = __live_request_alloc; + t[0].ncontexts = 64; + t[0].contexts = kmalloc_array(t[0].ncontexts, + sizeof(*t[0].contexts), + GFP_KERNEL); + if (!t[0].contexts) { + ret = -ENOMEM; + goto out_threads; + } + + mutex_lock(&i915->drm.struct_mutex); + for (n = 0; n < t[0].ncontexts; n++) { + t[0].contexts[n] = live_context(i915, file); + if (!t[0].contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } + } + + ret = igt_live_test_begin(&live, i915, __func__, ""); + if (ret) + goto out_contexts; + + for_each_engine(engine, i915, id) { + t[id] = t[0]; + t[id].engine = engine; + t[id].max_batch = max_batches(t[0].contexts[0], engine); + if (t[id].max_batch < 0) { + ret = t[id].max_batch; + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + /* One ring interleaved between requests from all cpus */ + t[id].max_batch /= num_online_cpus() + 1; + pr_debug("Limiting batches to %d requests on %s\n", + t[id].max_batch, engine->name); + + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk; + + tsk = kthread_run(__igt_breadcrumbs_smoketest, + &t[id], "igt/%d.%d", id, n); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + + get_task_struct(tsk); + threads[id * ncpus + n] = tsk; + } + } + mutex_unlock(&i915->drm.struct_mutex); + + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + +out_flush: + num_waits = 0; + num_fences = 0; + for_each_engine(engine, i915, id) { + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk = threads[id * ncpus + n]; + int err; + + if (!tsk) + continue; + + err = kthread_stop(tsk); + if (err < 0 && !ret) + ret = err; + + put_task_struct(tsk); + } + + num_waits += atomic_long_read(&t[id].num_waits); + num_fences += atomic_long_read(&t[id].num_fences); + } + pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", + num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus); + + mutex_lock(&i915->drm.struct_mutex); + ret = igt_live_test_end(&live) ?: ret; +out_contexts: + mutex_unlock(&i915->drm.struct_mutex); + kfree(t[0].contexts); +out_threads: + kfree(threads); +out_file: + mock_file_free(i915, file); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + + return ret; +} + int i915_request_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -819,6 +1243,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_all_engines), SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), + SUBTEST(live_breadcrumbs_smoketest), }; if (i915_terminally_wedged(&i915->gpu_error)) diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 0e70df0230b8..9ebd9225684e 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -185,11 +185,6 @@ void igt_spinner_fini(struct igt_spinner *spin) bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) { - if (!wait_event_timeout(rq->execute, - READ_ONCE(rq->global_seqno), - msecs_to_jiffies(10))) - return false; - return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), rq->fence.seqno), 10) && diff --git 
a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c deleted file mode 100644 index f03b407fdbe2..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "../i915_selftest.h" -#include "i915_random.h" - -#include "mock_gem_device.h" -#include "mock_engine.h" - -static int check_rbtree(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node *rb; - int n; - - if (&b->irq_wait->node != rb_first(&b->waiters)) { - pr_err("First waiter does not match first element of wait-tree\n"); - return -EINVAL; - } - - n = find_first_bit(bitmap, count); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = container_of(rb, typeof(*w), node); - int idx = w - waiters; - - if (!test_bit(idx, bitmap)) { - pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n", - idx, w->seqno); - return -EINVAL; - } - - if (n != idx) { - pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n", - idx, w->seqno, n); - return -EINVAL; - } - - n = find_next_bit(bitmap, count, n + 1); - } - - return 0; -} - -static int check_completion(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - int n; - - for (n = 0; n < count; n++) { - if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap)) - continue; - - pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n", - n, waiters[n].seqno, - intel_wait_complete(&waiters[n]) ? "complete" : "active", - test_bit(n, bitmap) ? 
"active" : "complete"); - return -EINVAL; - } - - return 0; -} - -static int check_rbtree_empty(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - if (b->irq_wait) { - pr_err("Empty breadcrumbs still has a waiter\n"); - return -EINVAL; - } - - if (!RB_EMPTY_ROOT(&b->waiters)) { - pr_err("Empty breadcrumbs, but wait-tree not empty\n"); - return -EINVAL; - } - - return 0; -} - -static int igt_random_insert_remove(void *arg) -{ - const u32 seqno_bias = 0x1000; - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned int *order; - unsigned long *bitmap; - int err = -ENOMEM; - int n; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - order = i915_random_order(count, &prng); - if (!order) - goto out_bitmap; - - for (n = 0; n < count; n++) - intel_wait_init_for_seqno(&waiters[n], seqno_bias + n); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - - /* Add and remove waiters into the rbtree in random order. At each - * step, we verify that the rbtree is correctly ordered. - */ - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_add_wait(engine, &waiters[i]); - __set_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - i915_random_reorder(order, count, &prng); - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_remove_wait(engine, &waiters[i]); - __clear_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - err = check_rbtree_empty(engine); -out_order: - kfree(order); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -static int igt_insert_complete(void *arg) -{ - const u32 seqno_bias = 0x1000; - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned long *bitmap; - int err = -ENOMEM; - int n, m; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - for (n = 0; n < count; n++) { - intel_wait_init_for_seqno(&waiters[n], n + seqno_bias); - intel_engine_add_wait(engine, &waiters[n]); - __set_bit(n, bitmap); - } - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_bitmap; - - /* On each step, we advance the seqno so that several waiters are then - * complete (we increase the seqno by increasingly larger values to - * retire more and more waiters at once). All retired waiters should - * be woken and removed from the rbtree, and so that we check. 
- */ - for (n = 0; n < count; n = m) { - int seqno = 2 * n; - - GEM_BUG_ON(find_first_bit(bitmap, count) != n); - - if (intel_wait_complete(&waiters[n])) { - pr_err("waiter[%d, seqno=%d] completed too early\n", - n, waiters[n].seqno); - err = -EINVAL; - goto out_bitmap; - } - - /* complete the following waiters */ - mock_seqno_advance(engine, seqno + seqno_bias); - for (m = n; m <= seqno; m++) { - if (m == count) - break; - - GEM_BUG_ON(!test_bit(m, bitmap)); - __clear_bit(m, bitmap); - } - - intel_engine_remove_wait(engine, &waiters[n]); - RB_CLEAR_NODE(&waiters[n].node); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) { - pr_err("rbtree corrupt after seqno advance to %d\n", - seqno + seqno_bias); - goto out_bitmap; - } - - err = check_completion(engine, bitmap, waiters, count); - if (err) { - pr_err("completions after seqno advance to %d failed\n", - seqno + seqno_bias); - goto out_bitmap; - } - } - - err = check_rbtree_empty(engine); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -struct igt_wakeup { - struct task_struct *tsk; - atomic_t *ready, *set, *done; - struct intel_engine_cs *engine; - unsigned long flags; -#define STOP 0 -#define IDLE 1 - wait_queue_head_t *wq; - u32 seqno; -}; - -static bool wait_for_ready(struct igt_wakeup *w) -{ - DEFINE_WAIT(ready); - - set_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->done)) - wake_up_var(w->done); - - if (test_bit(STOP, &w->flags)) - goto out; - - for (;;) { - prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE); - if (atomic_read(w->ready) == 0) - break; - - schedule(); - } - finish_wait(w->wq, &ready); - -out: - clear_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->set)) - wake_up_var(w->set); - - return !test_bit(STOP, &w->flags); -} - -static int igt_wakeup_thread(void *arg) -{ - struct igt_wakeup *w = arg; - struct intel_wait wait; - - while (wait_for_ready(w)) { - GEM_BUG_ON(kthread_should_stop()); - - intel_wait_init_for_seqno(&wait, w->seqno); - intel_engine_add_wait(w->engine, &wait); - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (i915_seqno_passed(intel_engine_get_seqno(w->engine), - w->seqno)) - break; - - if (test_bit(STOP, &w->flags)) /* emergency escape */ - break; - - schedule(); - } - intel_engine_remove_wait(w->engine, &wait); - __set_current_state(TASK_RUNNING); - } - - return 0; -} - -static void igt_wake_all_sync(atomic_t *ready, - atomic_t *set, - atomic_t *done, - wait_queue_head_t *wq, - int count) -{ - atomic_set(set, count); - atomic_set(ready, 0); - wake_up_all(wq); - - wait_var_event(set, !atomic_read(set)); - atomic_set(ready, count); - atomic_set(done, count); -} - -static int igt_wakeup(void *arg) -{ - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct igt_wakeup *waiters; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); - const int count = 4096; - const u32 max_seqno = count / 4; - atomic_t ready, set, done; - int err = -ENOMEM; - int n, step; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - /* Create a large number of threads, each waiting on a random seqno. - * Multiple waiters will be waiting for the same seqno. 
- */ - atomic_set(&ready, count); - for (n = 0; n < count; n++) { - waiters[n].wq = &wq; - waiters[n].ready = &ready; - waiters[n].set = &set; - waiters[n].done = &done; - waiters[n].engine = engine; - waiters[n].flags = BIT(IDLE); - - waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n], - "i915/igt:%d", n); - if (IS_ERR(waiters[n].tsk)) - goto out_waiters; - - get_task_struct(waiters[n].tsk); - } - - for (step = 1; step <= max_seqno; step <<= 1) { - u32 seqno; - - /* The waiter threads start paused as we assign them a random - * seqno and reset the engine. Once the engine is reset, - * we signal that the threads may begin their wait upon their - * seqno. - */ - for (n = 0; n < count; n++) { - GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags)); - waiters[n].seqno = - 1 + prandom_u32_state(&prng) % max_seqno; - } - mock_seqno_advance(engine, 0); - igt_wake_all_sync(&ready, &set, &done, &wq, count); - - /* Simulate the GPU doing chunks of work, with one or more - * seqno appearing to finish at the same time. A random number - * of threads will be waiting upon the update and hopefully be - * woken. - */ - for (seqno = 1; seqno <= max_seqno + step; seqno += step) { - usleep_range(50, 500); - mock_seqno_advance(engine, seqno); - } - GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno); - - /* With the seqno now beyond any of the waiting threads, they - * should all be woken, see that they are complete and signal - * that they are ready for the next test. We wait until all - * threads are complete and waiting for us (i.e. not a seqno). - */ - if (!wait_var_event_timeout(&done, - !atomic_read(&done), 10 * HZ)) { - pr_err("Timed out waiting for %d remaining waiters\n", - atomic_read(&done)); - err = -ETIMEDOUT; - break; - } - - err = check_rbtree_empty(engine); - if (err) - break; - } - -out_waiters: - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - set_bit(STOP, &waiters[n].flags); - } - mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */ - igt_wake_all_sync(&ready, &set, &done, &wq, n); - - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - kthread_stop(waiters[n].tsk); - put_task_struct(waiters[n].tsk); - } - - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -int intel_breadcrumbs_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_random_insert_remove), - SUBTEST(igt_insert_complete), - SUBTEST(igt_wakeup), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_put(&i915->drm); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 2c38ea5892d9..7b6f3bea9ef8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -1127,7 +1127,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, wait_for_completion(&arg.completion); - if (wait_for(waitqueue_active(&rq->execute), 10)) { + if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { struct drm_printer p = drm_info_printer(i915->drm.dev); pr_err("igt/evict_vma kthread did not wait\n"); diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index b26f07b55d86..2bfa72c1654b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -76,3 
+76,57 @@ void timed_fence_fini(struct timed_fence *tf) destroy_timer_on_stack(&tf->timer); i915_sw_fence_fini(&tf->fence); } + +struct heap_fence { + struct i915_sw_fence fence; + union { + struct kref ref; + struct rcu_head rcu; + }; +}; + +static int __i915_sw_fence_call +heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + switch (state) { + case FENCE_COMPLETE: + break; + + case FENCE_FREE: + heap_fence_put(&h->fence); + } + + return NOTIFY_DONE; +} + +struct i915_sw_fence *heap_fence_create(gfp_t gfp) +{ + struct heap_fence *h; + + h = kmalloc(sizeof(*h), gfp); + if (!h) + return NULL; + + i915_sw_fence_init(&h->fence, heap_fence_notify); + refcount_set(&h->ref.refcount, 2); + + return &h->fence; +} + +static void heap_fence_release(struct kref *ref) +{ + struct heap_fence *h = container_of(ref, typeof(*h), ref); + + i915_sw_fence_fini(&h->fence); + + kfree_rcu(h, rcu); +} + +void heap_fence_put(struct i915_sw_fence *fence) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + kref_put(&h->ref, heap_fence_release); +} diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h index 474aafb92ae1..1f9927e10f3a 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h @@ -39,4 +39,7 @@ struct timed_fence { void timed_fence_init(struct timed_fence *tf, unsigned long expires); void timed_fence_fini(struct timed_fence *tf); +struct i915_sw_fence *heap_fence_create(gfp_t gfp); +void heap_fence_put(struct i915_sw_fence *fence); + #endif /* _LIB_SW_FENCE_H_ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 3b226ebc6bc4..08f0cab02e0f 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -86,17 +86,21 @@ static struct mock_request *first_request(struct mock_engine *engine) static void advance(struct mock_request *request) { list_del_init(&request->link); - mock_seqno_advance(request->base.engine, request->base.global_seqno); + intel_engine_write_global_seqno(request->base.engine, + request->base.global_seqno); i915_request_mark_complete(&request->base); GEM_BUG_ON(!i915_request_completed(&request->base)); + + intel_engine_queue_breadcrumbs(request->base.engine); } static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); struct mock_request *request; + unsigned long flags; - spin_lock(&engine->hw_lock); + spin_lock_irqsave(&engine->hw_lock, flags); /* Timer fired, first request is complete */ request = first_request(engine); @@ -116,7 +120,7 @@ static void hw_delay_complete(struct timer_list *t) advance(request); } - spin_unlock(&engine->hw_lock); + spin_unlock_irqrestore(&engine->hw_lock, flags); } static void mock_context_unpin(struct intel_context *ce) @@ -191,11 +195,12 @@ static void mock_submit_request(struct i915_request *request) struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); + unsigned long flags; i915_request_submit(request); GEM_BUG_ON(!request->global_seqno); - spin_lock_irq(&engine->hw_lock); + spin_lock_irqsave(&engine->hw_lock, flags); list_add_tail(&mock->link, &engine->hw_queue); if (mock->link.prev == &engine->hw_queue) { if (mock->delay) @@ -203,7 +208,7 @@ static void 
mock_submit_request(struct i915_request *request) else advance(mock); } - spin_unlock_irq(&engine->hw_lock); + spin_unlock_irqrestore(&engine->hw_lock, flags); } struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, @@ -273,7 +278,7 @@ void mock_engine_flush(struct intel_engine_cs *engine) void mock_engine_reset(struct intel_engine_cs *engine) { - intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0); + intel_engine_write_global_seqno(engine, 0); } void mock_engine_free(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h index 133d0c21790d..b9cc3a245f16 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.h +++ b/drivers/gpu/drm/i915/selftests/mock_engine.h @@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine); void mock_engine_reset(struct intel_engine_cs *engine); void mock_engine_free(struct intel_engine_cs *engine); -static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - intel_engine_wakeup(engine); -} - #endif /* !__MOCK_ENGINE_H__ */ -- cgit v1.2.3
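The lib_sw_fence.c hunk above also introduces heap-allocated sw-fence helpers, heap_fence_create() and heap_fence_put(), for later selftests. The fence is created with a reference count of two: one reference is dropped by the FENCE_FREE notification once the fence has completed, the other is dropped explicitly by the caller. A minimal usage sketch follows; it is not part of the series, the function name igt_heap_fence_example is made up for illustration, and only heap_fence_create(), heap_fence_put() and i915_sw_fence_commit() are taken from the tree.

#include "../i915_selftest.h"
#include "lib_sw_fence.h"

static int igt_heap_fence_example(void *arg)
{
	struct i915_sw_fence *fence;

	/* Allocate a standalone fence on the heap; it starts with two refs. */
	fence = heap_fence_create(GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	/* ... hook other work up to wait upon the fence here ... */

	/*
	 * Committing the fence completes it (nothing is pending in this
	 * sketch), which sends FENCE_COMPLETE and then FENCE_FREE to
	 * heap_fence_notify(), dropping the notification reference.
	 */
	i915_sw_fence_commit(fence);

	/* Drop the caller's reference; the fence is freed via kfree_rcu(). */
	heap_fence_put(fence);

	return 0;
}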