author	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-19 16:24:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-19 16:24:24 -0700
commit	574cc4539762561d96b456dbc0544d8898bd4c6e (patch)
tree	07d84db8cf9fd30cbde6f539ce3a3f6116593e41 /drivers/gpu/drm/i915/gt/selftest_timeline.c
parent	3c2edc36a77420d8be05d656019dbc8c31535992 (diff)
parent	945b584c94f8c665b2df3834a8a6a8faf256cd5f (diff)
Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main pull request for the 5.4-rc1 merge window. I don't
  think there is anything outstanding, so next week should just be
  fixes, but we'll see if I missed anything. I landed some fixes
  earlier in the week but got delayed writing the summary and sending
  it out, due to a mix of a sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out
  of the way instead of delaying it longer. It's also pretty large in
  commit count and in new amd header file size.

  The largest thing is four new amdgpu products (navi12/14, arcturus
  and renoir APU support).

  Otherwise it's pretty much lots of work across the board: i915 has
  started landing tigerlake support, lots of icelake fixes and lots of
  locking rework for future gpu support, lots of header file rework
  (drmP.h is nearly gone), and some old legacy hacks (DRM_WAIT_ON)
  have been pushed into the places they are needed.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence
   - selftests

  ttm:
   - embed drm_gem_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x: audio graph card support
   - tc358767: aux data handling rework
   - ti-sn65dsi86: debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp LS037V7DW01,
     Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general refactoring all over
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes,
     DSI suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT: command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attempting to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notification as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_timeline.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/selftest_timeline.c	846
1 file changed, 846 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
new file mode 100644
index 000000000000..321481403165
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -0,0 +1,846 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "intel_gt.h"
+
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
+
+#include "../selftests/igt_flush_test.h"
+#include "../selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
+
+static struct page *hwsp_page(struct intel_timeline *tl)
+{
+ struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ return sg_page(obj->mm.pages->sgl);
+}
+
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
+{
+ unsigned long address = (unsigned long)page_address(hwsp_page(tl));
+
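+ /* Convert the page's kernel address plus offset into a global cacheline index */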
+ return (address + tl->hwsp_offset) / CACHELINE_BYTES;
+}
+
+#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
+
+struct mock_hwsp_freelist {
+ struct drm_i915_private *i915;
+ struct radix_tree_root cachelines;
+ struct intel_timeline **history;
+ unsigned long count, max;
+ struct rnd_state prng;
+};
+
+enum {
+ SHUFFLE = BIT(0),
+};
+
+static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
+ unsigned int idx,
+ struct intel_timeline *tl)
+{
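+ /* Swap the new timeline into the history slot and release the old occupant, if any */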
+ tl = xchg(&state->history[idx], tl);
+ if (tl) {
+ radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+ intel_timeline_put(tl);
+ }
+}
+
+static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
+ unsigned int count,
+ unsigned int flags)
+{
+ struct intel_timeline *tl;
+ unsigned int idx;
+
+ while (count--) {
+ unsigned long cacheline;
+ int err;
+
+ tl = intel_timeline_create(&state->i915->gt, NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ cacheline = hwsp_cacheline(tl);
+ err = radix_tree_insert(&state->cachelines, cacheline, tl);
+ if (err) {
+ if (err == -EEXIST) {
+ pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
+ cacheline);
+ }
+ intel_timeline_put(tl);
+ return err;
+ }
+
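+ /* Track the newest timelines in a fixed-size ring; old entries are released on overwrite */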
+ idx = state->count++ % state->max;
+ __mock_hwsp_record(state, idx, tl);
+ }
+
+ if (flags & SHUFFLE)
+ i915_prandom_shuffle(state->history,
+ sizeof(*state->history),
+ min(state->count, state->max),
+ &state->prng);
+
+ count = i915_prandom_u32_max_state(min(state->count, state->max),
+ &state->prng);
+ while (count--) {
+ idx = --state->count % state->max;
+ __mock_hwsp_record(state, idx, NULL);
+ }
+
+ return 0;
+}
+
+static int mock_hwsp_freelist(void *arg)
+{
+ struct mock_hwsp_freelist state;
+ const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "linear", 0 },
+ { "shuffled", SHUFFLE },
+ { },
+ }, *p;
+ unsigned int na;
+ int err = 0;
+
+ INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
+ state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
+
+ state.i915 = mock_gem_device();
+ if (!state.i915)
+ return -ENOMEM;
+
+ /*
+ * Create a bunch of timelines and check that their HWSP do not overlap.
+ * Free some, and try again.
+ */
+
+ state.max = PAGE_SIZE / sizeof(*state.history);
+ state.count = 0;
+ state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
+ if (!state.history) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ mutex_lock(&state.i915->drm.struct_mutex);
+ for (p = phases; p->name; p++) {
+ pr_debug("%s(%s)\n", __func__, p->name);
+ for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
+ err = __mock_hwsp_timeline(&state, na, p->flags);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ for (na = 0; na < state.max; na++)
+ __mock_hwsp_record(&state, na, NULL);
+ mutex_unlock(&state.i915->drm.struct_mutex);
+ kfree(state.history);
+err_put:
+ drm_dev_put(&state.i915->drm);
+ return err;
+}
+
+struct __igt_sync {
+ const char *name;
+ u32 seqno;
+ bool expected;
+ bool set;
+};
+
+static int __igt_sync(struct intel_timeline *tl,
+ u64 ctx,
+ const struct __igt_sync *p,
+ const char *name)
+{
+ int ret;
+
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
+ name, p->name, ctx, p->seqno, yesno(p->expected));
+ return -EINVAL;
+ }
+
+ if (p->set) {
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int igt_sync(void *arg)
+{
+ const struct __igt_sync pass[] = {
+ { "unset", 0, false, false },
+ { "new", 0, false, true },
+ { "0a", 0, true, true },
+ { "1a", 1, false, true },
+ { "1b", 1, true, true },
+ { "0b", 0, true, false },
+ { "2a", 2, false, true },
+ { "4", 4, false, true },
+ { "INT_MAX", INT_MAX, false, true },
+ { "INT_MAX-1", INT_MAX-1, true, false },
+ { "INT_MAX+1", (u32)INT_MAX+1, false, true },
+ { "INT_MAX", INT_MAX, true, false },
+ { "UINT_MAX", UINT_MAX, false, true },
+ { "wrap", 0, false, true },
+ { "unwrap", UINT_MAX, true, false },
+ {},
+ }, *p;
+ struct intel_timeline tl;
+ int order, offset;
+ int ret = -ENODEV;
+
+ mock_timeline_init(&tl, 0);
+ for (p = pass; p->name; p++) {
+ for (order = 1; order < 64; order++) {
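+ /* Probe ctx ids just below, at and (for order > 1) just above each power of two */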
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ ret = __igt_sync(&tl, ctx, p, "1");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+ mock_timeline_fini(&tl);
+
+ mock_timeline_init(&tl, 0);
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ for (p = pass; p->name; p++) {
+ ret = __igt_sync(&tl, ctx, p, "2");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+out:
+ mock_timeline_fini(&tl);
+ return ret;
+}
+
+static unsigned int random_engine(struct rnd_state *rnd)
+{
+ return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
+}
+
+static int bench_sync(void *arg)
+{
+ struct rnd_state prng;
+ struct intel_timeline tl;
+ unsigned long end_time, count;
+ u64 prng32_1M;
+ ktime_t kt;
+ int order, last_order;
+
+ mock_timeline_init(&tl, 0);
+
+ /* Lookups from cache are very fast and so the random number generation
+ * and the loop itself becomes a significant factor in the per-iteration
+ * timings. We try to compensate the results by measuring the overhead
+ * of the prng and subtract it from the reported results.
+ */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 x;
+
+ /* Make sure the compiler doesn't optimise away the prng call */
+ WRITE_ONCE(x, prandom_u32_state(&prng));
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_debug("%s: %lu random evaluations, %lluns/prng\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
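+ /* Average cost of a single prng call, in nanoseconds scaled by 2^20 (fixed point) */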
+ prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
+
+ /* Benchmark (only) setting random context ids */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ __intel_timeline_sync_set(&tl, id, 0);
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
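+ /* Remove the estimated cost of the two 32-bit prng draws behind each 64-bit id */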
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
+ mock_timeline_fini(&tl);
+ pr_err("Lookup of %llu failed\n", id);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ mock_timeline_init(&tl, 0);
+
+ /* Benchmark setting the first N (in order) contexts */
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ __intel_timeline_sync_set(&tl, count++, 0);
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
+ pr_err("Lookup of %lu failed\n", end_time);
+ mock_timeline_fini(&tl);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ mock_timeline_init(&tl, 0);
+
+ /* Benchmark searching for a random context id and maybe changing it */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 id = random_engine(&prng);
+ u32 seqno = prandom_u32_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+ __intel_timeline_sync_set(&tl, id, seqno);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ /* Benchmark searching for a known context id and changing the seqno */
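+ /* Advance order along the Fibonacci sequence (1, 2, 3, 5, 8, ...) to vary the key spacing */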
+ for (last_order = 1, order = 1; order < 32;
+ ({ int tmp = last_order; last_order = order; order += tmp; })) {
+ unsigned int mask = BIT(order) - 1;
+
+ mock_timeline_init(&tl, 0);
+
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ /* Without assuming too many details of the underlying
+ * implementation, try to identify its phase-changes
+ * (if any)!
+ */
+ u64 id = (u64)(count & mask) << order;
+
+ __intel_timeline_sync_is_later(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
+ __func__, count, order,
+ (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_fini(&tl);
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int intel_timeline_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(mock_hwsp_freelist),
+ SUBTEST(igt_sync),
+ SUBTEST(bench_sync),
+ };
+
+ return i915_subtests(tests, NULL);
+}
+
+static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
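+ /*
+ * The MI_STORE_DWORD_IMM layout varies with the hardware generation:
+ * gen8+ takes a 64-bit (low, high) address, gen4-7 place a reserved
+ * dword before the 32-bit address, and gen2/3 use a 3-dword form
+ * padded with MI_NOOP to fill the 4 dwords reserved above.
+ */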
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = 0;
+ *cs++ = value;
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ *cs++ = addr;
+ *cs++ = value;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = addr;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_request *
+tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
+{
+ struct i915_request *rq;
+ int err;
+
+ lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
+
+ err = intel_timeline_pin(tl);
+ if (err) {
+ rq = ERR_PTR(err);
+ goto out;
+ }
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ goto out_unpin;
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_unpin:
+ intel_timeline_unpin(tl);
+out:
+ if (IS_ERR(rq))
+ pr_err("Failed to write to timeline!\n");
+ return rq;
+}
+
+static struct intel_timeline *
+checked_intel_timeline_create(struct drm_i915_private *i915)
+{
+ struct intel_timeline *tl;
+
+ tl = intel_timeline_create(&i915->gt, NULL);
+ if (IS_ERR(tl))
+ return tl;
+
+ if (*tl->hwsp_seqno != tl->seqno) {
+ pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+ *tl->hwsp_seqno, tl->seqno);
+ intel_timeline_put(tl);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return tl;
+}
+
+static int live_hwsp_engine(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct drm_i915_private *i915 = arg;
+ struct intel_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ tl = checked_intel_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ timelines[count++] = tl;
+ }
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct intel_timeline *tl = timelines[n];
+
+ if (!err && *tl->hwsp_seqno != n) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ n, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+ intel_timeline_put(tl);
+ }
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kvfree(timelines);
+
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_alternate(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct drm_i915_private *i915 = arg;
+ struct intel_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots with adjacent
+ * engines.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ tl = checked_intel_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ timelines[count++] = tl;
+ }
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct intel_timeline *tl = timelines[n];
+
+ if (!err && *tl->hwsp_seqno != n) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ n, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+ intel_timeline_put(tl);
+ }
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kvfree(timelines);
+
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_wrap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct intel_timeline *tl;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ tl = intel_timeline_create(&i915->gt, NULL);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out_rpm;
+ }
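+ /* Wrap handling requires a sub-allocated HWSP cacheline; quietly skip otherwise */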
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out_free;
+
+ err = intel_timeline_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, i915, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
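+ /* Park the timeline on the brink of u32 wraparound so the second seqno allocation below wraps */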
+ tl->seqno = -4u;
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_unlock(&tl->mutex);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_unlock(&tl->mutex);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ i915_retire_requests(i915); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ intel_timeline_unpin(tl);
+out_free:
+ intel_timeline_put(tl);
+out_rpm:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int live_hwsp_recycle(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count;
+ int err = 0;
+
+ /*
+ * Check seqno writes into one timeline at a time. We expect to
+ * recycle the breadcrumb slot between iterations and neither
+ * want to confuse ourselves or the GPU.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ do {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ tl = checked_intel_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ intel_timeline_put(tl);
+ err = -EIO;
+ goto out;
+ }
+
+ if (*tl->hwsp_seqno != count) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ count, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+
+ intel_timeline_put(tl);
+ count++;
+
+ if (err)
+ goto out;
+ } while (!__igt_timeout(end_time, NULL));
+ }
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_hwsp_recycle),
+ SUBTEST(live_hwsp_engine),
+ SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}