Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c          |  25
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c         |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c            |  12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c  |   8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c    | 457
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c     |   1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c      |   8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c        |  21
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c     |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c        |  37
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_sw_fence.c       |   9
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c       | 120
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c            |  16
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c      |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c         |   9
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c           |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c     | 301
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c           | 446
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c        | 166
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c   | 423
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c        |  34
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c         | 145
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c     |  54
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c        |  12
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.h        |   7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.c         |  10
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.h         |   2
30 files changed, 1928 insertions(+), 417 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 391f3d9ffdf1..419fd4d6a8f0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876f..90721b54e7ae 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -1449,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1535,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
}
*vaddr = 0xdeadbeaf;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vm, NULL);
@@ -1653,7 +1650,7 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1709,16 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
+ mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
@@ -1734,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1764,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b923..27d8f853111b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae..6fd70d326468 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915)
static int pm_prepare(struct drm_i915_private *i915)
{
- int err = 0;
-
- if (i915_gem_suspend(i915)) {
- pr_err("i915_gem_suspend failed\n");
- err = -EINVAL;
- }
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static void pm_suspend(struct drm_i915_private *i915)
@@ -220,5 +215,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a..e43630b40fce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(&i915->gpu_error);
+ return !i915_terminally_wedged(i915);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS]);
+ return intel_engine_can_store_dword(i915->engine[RCS0]);
}
static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319..4e1b6efc6b22 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
}
for (n = 0; n < nctx; n++) {
- ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+ ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}
-static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+static noinline int cpu_check(struct drm_i915_gem_object *obj,
+ unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], m);
+ pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ __builtin_return_address(0), idx,
+ n, real_page_count(obj), m, max,
+ map[m], m);
err = -EINVAL;
goto out_unmap;
}
@@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], STACK_MAGIC);
+ pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ __builtin_return_address(0), idx, n, m,
+ map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj = NULL;
- unsigned long ncontexts, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -495,44 +496,167 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+
+ ndwords++;
+ ncontexts++;
+ }
+
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
+
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_shared_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *parent;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_live_test t;
+ struct drm_file *file;
+ int err = 0;
+
+ /*
+ * Create a few different contexts with the same mm and write
+ * through each ctx using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
+ parent = live_context(i915, file);
+ if (IS_ERR(parent)) {
+ err = PTR_ERR(parent);
+ goto out_unlock;
+ }
+
+ if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ err = 0;
+ goto out_unlock;
+ }
+
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
- ncontexts = 0;
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- unsigned int id;
+ for_each_engine(engine, i915, id) {
+ unsigned long ncontexts, ndwords, dw;
+ struct drm_i915_gem_object *obj = NULL;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
- ctx = i915_gem_create_context(i915, file->driver_priv);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- for_each_engine(engine, i915, id) {
+ dw = 0;
+ ndwords = 0;
+ ncontexts = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
- if (!engine->context_size)
- continue; /* No logical context support in HW */
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_test;
+ }
- if (!intel_engine_can_store_dword(engine))
- continue;
+ __assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
}
@@ -544,35 +668,39 @@ static int igt_ctx_exec(void *arg)
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
+
ndwords++;
+ ncontexts++;
+
+ kernel_context_close(ctx);
}
- ncontexts++;
- }
- pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
- dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
- err = cpu_check(obj, rem);
- if (err)
- break;
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ goto out_test;
- dw += rem;
+ dw += rem;
+ }
}
-
-out_unlock:
+out_test:
if (igt_live_test_end(&t))
err = -EIO;
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
@@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -923,7 +1048,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_sseu default_sseu = intel_device_default_sseu(i915);
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_sseu pg_sseu;
@@ -962,11 +1087,12 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out_unlock;
}
+ i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1047,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1071,7 +1197,7 @@ static int igt_ctx_readonly(void *arg)
if (err)
goto out_unlock;
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1125,9 +1251,10 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
+ idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1137,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;
- err = cpu_check(obj, num_writes);
+ err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1201,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = value;
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1298,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = result;
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -1396,13 +1518,13 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_unlock;
- ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
}
- ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
@@ -1432,7 +1554,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= ~sizeof(u32);
+ offset &= -sizeof(u32);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1458,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
intel_runtime_pm_put(i915, wakeref);
@@ -1472,10 +1594,10 @@ out_unlock:
}
static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
if (engines == ALL_ENGINES)
return "all";
@@ -1488,67 +1610,60 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx,
- unsigned int engines)
+ intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
- int err;
+ intel_engine_mask_t tmp;
+ int pass;
GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
+ for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+ bool from_idle = pass & 1;
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (!from_idle) {
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
- i915_request_add(rq);
- }
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
-
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!engine_has_kernel_context_barrier(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
+ i915_request_add(rq);
+ }
}
- }
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
+ err = i915_gem_switch_to_kernel_context(i915,
+ i915->gt.active_engines);
+ if (err)
+ return err;
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (engine->last_retired_context->gem_context != i915->kernel_context) {
- pr_err("engine %s not idling in kernel context!\n",
- engine->name);
+ if (!from_idle) {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (i915->gt.active_requests) {
+ pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+ i915->gt.active_requests,
+ pass, from_idle ? "idle" : "busy",
+ __engine_name(i915, engines),
+ is_power_of_2(engines) ? "" : "s");
return -EINVAL;
}
- }
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ /* XXX Bonus points for proving we are the kernel context! */
- if (i915->gt.active_requests) {
- pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
- i915->gt.active_requests);
- return -EINVAL;
+ mutex_unlock(&i915->drm.struct_mutex);
+ drain_delayed_work(&i915->gt.idle_work);
+ mutex_lock(&i915->drm.struct_mutex);
}
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!intel_engine_has_kernel_context(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
- }
- }
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
return 0;
}
@@ -1592,8 +1707,6 @@ static int igt_switch_to_kernel_context(void *arg)
out_unlock:
GEM_TRACE_DUMP_ON(err);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1715,117 @@ out_unlock:
return err;
}
+static void mock_barrier_task(void *data)
+{
+ unsigned int *counter = data;
+
+ ++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ unsigned int counter;
+ int err;
+
+ /*
+ * The context barrier provides us with a callback after it emits
+ * a request; useful for retiring old state after loading new.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, 0,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately with 0 engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately for all inactive engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(i915, wakeref)
+ rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ if (IS_ERR(rq)) {
+ pr_err("Request allocation failed!\n");
+ goto out;
+ }
+ i915_request_add(rq);
+ GEM_BUG_ON(list_empty(&ctx->active_engines));
+
+ counter = 0;
+ context_barrier_inject_fault = BIT(RCS0);
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ context_barrier_inject_fault = 0;
+ if (err == -ENXIO)
+ err = 0;
+ else
+ pr_err("Did not hit fault injection!\n");
+ if (counter != 0) {
+ pr_err("Invoked callback on error!\n");
+ err = -EIO;
+ }
+ if (err)
+ goto out;
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ mock_device_flush(i915);
+ if (counter == 0) {
+ pr_err("Did not retire on each active engine\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mock_context_close(ctx);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
int err;
@@ -1628,10 +1848,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
+ SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..2b943ee246c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
goto err;
}
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..89766688e420 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
- if (!ctx)
+ if (IS_ERR(ctx))
break;
/* We will need some GGTT space for the rq's context */
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3850ef4a5ec8..9cca66e4420a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
goto err;
@@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1681,25 +1680,31 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7..971148fbe6f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
for (loop = 0; loop < 3; loop++) {
intel_wakeref_t wakeref;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c..e6ffe2240126 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -42,7 +42,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS],
+ request = mock_request(i915->engine[RCS0],
i915->kernel_context,
HZ / 10);
if (!request)
@@ -66,7 +66,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
@@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
}
- mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_device;
+ goto out_locked;
}
- mutex_lock(&i915->drm.struct_mutex);
i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
@@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ vip = mock_request(i915->engine[RCS0], ctx[1], 0);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
- pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
- vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ pr_err("timed out waiting for high priority request\n");
goto err;
}
@@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS],
+ .engine = i915->engine[RCS0],
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -622,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_chipset_flush(i915);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -780,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -802,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ i915_gem_chipset_flush(i915);
+
return vma;
err:
@@ -1219,7 +1212,7 @@ out_flush:
num_fences += atomic_long_read(&t[id].num_fences);
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+ num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
@@ -1246,7 +1239,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 10ef0e636a24..b18eaefef798 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -133,7 +133,7 @@ static int __run_selftests(const char *name,
if (signal_pending(current))
return -EINTR;
- pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ pr_info(DRIVER_NAME ": Running %s\n", st->name);
if (data)
err = st->live(data);
else
@@ -255,7 +255,7 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
- pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
err = st->func(data);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cdbc8f134e5e..cbf45d85cbff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -571,21 +571,27 @@ static int test_timer(void *arg)
unsigned long target, delay;
struct timed_fence tf;
+ preempt_disable();
timed_fence_init(&tf, target = jiffies);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with immediate expiration not signaled\n");
goto err;
}
+ preempt_enable();
timed_fence_fini(&tf);
for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ preempt_disable();
timed_fence_init(&tf, target = jiffies + delay);
if (i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
goto err;
}
+ preempt_enable();
i915_sw_fence_wait(&tf.fence);
+
+ preempt_disable();
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence not signaled after wait\n");
goto err;
@@ -595,13 +601,14 @@ static int test_timer(void *arg)
target, jiffies);
goto err;
}
-
+ preempt_enable();
timed_fence_fini(&tf);
}
return 0;
err:
+ preempt_enable();
timed_fence_fini(&tf);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 12ea69b1a1e5..bd96afcadfe7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, "mock", NULL);
+ tl = i915_timeline_create(state->i915, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
- tl = i915_timeline_create(i915, "live", NULL);
+ tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl))
return tl;
@@ -641,6 +641,118 @@ out:
#undef NUM_TIMELINES
}
+static int live_hwsp_wrap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_timeline *tl;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ tl = i915_timeline_create(i915, NULL);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out_rpm;
+ }
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out_free;
+
+ err = i915_timeline_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, i915, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ tl->seqno = -4u;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ i915_retire_requests(i915); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ i915_timeline_unpin(tl);
+out_free:
+ i915_timeline_put(tl);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -723,7 +835,11 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_recycle),
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cf1de82741fa..fc594b030f5a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..94aee4071a66 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
cond_resched();
if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915)) {
+ i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
pr_err("Failed to switch back to kernel context; declaring wedged\n");
i915_gem_set_wedged(i915);
}
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_terminally_wedged(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ebd9225684e..16890dfe74c0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
+ if (engine->emit_init_breadcrumb &&
+ rq->timeline->has_initial_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
cancel_rq:
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index c5e0a0e98fcb..b05a21eaa8f4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client,
dev_priv->preempt_context : dev_priv->kernel_context;
if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->engines != INTEL_INFO(dev_priv)->engine_mask ||
client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
@@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg)
for (i = 0; i < ATTEMPTS; i++) {
clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
i % GUC_CLIENT_PRIORITY_NUM,
dev_priv->kernel_context);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7b6f3bea9ef8..050bd1e19e02 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
@@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -242,6 +244,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
i915_gem_chipset_flush(h->i915);
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
timeout = -EIO;
i915_request_put(rq);
@@ -375,7 +383,7 @@ static int igt_global_reset(void *arg)
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
return err;
@@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg)
i915_gem_set_wedged(i915);
- mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+ GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_reset_failed(i915) ? -EIO : 0;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
+static int igt_reset_nop(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int reset_count, count;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ /* Check that we can reset during non-user portions of requests */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ count = 0;
+ do {
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id) {
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ igt_global_reset_lock(i915);
+ i915_reset(i915, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(i915);
+ if (i915_reset_failed(i915)) {
+ err = -EIO;
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) !=
+ reset_count + ++count) {
+ pr_err("Full GPU reset not recorded!\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: %d resets\n", __func__, count);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Check that we can engine-reset during non-user portions */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ unsigned int reset_count, reset_engine_count;
+ unsigned int count;
+ IGT_TIMEOUT(end_time);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+ count = 0;
+
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ err = i915_reset_engine(engine, NULL);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count + ++count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+ if (err)
+ break;
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
struct intel_engine_cs *engine;
@@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (active) {
@@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
- rq->fence.seqno,
- i915_request_global_seqno(rq));
+ rq->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
@@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
count++;
if (rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to complete request after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ break;
+ }
+
i915_request_put(rq);
}
@@ -837,7 +1074,7 @@ unwind:
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (flags & TEST_ACTIVE) {
@@ -887,7 +1124,8 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
u32 count = i915_reset_count(&i915->gpu_error);
@@ -905,7 +1143,7 @@ static int igt_reset_wait(void *arg)
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
@@ -917,7 +1155,7 @@ static int igt_reset_wait(void *arg)
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -963,7 +1201,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1034,13 +1272,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- igt_global_reset_lock(i915);
-
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -1066,7 +1302,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1138,7 +1374,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
out_reset:
- fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
+ igt_global_reset_lock(i915);
+ fake_hangcheck(rq->i915, rq->engine->mask);
+ igt_global_reset_unlock(i915);
if (tsk) {
struct igt_wedge_me w;
@@ -1159,9 +1397,8 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1317,7 +1554,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
+ reset_count = fake_hangcheck(i915, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1367,7 +1604,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1376,7 +1613,7 @@ unlock:
static int igt_handle_error(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1423,7 +1660,7 @@ static int igt_handle_error(void *arg)
/* Temporarily disable error capture */
error = xchg(&i915->gpu_error.first_error, (void *)-1);
- i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
+ i915_handle_error(i915, engine->mask, 0, NULL);
xchg(&i915->gpu_error.first_error, error);
@@ -1547,7 +1784,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1586,7 +1823,7 @@ static int igt_atomic_reset(void *arg)
/* Flush any requests before we get started and check basics */
force_reset(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
goto unlock;
if (intel_has_gpu_reset(i915)) {
@@ -1642,6 +1879,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_reset_nop),
+ SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
@@ -1660,7 +1899,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
wakeref = intel_runtime_pm_get(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751..fbee030db940 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -10,6 +10,7 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
@@ -75,6 +76,185 @@ err_unlock:
return err;
}
+static int live_busywait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_unlock;
+ ctx_hi->sched.priority = INT_MAX;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = INT_MIN;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses a MI_STORE_DWORD_IMM to update the semaphore value
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = i915_request_alloc(engine, ctx_lo);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = i915_request_alloc(engine, ctx_hi);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -88,6 +268,9 @@ static int live_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ pr_err("Logical preemption supported, but not exposed\n");
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
@@ -110,8 +293,17 @@ static int live_preempt(void *arg)
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -147,7 +339,8 @@ static int live_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -201,8 +394,17 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -241,7 +443,8 @@ static int live_late_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -335,6 +538,9 @@ static int live_suppress_self_preempt(void *arg)
struct i915_request *rq_a, *rq_b;
int depth;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin,
@@ -407,6 +613,171 @@ err_wedged:
goto err_client_b;
}
+static int __i915_sw_fence_call
+dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+static struct i915_request *dummy_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->engine = engine;
+
+ i915_sched_node_init(&rq->sched);
+
+ /* mark this request as permanently incomplete */
+ rq->fence.seqno = 1;
+ BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
+ rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ i915_sw_fence_init(&rq->submit, dummy_notify);
+ i915_sw_fence_commit(&rq->submit);
+
+ return rq;
+}
+
+static void dummy_request_free(struct i915_request *dummy)
+{
+ i915_request_mark_complete(dummy);
+ i915_sched_node_fini(&dummy->sched);
+ i915_sw_fence_fini(&dummy->submit);
+
+ dma_fence_free(&dummy->fence);
+}
+
+static int live_suppress_wait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct preempt_client client[4];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ int i;
+
+ /*
+ * Waiters are given a little priority nudge, but not enough
+ * to actually cause any preemption. Double check that we do
+ * not needlessly generate preempt-to-idle cycles.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
+ goto err_unlock;
+ if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ goto err_client_0;
+ if (preempt_client_init(i915, &client[2])) /* head of queue */
+ goto err_client_1;
+ if (preempt_client_init(i915, &client[3])) /* bystander */
+ goto err_client_2;
+
+ for_each_engine(engine, i915, id) {
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!engine->emit_init_breadcrumb)
+ continue;
+
+ for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
+ struct i915_request *rq[ARRAY_SIZE(client)];
+ struct i915_request *dummy;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ dummy = dummy_request(engine);
+ if (!dummy)
+ goto err_client_3;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ rq[i] = igt_spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq[i])) {
+ err = PTR_ERR(rq[i]);
+ goto err_wedged;
+ }
+
+ /* Disable NEWCLIENT promotion */
+ __i915_active_request_set(&rq[i]->timeline->last_request,
+ dummy);
+ i915_request_add(rq[i]);
+ }
+
+ dummy_request_free(dummy);
+
+ GEM_BUG_ON(i915_request_completed(rq[0]));
+ if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
+ pr_err("%s: First client failed to start\n",
+ engine->name);
+ goto err_wedged;
+ }
+ GEM_BUG_ON(!i915_request_started(rq[0]));
+
+ if (i915_request_wait(rq[depth],
+ I915_WAIT_LOCKED |
+ I915_WAIT_PRIORITY,
+ 1) != -ETIME) {
+ pr_err("%s: Waiter depth:%d completed!\n",
+ engine->name, depth);
+ goto err_wedged;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ err = -EINVAL;
+ goto err_client_3;
+ }
+ }
+ }
+
+ err = 0;
+err_client_3:
+ preempt_client_fini(&client[3]);
+err_client_2:
+ preempt_client_fini(&client[2]);
+err_client_1:
+ preempt_client_fini(&client[1]);
+err_client_0:
+ preempt_client_fini(&client[0]);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_3;
+}
+
static int live_chain_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -438,11 +809,39 @@ static int live_chain_preempt(void *arg)
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
- int count, i;
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
- for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
- struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+ rq = igt_spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ goto err_wedged;
+ }
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
@@ -484,6 +883,26 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+
+ rq = i915_request_alloc(engine, lo.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ goto err_wedged;
+ }
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
}
}
@@ -767,7 +1186,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -795,7 +1214,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -808,6 +1227,7 @@ static int live_preempt_smoke(void *arg)
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
+ struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -838,11 +1258,13 @@ static int live_preempt_smoke(void *arg)
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
- if (err)
+ if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ err = -EIO;
goto err_batch;
+ }
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
@@ -861,7 +1283,7 @@ static int live_preempt_smoke(void *arg)
}
err_ctx:
- if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
@@ -884,9 +1306,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
@@ -895,7 +1319,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
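dummy_request() in the intel_lrc.c hunk marks its fake request as permanently incomplete by setting fence.seqno to 1 and pointing hwsp_seqno at the upper 32 bits of that 64-bit seqno, which stay zero. A standalone sketch of the aliasing trick, assuming a little-endian host and using a simplified >= check in place of the driver's seqno comparison:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t seqno = 1;	/* stand-in for rq->fence.seqno */
		/* view onto the upper 32 bits, as the selftest does (little-endian) */
		uint32_t *hwsp_seqno = (uint32_t *)&seqno + 1;

		/* "completed" once the HWSP value catches up with the fence seqno */
		int completed = *hwsp_seqno >= seqno;

		printf("hwsp=%u seqno=%llu completed=%d\n",
		       *hwsp_seqno, (unsigned long long)seqno, completed);
		return 0;	/* hwsp=0 seqno=1 completed=0: never completes */
	}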
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 81d9d31042a9..e0d7ebecb215 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -119,9 +119,143 @@ int intel_uncore_mock_selftests(void)
return 0;
}
-static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+static int live_forcewake_ops(void *arg)
+{
+ static const struct reg {
+ const char *name;
+ unsigned long platforms;
+ unsigned int offset;
+ } registers[] = {
+ {
+ "RING_START",
+ INTEL_GEN_MASK(6, 7),
+ 0x38,
+ },
+ {
+ "RING_MI_MODE",
+ INTEL_GEN_MASK(8, BITS_PER_LONG),
+ 0x9c,
+ }
+ };
+ const struct reg *r;
+ struct drm_i915_private *i915 = arg;
+ struct intel_uncore_forcewake_domain *domain;
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ int err = 0;
+
+ GEM_BUG_ON(i915->gt.awake);
+
+ /* vlv/chv with their pcu behave differently wrt reads */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ pr_debug("PCU fakes forcewake badly; skipping\n");
+ return 0;
+ }
+
+ /*
+ * Not quite as reliable across the gen as one would hope.
+ *
+ * Either our theory of operation is incorrect, or there remain
+ * external parties interfering with the powerwells.
+ *
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110210
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ /* We have to pick carefully to get the exact behaviour we need */
+ for (r = registers; r->name; r++)
+ if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ break;
+ if (!r->name) {
+ pr_debug("Forcewaked register not known for %s; skipping\n",
+ intel_platform_name(INTEL_INFO(i915)->platform));
+ return 0;
+ }
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ for_each_fw_domain(domain, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (!hrtimer_cancel(&domain->timer))
+ continue;
+
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+
+ for_each_engine(engine, i915, id) {
+ i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ if (!engine->default_state)
+ continue;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
+ FW_REG_READ);
+ if (!fw_domains)
+ continue;
+
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ if (!domain->wake_count)
+ continue;
+
+ pr_err("fw_domain %s still active, aborting test!\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ intel_uncore_forcewake_get(uncore, fw_domains);
+ val = readl(reg);
+ intel_uncore_forcewake_put(uncore, fw_domains);
+
+ /* Flush the forcewake release (delayed onto a timer) */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+
+ preempt_disable();
+ err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+ preempt_enable();
+ if (err) {
+ pr_err("Failed to clear fw_domain %s\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ goto out_rpm;
+ }
+ }
+
+ if (!val) {
+ pr_err("%s:%s was zero while fw was held!\n",
+ engine->name, r->name);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ /* We then expect the read to return 0 outside of the fw */
+ if (wait_for(readl(reg) == 0, 100)) {
+ pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+ engine->name, r->name, readl(reg), fw_domains);
+ err = -ETIMEDOUT;
+ goto out_rpm;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
+static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long *valid;
u32 offset;
int err;
@@ -137,48 +271,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
- GFP_KERNEL);
+ valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
(void)I915_READ_FW(reg);
- if (!check_for_unclaimed_mmio(dev_priv))
+ if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
(void)I915_READ(reg);
- if (check_for_unclaimed_mmio(dev_priv)) {
+ if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
- kfree(valid);
+ bitmap_free(valid);
return err;
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_forcewake_ops),
+ SUBTEST(live_forcewake_domains),
+ };
+
int err;
/* Confirm the table we load is still valid */
@@ -188,9 +326,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = intel_uncore_check_forcewake_domains(i915);
- if (err)
- return err;
-
- return 0;
+ return i915_subtests(tests, i915);
}
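live_forcewake_ops() above picks its probe register from a small table keyed by a generation bitmask (INTEL_GEN_MASK(6, 7) for RING_START, gen8+ for RING_MI_MODE). A simplified userspace sketch of that table-driven selection; GEN_RANGE(), BIT() and the sentinel entry here are stand-ins for the driver's macros, not its definitions:

	#include <stdio.h>

	#define BIT(n)			(1UL << (n))
	#define GEN_RANGE(lo, hi)	(((~0UL) << (lo)) & (~0UL >> (63 - (hi))))

	struct reg {
		const char *name;
		unsigned long platforms;	/* mask of supported gens */
		unsigned int offset;
	};

	static const struct reg registers[] = {
		{ "RING_START",   GEN_RANGE(6, 7),  0x38 },
		{ "RING_MI_MODE", GEN_RANGE(8, 63), 0x9c },
		{ }				/* sentinel terminates the scan */
	};

	int main(void)
	{
		unsigned long gen_mask = BIT(9);	/* pretend we run on a gen9 part */
		const struct reg *r;

		for (r = registers; r->name; r++)
			if (r->platforms & gen_mask)
				break;

		if (r->name)
			printf("probing %s at offset 0x%x\n", r->name, r->offset);
		else
			printf("no forcewaked register known; skipping\n");
		return 0;
	}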
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index b15c4f26c593..567b6f8dae86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,14 @@
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
+#include "mock_drm.h"
+
+static const struct wo_register {
+ enum intel_platform platform;
+ u32 reg;
+} wo_registers[] = {
+ { INTEL_GEMINILAKE, 0x731c }
+};
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (IS_ERR(result))
return result;
- i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
cs = i915_gem_object_pin_map(result, I915_MAP_WB);
if (IS_ERR(cs)) {
@@ -82,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_obj;
}
memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -181,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
goto out_put;
@@ -214,7 +223,7 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
+ i915_reset(engine->i915, engine->mask, "live_workarounds");
return 0;
}
@@ -236,15 +245,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref) {
- if (spin)
- rq = igt_spinner_create_request(spin,
- ctx, engine,
- MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
- }
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -273,7 +278,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
intel_wakeref_t wakeref;
@@ -282,11 +286,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
engine->whitelist.count, name);
- if (want_spin) {
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
- }
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
ctx = kernel_context(i915);
if (IS_ERR(ctx))
@@ -298,17 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out;
}
- err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
+ err = switch_to_scratch_context(engine, &spin);
if (err)
goto out;
with_intel_runtime_pm(i915, wakeref)
err = reset(engine);
- if (want_spin) {
- igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
- }
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
@@ -340,10 +340,379 @@ out:
return err;
}
+static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_obj;
+ }
+ memset(ptr, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+ if (rsvd == 0x0000ffff) {
+ old &= ~(new >> 16);
+ old |= new & (new >> 16);
+ } else {
+ old &= ~rsvd;
+ old |= new & rsvd;
+ }
+
+ return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+ enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+ if (wo_registers[i].platform == platform &&
+ wo_registers[i].reg == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static int check_dirty_whitelist(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ const u32 values[] = {
+ 0x00000000,
+ 0x01010101,
+ 0x10100101,
+ 0x03030303,
+ 0x30300303,
+ 0x05050505,
+ 0x50500505,
+ 0x0f0f0f0f,
+ 0xf00ff00f,
+ 0x10101010,
+ 0xf0f01010,
+ 0x30303030,
+ 0xa0a03030,
+ 0x50505050,
+ 0xc0c05050,
+ 0xf0f0f0f0,
+ 0x11111111,
+ 0x33333333,
+ 0x55555555,
+ 0x0000ffff,
+ 0x00ff00ff,
+ 0xff0000ff,
+ 0xffff00ff,
+ 0xffffffff,
+ };
+ struct i915_vma *scratch;
+ struct i915_vma *batch;
+ int err = 0, i, v;
+ u32 *cs, *results;
+
+ scratch = create_scratch(ctx);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_scratch;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ u64 addr = scratch->node.start;
+ struct i915_request *rq;
+ u32 srm, lrm, rsvd;
+ u32 expect;
+ int idx;
+
+ if (wo_register(engine, reg))
+ continue;
+
+ srm = MI_STORE_REGISTER_MEM;
+ lrm = MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ lrm++, srm++;
+
+ pr_debug("%s: Writing garbage to %x\n",
+ engine->name, reg);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_batch;
+ }
+
+ /* SRM original */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = ~values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+ /* LRM original -- don't leave garbage in the context! */
+ *cs++ = lrm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ 0);
+ if (err)
+ goto err_request;
+
+err_request:
+ i915_request_add(rq);
+ if (err)
+ goto out_batch;
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("%s: Futzing %x timedout; cancelling test\n",
+ engine->name, reg);
+ i915_gem_set_wedged(ctx->i915);
+ err = -EIO;
+ goto out_batch;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_batch;
+ }
+
+ GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
+ rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, ~values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ if (err) {
+ pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
+ engine->name, err, reg);
+
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = ~values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+
+ err = -EINVAL;
+ }
+out_unpin:
+ i915_gem_object_unpin_map(scratch->obj);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+out_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Can the user write to the whitelisted registers? */
+
+ if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ return 0;
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ file = mock_file(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (engine->whitelist.count == 0)
+ continue;
+
+ err = check_dirty_whitelist(ctx, engine);
+ if (err)
+ goto out_file;
+ }
+
+out_file:
+ mutex_unlock(&i915->drm.struct_mutex);
+ mock_file_free(i915, file);
+ mutex_lock(&i915->drm.struct_mutex);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
@@ -381,10 +750,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
+ ok &= wa_list_verify(engine->uncore,
+ &lists->engine[id].wa_list, str);
return ok;
}
@@ -513,13 +883,14 @@ err:
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
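reg_write() in the check_dirty_whitelist() hunk predicts what a whitelisted register should read back after an LRI: rsvd == 0x0000ffff marks a masked register whose upper 16 written bits select which lower 16 bits take effect, while any other rsvd is simply the set of writable bits. A standalone copy of that helper with a few worked values (the surrounding program is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reg_write(uint32_t old, uint32_t new, uint32_t rsvd)
	{
		if (rsvd == 0x0000ffff) {
			/* masked register: new >> 16 enables the low bits to change */
			old &= ~(new >> 16);
			old |= new & (new >> 16);
		} else {
			/* plain register: rsvd is the writable-bit mask */
			old &= ~rsvd;
			old |= new & rsvd;
		}
		return old;
	}

	int main(void)
	{
		/* masked write: set bit0 (mask 0x0001, value 0x0001) */
		printf("%08x\n", reg_write(0x00000000, 0x00010001, 0x0000ffff)); /* 00000001 */
		/* masked write: clear bit0 (mask 0x0001, value 0x0000) */
		printf("%08x\n", reg_write(0x00000001, 0x00010000, 0x0000ffff)); /* 00000000 */
		/* plain register with only the low byte writable */
		printf("%08x\n", reg_write(0xdeadbeef, 0x12345678, 0x000000ff)); /* deadbe78 */
		return 0;
	}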
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd602..0426093bf1d9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,25 +40,31 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
-
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
if (name) {
+ struct i915_hw_ppgtt *ppgtt;
+
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;
- ctx->ppgtt = mock_ppgtt(i915, name);
- if (!ctx->ppgtt)
+ ppgtt = mock_ppgtt(i915, name);
+ if (!ppgtt)
goto err_put;
+
+ __set_ppgtt(ctx, ppgtt);
}
return ctx;
@@ -87,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file)
{
+ struct i915_gem_context *ctx;
+ int err;
+
lockdep_assert_held(&i915->drm.struct_mutex);
- return i915_gem_create_context(i915, file->driver_priv);
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ err = gem_context_register(ctx, file->driver_priv);
+ if (err < 0)
+ goto err_ctx;
+
+ return ctx;
+
+err_ctx:
+ context_close(ctx);
+ return ERR_PTR(err);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 08f0cab02e0f..61a8206ed677 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -50,13 +50,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915,
- &ring->timeline, engine->name,
- NULL)) {
+ if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
kfree(ring);
return NULL;
}
+ kref_init(&ring->base.ref);
ring->base.size = sz;
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
@@ -76,28 +75,26 @@ static void mock_ring_free(struct intel_ring *base)
kfree(ring);
}
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
- struct mock_request,
- link);
+ struct i915_request,
+ mock.link);
}
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
{
- list_del_init(&request->link);
- intel_engine_write_global_seqno(request->base.engine,
- request->base.global_seqno);
- i915_request_mark_complete(&request->base);
- GEM_BUG_ON(!i915_request_completed(&request->base));
+ list_del_init(&request->mock.link);
+ i915_request_mark_complete(request);
+ GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->base.engine);
+ intel_engine_queue_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
- struct mock_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -112,8 +109,9 @@ static void hw_delay_complete(struct timer_list *t)
* requeue the timer for the next delayed request.
*/
while ((request = first_request(engine))) {
- if (request->delay) {
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ if (request->mock.delay) {
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
break;
}
@@ -126,55 +124,43 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
mock_timeline_unpin(ce->ring->timeline);
- i915_gem_context_put(ce->gem_context);
}
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
{
- GEM_BUG_ON(ce->pin_count);
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->ring)
mock_ring_free(ce->ring);
-}
-static const struct intel_context_ops mock_context_ops = {
- .unpin = mock_context_unpin,
- .destroy = mock_context_destroy,
-};
+ intel_context_free(ce);
+}
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int err = -ENOMEM;
-
- if (ce->pin_count++)
- return ce;
-
if (!ce->ring) {
- ce->ring = mock_ring(engine);
+ ce->ring = mock_ring(ce->engine);
if (!ce->ring)
- goto err;
+ return -ENOMEM;
}
mock_timeline_pin(ce->ring->timeline);
+ return 0;
+}
- ce->ops = &mock_context_ops;
- i915_gem_context_get(ctx);
- return ce;
+static const struct intel_context_ops mock_context_ops = {
+ .pin = mock_context_pin,
+ .unpin = mock_context_unpin,
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
-}
+ .destroy = mock_context_destroy,
+};
static int mock_request_alloc(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
-
- INIT_LIST_HEAD(&mock->link);
- mock->delay = 0;
+ INIT_LIST_HEAD(&request->mock.link);
+ request->mock.delay = 0;
return 0;
}
@@ -192,25 +178,55 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
static void mock_submit_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
unsigned long flags;
i915_request_submit(request);
- GEM_BUG_ON(!request->global_seqno);
spin_lock_irqsave(&engine->hw_lock, flags);
- list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue) {
- if (mock->delay)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ list_add_tail(&request->mock.link, &engine->hw_queue);
+ if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+ if (request->mock.delay)
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
else
- advance(mock);
+ advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ if (!i915_request_signaled(request))
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_request_mark_complete(request);
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -227,18 +243,21 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
+ engine->base.mask = BIT(id);
engine->base.status_page.addr = (void *)(engine + 1);
- engine->base.context_pin = mock_context_pin;
+ engine->base.cops = &mock_context_ops;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- if (i915_timeline_init(i915,
- &engine->base.timeline,
- engine->base.name,
- NULL))
+ engine->base.reset.prepare = mock_reset_prepare;
+ engine->base.reset.reset = mock_reset;
+ engine->base.reset.finish = mock_reset_finish;
+ engine->base.cancel_requests = mock_cancel_requests;
+
+ if (i915_timeline_init(i915, &engine->base.timeline, NULL))
goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
@@ -249,7 +268,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ if (pin_context(i915->kernel_context, &engine->base,
+ &engine->base.kernel_context))
goto err_breadcrumbs;
return &engine->base;
@@ -266,19 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct mock_request *request, *rn;
+ struct i915_request *request, *rn;
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
- intel_engine_write_global_seqno(engine, 0);
}
void mock_engine_free(struct intel_engine_cs *engine)
@@ -293,7 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
if (ce)
intel_context_unpin(ce);
- __intel_context_unpin(engine->i915->kernel_context, engine);
+ intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 14ae46fda49f..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
- kmem_cache_destroy(i915->priorities);
- kmem_cache_destroy(i915->dependencies);
- kmem_cache_destroy(i915->requests);
- kmem_cache_destroy(i915->vmas);
- kmem_cache_destroy(i915->objects);
-
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
+
+ i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -184,11 +182,12 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(i915);
+ mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
@@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
- if (!i915->objects)
- goto err_wq;
-
- i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!i915->vmas)
- goto err_objects;
-
- i915->requests = KMEM_CACHE(mock_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!i915->requests)
- goto err_vmas;
-
- i915->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!i915->dependencies)
- goto err_requests;
-
- i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!i915->priorities)
- goto err_dependencies;
-
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
- mkwrite_device_info(i915)->ring_mask = BIT(0);
+ mkwrite_device_info(i915)->engine_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
goto err_unlock;
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
+ i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->engine[RCS0])
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -256,16 +230,6 @@ err_context:
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
- kmem_cache_destroy(i915->priorities);
-err_dependencies:
- kmem_cache_destroy(i915->dependencies);
-err_requests:
- kmem_cache_destroy(i915->requests);
-err_vmas:
- kmem_cache_destroy(i915->vmas);
-err_objects:
- kmem_cache_destroy(i915->objects);
-err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0dc29e242597..d1a7c9608712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine,
unsigned long delay)
{
struct i915_request *request;
- struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
- mock = container_of(request, typeof(*mock), base);
- mock->delay = delay;
-
- return &mock->base;
+ request->mock.delay = delay;
+ return request;
}
bool mock_cancel_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
bool was_queued;
spin_lock_irq(&engine->hw_lock);
- was_queued = !list_empty(&mock->link);
- list_del_init(&mock->link);
+ was_queued = !list_empty(&request->mock.link);
+ list_del_init(&request->mock.link);
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 995fb728380c..4acf0211df20 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -29,13 +29,6 @@
#include "../i915_request.h"
-struct mock_request {
- struct i915_request base;
-
- struct list_head link;
- unsigned long delay;
-};
-
struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
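The mock_request.c/.h hunks above retire the struct mock_request wrapper and its container_of() lookups in favour of a mock substruct embedded directly in i915_request. A minimal sketch of the before/after layouts, using illustrative stand-in types:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* old shape: selftest-only state wraps the request */
	struct request_old { int dummy; };
	struct mock_request {
		struct request_old base;
		struct list_head link;
		unsigned long delay;
	};

	/* new shape: the same state is embedded in the request itself */
	struct request_new {
		int dummy;
		struct { struct list_head link; unsigned long delay; } mock;
	};

	int main(void)
	{
		struct mock_request wrapper = { .delay = 3 };
		struct request_old *old_rq = &wrapper.base;
		/* old: every caller climbs back up to the wrapper */
		printf("old delay = %lu\n",
		       container_of(old_rq, struct mock_request, base)->delay);

		struct request_new new_rq = { .mock.delay = 3 };
		/* new: the state is reachable straight from the request */
		printf("new delay = %lu\n", new_rq.mock.delay);
		return 0;
	}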
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index d2de9ece2118..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -14,8 +14,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->fence_context = context;
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..ff8999c63a12 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -26,21 +26,21 @@
#define __nop_write(x) \
static void \
-nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
__nop_write(8)
__nop_write(16)
__nop_write(32)
#define __nop_read(x) \
static u##x \
-nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
__nop_read(8)
__nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
- ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
#endif /* !__MOCK_UNCORE_H */