Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 114 |
1 file changed, 98 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 4a28d713a7d8..d8064276431c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -885,6 +885,84 @@ static int shrink_hole(struct drm_i915_private *i915,
 	return err;
 }
 
+static int shrink_boom(struct drm_i915_private *i915,
+		       struct i915_address_space *vm,
+		       u64 hole_start, u64 hole_end,
+		       unsigned long end_time)
+{
+	unsigned int sizes[] = { SZ_2M, SZ_1G };
+	struct drm_i915_gem_object *purge;
+	struct drm_i915_gem_object *explode;
+	int err;
+	int i;
+
+	/*
+	 * Catch the case which shrink_hole seems to miss. The setup here
+	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
+	 * ensuring that all vma associated with the respective pd/pdp are
+	 * unpinned at the time.
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+		unsigned int size = sizes[i];
+		struct i915_vma *vma;
+
+		purge = fake_dma_object(i915, size);
+		if (IS_ERR(purge))
+			return PTR_ERR(purge);
+
+		vma = i915_vma_instance(purge, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err_purge;
+		}
+
+		err = i915_vma_pin(vma, 0, 0, flags);
+		if (err)
+			goto err_purge;
+
+		/* Should now be ripe for purging */
+		i915_vma_unpin(vma);
+
+		explode = fake_dma_object(i915, size);
+		if (IS_ERR(explode)) {
+			err = PTR_ERR(explode);
+			goto err_purge;
+		}
+
+		vm->fault_attr.probability = 100;
+		vm->fault_attr.interval = 1;
+		atomic_set(&vm->fault_attr.times, -1);
+
+		vma = i915_vma_instance(explode, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err_explode;
+		}
+
+		err = i915_vma_pin(vma, 0, 0, flags | size);
+		if (err)
+			goto err_explode;
+
+		i915_vma_unpin(vma);
+
+		i915_gem_object_put(purge);
+		i915_gem_object_put(explode);
+
+		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+	}
+
+	return 0;
+
+err_explode:
+	i915_gem_object_put(explode);
+err_purge:
+	i915_gem_object_put(purge);
+	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+	return err;
+}
+
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 			  int (*func)(struct drm_i915_private *i915,
 				      struct i915_address_space *vm,
@@ -953,6 +1031,11 @@ static int igt_ppgtt_shrink(void *arg)
 	return exercise_ppgtt(arg, shrink_hole);
 }
 
+static int igt_ppgtt_shrink_boom(void *arg)
+{
+	return exercise_ppgtt(arg, shrink_boom);
+}
+
 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
 {
 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
@@ -1052,35 +1135,38 @@ static int igt_ggtt_page(void *arg)
 
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
-					  1024 * PAGE_SIZE, 0,
+					  count * PAGE_SIZE, 0,
 					  I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
 					  DRM_MM_INSERT_LOW);
 	if (err)
 		goto out_unpin;
 
+	intel_runtime_pm_get(i915);
+
+	for (n = 0; n < count; n++) {
+		u64 offset = tmp.start + n * PAGE_SIZE;
+
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, 0),
+				       offset, I915_CACHE_NONE, 0);
+	}
+
 	order = i915_random_order(count, &prng);
 	if (!order) {
 		err = -ENOMEM;
 		goto out_remove;
 	}
 
-	intel_runtime_pm_get(i915);
 	for (n = 0; n < count; n++) {
 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
 		u32 __iomem *vaddr;
 
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, 0),
-				       offset, I915_CACHE_NONE, 0);
-
 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
 		iowrite32(n, vaddr + n);
 		io_mapping_unmap_atomic(vaddr);
-
-		wmb();
-		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
 	}
+	i915_gem_flush_ggtt_writes(i915);
 
 	i915_random_reorder(order, count, &prng);
 	for (n = 0; n < count; n++) {
@@ -1088,16 +1174,10 @@ static int igt_ggtt_page(void *arg)
 		u32 __iomem *vaddr;
 		u32 val;
 
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, 0),
-				       offset, I915_CACHE_NONE, 0);
-
 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
 		val = ioread32(vaddr + n);
 		io_mapping_unmap_atomic(vaddr);
 
-		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
-
 		if (val != n) {
 			pr_err("insert page failed: found %d, expected %d\n",
 			       val, n);
@@ -1105,10 +1185,11 @@ static int igt_ggtt_page(void *arg)
 			break;
 		}
 	}
-	intel_runtime_pm_put(i915);
 
 	kfree(order);
out_remove:
+	ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+	intel_runtime_pm_put(i915);
 	drm_mm_remove_node(&tmp);
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
@@ -1579,6 +1660,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_pot),
 		SUBTEST(igt_ppgtt_fill),
 		SUBTEST(igt_ppgtt_shrink),
+		SUBTEST(igt_ppgtt_shrink_boom),
 		SUBTEST(igt_ggtt_lowlevel),
 		SUBTEST(igt_ggtt_drunk),
 		SUBTEST(igt_ggtt_walk),
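Note on the fault-injection setup in shrink_boom(): before binding `explode` the test programs vm->fault_attr with probability = 100, interval = 1 and times = -1, so the alloc_pt/alloc_pd paths see injected allocation failures and the shrinker has to run while the vma bound for `purge` is unpinned, which is exactly the situation the in-code comment says shrink_hole misses. The sketch below is a self-contained userspace model of how those three struct fault_attr knobs gate a should_fail()-style decision; it is not i915 or kernel code, and fake_fault_attr / fake_should_fail are hypothetical names used only to illustrate the semantics under that assumption.

/*
 * Userspace model (an assumption of this note, not kernel code) of the
 * three fault_attr fields shrink_boom sets: probability = 100,
 * interval = 1, times = -1 make every eligible call fail.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_fault_attr {
	int probability;	/* % of eligible calls that fail */
	int interval;		/* only every interval-th call is eligible */
	int times;		/* failures left to inject, -1 = unlimited */
	unsigned long count;	/* calls seen so far */
};

static bool fake_should_fail(struct fake_fault_attr *attr)
{
	attr->count++;

	if (attr->times == 0)
		return false;			/* failure budget exhausted */

	if (attr->interval > 1 && attr->count % attr->interval)
		return false;			/* not an eligible call */

	if (attr->probability < 100 &&
	    (int)(attr->count * 13 % 100) >= attr->probability)
		return false;			/* toy stand-in for a random pass */

	if (attr->times > 0)
		attr->times--;
	return true;				/* inject a failure */
}

int main(void)
{
	/* Same values shrink_boom programs into vm->fault_attr. */
	struct fake_fault_attr attr = {
		.probability = 100, .interval = 1, .times = -1,
	};

	for (int i = 0; i < 4; i++)
		printf("alloc %d -> %s\n", i,
		       fake_should_fail(&attr) ? "FAIL injected" : "ok");
	return 0;
}

With these values every simulated allocation reports a failure, mirroring why the test can count on the shrinker path being exercised on each iteration; clearing the attributes with memset() at the end of the loop (as the patch does) disarms the injection again.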