author		Chris Wilson <chris@chris-wilson.co.uk>		2018-06-07 16:40:46 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>		2018-06-07 21:53:11 +0100
commit		93f2cde2a4f7947f6330ecfb9b27d13e2f4d43af (patch)
tree		d3ba0b2c9afc7baa17ccbdaca8d5e9a6b6acb5a0 /drivers
parent		520ea7c581bf3ba4761c1fb61c53b11767665b62 (diff)
drm/i915: Decouple vma vfuncs from vm
To allow for future non-object-backed vma, we need to be able to
specialise the vma's callbacks for binding, unbinding, and so on. For example,
instead of calling vma->vm->bind_vma(), we now call
vma->ops->bind_vma(). This gives us the opportunity to later override the
operation for a custom vma.
v2: flip order of unbind/bind
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180607154047.9171-2-chris@chris-wilson.co.uk
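
For context: the indirection this patch introduces (vma->ops pointing at a struct i915_vma_ops) is what later allows a vma that is not backed by a GEM object to carry its own operations table. The fragment below is a rough sketch of that idea only, not code from this patch; the custom_* names and empty bodies are hypothetical placeholders written against the i915_vma_ops signatures added in i915_gem_gtt.h further down.

/*
 * Hypothetical sketch, not part of this patch: with vma->ops decoupled
 * from vma->vm, a special-purpose vma could install its own table.
 */
static int custom_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags)
{
	/* Write PTEs without dereferencing a backing GEM object. */
	return 0;
}

static void custom_unbind_vma(struct i915_vma *vma)
{
	/* Point the PTEs back at the scratch page. */
}

static int custom_set_pages(struct i915_vma *vma)
{
	/* Supply vma->pages from somewhere other than vma->obj. */
	return 0;
}

static void custom_clear_pages(struct i915_vma *vma)
{
	vma->pages = NULL;
}

static const struct i915_vma_ops custom_vma_ops = {
	.bind_vma	= custom_bind_vma,
	.unbind_vma	= custom_unbind_vma,
	.set_pages	= custom_set_pages,
	.clear_pages	= custom_clear_pages,
};

/*
 * vma_create() still inherits &vm->vma_ops by default (see the i915_vma.c
 * hunk below); a custom vma would simply set vma->ops = &custom_vma_ops.
 */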
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c		57
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.h		27
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c			11
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.h			 1
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_gtt.c	18
5 files changed, 66 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7c45a516aa6b..d841bf296d19 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1616,12 +1616,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-	ppgtt->vm.bind_vma = gen8_ppgtt_bind_vma;
-	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
-	ppgtt->vm.set_pages = ppgtt_set_pages;
-	ppgtt->vm.clear_pages = clear_pages;
 	ppgtt->debug_dump = gen8_dump_ppgtt;
 
+	ppgtt->vm.vma_ops.bind_vma = gen8_ppgtt_bind_vma;
+	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
 	return 0;
 
 free_scratch:
@@ -2059,13 +2060,14 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 	ppgtt->vm.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->vm.insert_entries = gen6_ppgtt_insert_entries;
-	ppgtt->vm.bind_vma = gen6_ppgtt_bind_vma;
-	ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
-	ppgtt->vm.set_pages = ppgtt_set_pages;
-	ppgtt->vm.clear_pages = clear_pages;
 	ppgtt->vm.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
+	ppgtt->vm.vma_ops.bind_vma = gen6_ppgtt_bind_vma;
+	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
 	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
 			 ppgtt->node.size >> 20,
 			 ppgtt->node.start / PAGE_SIZE);
@@ -2793,11 +2795,11 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
 
 	i915->mm.aliasing_ppgtt = ppgtt;
 
-	GEM_BUG_ON(ggtt->vm.bind_vma != ggtt_bind_vma);
-	ggtt->vm.bind_vma = aliasing_gtt_bind_vma;
+	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
 
-	GEM_BUG_ON(ggtt->vm.unbind_vma != ggtt_unbind_vma);
-	ggtt->vm.unbind_vma = aliasing_gtt_unbind_vma;
+	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 
 	return 0;
 
@@ -2817,8 +2819,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
 
 	i915_ppgtt_put(ppgtt);
 
-	ggtt->vm.bind_vma = ggtt_bind_vma;
-	ggtt->vm.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -3310,10 +3312,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 	ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 	ggtt->vm.cleanup = gen6_gmch_remove;
-	ggtt->vm.bind_vma = ggtt_bind_vma;
-	ggtt->vm.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.set_pages = ggtt_set_pages;
-	ggtt->vm.clear_pages = clear_pages;
 	ggtt->vm.insert_page = gen8_ggtt_insert_page;
 	ggtt->vm.clear_range = nop_clear_range;
 	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -3331,6 +3329,11 @@
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
+	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
 	setup_private_pat(dev_priv);
 
 	return ggtt_probe_common(ggtt, size);
@@ -3370,10 +3373,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.clear_range = gen6_ggtt_clear_range;
 	ggtt->vm.insert_page = gen6_ggtt_insert_page;
 	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
-	ggtt->vm.bind_vma = ggtt_bind_vma;
-	ggtt->vm.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.set_pages = ggtt_set_pages;
-	ggtt->vm.clear_pages = clear_pages;
 	ggtt->vm.cleanup = gen6_gmch_remove;
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3389,6 +3388,11 @@
 	else
 		ggtt->vm.pte_encode = snb_pte_encode;
 
+	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
 	return ggtt_probe_common(ggtt, size);
 }
 
@@ -3419,14 +3423,15 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.insert_page = i915_ggtt_insert_page;
 	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
 	ggtt->vm.clear_range = i915_ggtt_clear_range;
-	ggtt->vm.bind_vma = ggtt_bind_vma;
-	ggtt->vm.unbind_vma = ggtt_unbind_vma;
-	ggtt->vm.set_pages = ggtt_set_pages;
-	ggtt->vm.clear_pages = clear_pages;
 	ggtt->vm.cleanup = i915_gmch_remove;
 
 	ggtt->invalidate = gmch_ggtt_invalidate;
 
+	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
 	if (unlikely(ggtt->do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 197c2c06ecb7..16307ba7e303 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
 
 struct drm_i915_file_private;
 struct drm_i915_fence_reg;
+struct i915_vma;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
@@ -254,6 +255,21 @@ struct i915_pml4 {
 	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
+struct i915_vma_ops {
+	/* Map an object into an address space with the given cache flags. */
+	int (*bind_vma)(struct i915_vma *vma,
+			enum i915_cache_level cache_level,
+			u32 flags);
+	/*
+	 * Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page.
+	 */
+	void (*unbind_vma)(struct i915_vma *vma);
+
+	int (*set_pages)(struct i915_vma *vma);
+	void (*clear_pages)(struct i915_vma *vma);
+};
+
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_i915_private *i915;
@@ -331,15 +347,8 @@ struct i915_address_space {
 			       enum i915_cache_level cache_level,
 			       u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-	int (*bind_vma)(struct i915_vma *vma,
-			enum i915_cache_level cache_level,
-			u32 flags);
-	int (*set_pages)(struct i915_vma *vma);
-	void (*clear_pages)(struct i915_vma *vma);
+
+	struct i915_vma_ops vma_ops;
 
 	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
 	I915_SELFTEST_DECLARE(bool scrub_64K);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b71265066cd1..e82aa804cdba 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -95,6 +95,7 @@ vma_create(struct drm_i915_gem_object *obj,
 		init_request_active(&vma->last_read[i], i915_vma_retire);
 	init_request_active(&vma->last_fence, NULL);
 	vma->vm = vm;
+	vma->ops = &vm->vma_ops;
 	vma->obj = obj;
 	vma->resv = obj->resv;
 	vma->size = obj->base.size;
@@ -280,7 +281,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	GEM_BUG_ON(!vma->pages);
 
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
 
@@ -543,7 +544,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	GEM_BUG_ON(vma->pages);
 
-	ret = vma->vm->set_pages(vma);
+	ret = vma->ops->set_pages(vma);
 	if (ret)
 		goto err_unpin;
 
@@ -622,7 +623,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return 0;
 
 err_clear:
-	vma->vm->clear_pages(vma);
+	vma->ops->clear_pages(vma);
 err_unpin:
 	if (vma->obj)
 		i915_gem_object_unpin_pages(vma->obj);
@@ -637,7 +638,7 @@ i915_vma_remove(struct i915_vma *vma)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-	vma->vm->clear_pages(vma);
+	vma->ops->clear_pages(vma);
 
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
@@ -906,7 +907,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	if (likely(!vma->vm->closed)) {
 		trace_i915_vma_unbind(vma);
-		vma->vm->unbind_vma(vma);
+		vma->ops->unbind_vma(vma);
 	}
 
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index f0532f1a4953..4321476a6a32 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -49,6 +49,7 @@ struct i915_vma {
 	struct drm_mm_node node;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
+	const struct i915_vma_ops *ops;
 	struct drm_i915_fence_reg *fence;
 	struct reservation_object *resv; /** Alias of obj->resv */
 	struct sg_table *pages;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 556c546f2715..6a7f4da7b523 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -80,12 +80,13 @@ mock_ppgtt(struct drm_i915_private *i915,
 	ppgtt->vm.clear_range = nop_clear_range;
 	ppgtt->vm.insert_page = mock_insert_page;
 	ppgtt->vm.insert_entries = mock_insert_entries;
-	ppgtt->vm.bind_vma = mock_bind_ppgtt;
-	ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
-	ppgtt->vm.set_pages = ppgtt_set_pages;
-	ppgtt->vm.clear_pages = clear_pages;
 	ppgtt->vm.cleanup = mock_cleanup;
 
+	ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+	ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+	ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
 	return ppgtt;
 }
 
@@ -116,12 +117,13 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 	ggtt->vm.clear_range = nop_clear_range;
 	ggtt->vm.insert_page = mock_insert_page;
 	ggtt->vm.insert_entries = mock_insert_entries;
-	ggtt->vm.bind_vma = mock_bind_ggtt;
-	ggtt->vm.unbind_vma = mock_unbind_ggtt;
-	ggtt->vm.set_pages = ggtt_set_pages;
-	ggtt->vm.clear_pages = clear_pages;
 	ggtt->vm.cleanup = mock_cleanup;
 
+	ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+	ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
 	i915_address_space_init(&ggtt->vm, i915, "global");
 }