Diffstat (limited to 'drivers/gpu/drm/qxl/qxl_object.c')
-rw-r--r--   drivers/gpu/drm/qxl/qxl_object.c   52
1 file changed, 31 insertions, 21 deletions
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 4928fa602944..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,14 +33,14 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	struct qxl_device *qdev;
 
 	bo = to_qxl_bo(tbo);
-	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
 
 	qxl_surface_evict(qdev, bo, false);
 	WARN_ON_ONCE(bo->map_count > 0);
 	mutex_lock(&qdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&qdev->gem.mutex);
-	drm_gem_object_release(&bo->gem_base);
+	drm_gem_object_release(&bo->tbo.base);
 	kfree(bo);
 }
 
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
 void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 {
 	u32 c = 0;
-	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+	u32 pflag = 0;
 	unsigned int i;
 
+	if (pinned)
+		pflag |= TTM_PL_FLAG_NO_EVICT;
+	if (qbo->tbo.base.size <= PAGE_SIZE)
+		pflag |= TTM_PL_FLAG_TOPDOWN;
+
 	qbo->placement.placement = qbo->placements;
 	qbo->placement.busy_placement = qbo->placements;
 	if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 	}
 }
 
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+	.free = qxl_gem_object_free,
+	.open = qxl_gem_object_open,
+	.close = qxl_gem_object_close,
+	.pin = qxl_gem_prime_pin,
+	.unpin = qxl_gem_prime_unpin,
+	.get_sg_table = qxl_gem_prime_get_sg_table,
+	.vmap = qxl_gem_prime_vmap,
+	.vunmap = qxl_gem_prime_vunmap,
+	.mmap = drm_gem_ttm_mmap,
+	.print_info = drm_gem_ttm_print_info,
+};
+
 int qxl_bo_create(struct qxl_device *qdev,
 		  unsigned long size, bool kernel, bool pinned, u32 domain,
 		  struct qxl_surface *surf,
@@ -95,11 +113,12 @@ int qxl_bo_create(struct qxl_device *qdev,
 	if (bo == NULL)
 		return -ENOMEM;
 	size = roundup(size, PAGE_SIZE);
-	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
+	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
 	if (unlikely(r)) {
 		kfree(bo);
 		return r;
 	}
+	bo->tbo.base.funcs = &qxl_object_funcs;
 	bo->type = domain;
 	bo->pin_count = pinned ? 1 : 0;
 	bo->surface_id = 0;
@@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 			      struct qxl_bo *bo, int page_offset)
 {
-	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
 	void *rptr;
 	int ret;
 	struct io_mapping *map;
@@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 	else
 		goto fallback;
 
-	(void) ttm_mem_io_lock(man, false);
-	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
-	ttm_mem_io_unlock(man);
+	ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
 
 	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
 fallback:
@@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
-	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
 	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
 	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
 		goto fallback;
 
 	io_mapping_unmap_atomic(pmap);
-
-	(void) ttm_mem_io_lock(man, false);
-	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
-	ttm_mem_io_unlock(man);
 	return;
 fallback:
 	qxl_bo_kunmap(bo);
@@ -214,20 +224,20 @@ void qxl_bo_unref(struct qxl_bo **bo)
 	if ((*bo) == NULL)
 		return;
 
-	drm_gem_object_put_unlocked(&(*bo)->gem_base);
+	drm_gem_object_put_unlocked(&(*bo)->tbo.base);
 	*bo = NULL;
 }
 
 struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 {
-	drm_gem_object_get(&bo->gem_base);
+	drm_gem_object_get(&bo->tbo.base);
 	return bo;
 }
 
 static int __qxl_bo_pin(struct qxl_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
-	struct drm_device *ddev = bo->gem_base.dev;
+	struct drm_device *ddev = bo->tbo.base.dev;
 	int r;
 
 	if (bo->pin_count) {
@@ -247,7 +257,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
 static int __qxl_bo_unpin(struct qxl_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
-	struct drm_device *ddev = bo->gem_base.dev;
+	struct drm_device *ddev = bo->tbo.base.dev;
 	int r, i;
 
 	if (!bo->pin_count) {
@@ -310,13 +320,13 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
 	dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
 		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
 		mutex_lock(&qdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&qdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_put_unlocked(&bo->gem_base);
+		drm_gem_object_put_unlocked(&bo->tbo.base);
 	}
 }