Diffstat (limited to 'drivers/gpu/drm/ttm')
 drivers/gpu/drm/ttm/ttm_bo.c         | 64
 drivers/gpu/drm/ttm/ttm_bo_manager.c |  5
 drivers/gpu/drm/ttm/ttm_bo_vm.c      | 79
 drivers/gpu/drm/ttm/ttm_page_alloc.c |  2
 4 files changed, 130 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 22b57020790d..cba11f13d994 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -70,6 +70,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 {
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+        struct drm_printer p = drm_debug_printer(TTM_PFX);
 
         pr_err("    has_type: %d\n", man->has_type);
         pr_err("    use_type: %d\n", man->use_type);
@@ -79,7 +80,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
         pr_err("    available_caching: 0x%08X\n", man->available_caching);
         pr_err("    default_caching: 0x%08X\n", man->default_caching);
         if (mem_type != TTM_PL_SYSTEM)
-                (*man->func->debug)(man, TTM_PFX);
+                (*man->func->debug)(man, &p);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -394,14 +395,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
         ww_mutex_unlock (&bo->resv->lock);
 }
 
+static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
+{
+        int r;
+
+        if (bo->resv == &bo->ttm_resv)
+                return 0;
+
+        reservation_object_init(&bo->ttm_resv);
+        BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
+
+        r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+        if (r) {
+                reservation_object_unlock(&bo->ttm_resv);
+                reservation_object_fini(&bo->ttm_resv);
+        }
+
+        return r;
+}
+
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
         struct reservation_object_list *fobj;
         struct dma_fence *fence;
         int i;
 
-        fobj = reservation_object_get_list(bo->resv);
-        fence = reservation_object_get_excl(bo->resv);
+        fobj = reservation_object_get_list(&bo->ttm_resv);
+        fence = reservation_object_get_excl(&bo->ttm_resv);
         if (fence && !fence->ops->signaled)
                 dma_fence_enable_sw_signaling(fence);
 
@@ -430,8 +450,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 ttm_bo_cleanup_memtype_use(bo);
 
                 return;
-        } else
-                ttm_bo_flush_all_fences(bo);
+        }
+
+        ret = ttm_bo_individualize_resv(bo);
+        if (ret) {
+                /* Last resort, if we fail to allocate memory for the
+                 * fences block for the BO to become idle and free it.
+                 */
+                spin_unlock(&glob->lru_lock);
+                ttm_bo_wait(bo, true, true);
+                ttm_bo_cleanup_memtype_use(bo);
+                return;
+        }
+        ttm_bo_flush_all_fences(bo);
 
         /*
          * Make NO_EVICT bos immediately available to
@@ -443,6 +474,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 ttm_bo_add_to_lru(bo);
         }
 
+        if (bo->resv != &bo->ttm_resv)
+                reservation_object_unlock(&bo->ttm_resv);
         __ttm_bo_unreserve(bo);
 }
 
@@ -471,17 +504,25 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                                           bool no_wait_gpu)
 {
         struct ttm_bo_global *glob = bo->glob;
+        struct reservation_object *resv;
         int ret;
 
-        ret = ttm_bo_wait(bo, false, true);
+        if (unlikely(list_empty(&bo->ddestroy)))
+                resv = bo->resv;
+        else
+                resv = &bo->ttm_resv;
+
+        if (reservation_object_test_signaled_rcu(resv, true))
+                ret = 0;
+        else
+                ret = -EBUSY;
 
         if (ret && !no_wait_gpu) {
                 long lret;
                 ww_mutex_unlock(&bo->resv->lock);
                 spin_unlock(&glob->lru_lock);
 
-                lret = reservation_object_wait_timeout_rcu(bo->resv,
-                                                           true,
+                lret = reservation_object_wait_timeout_rcu(resv, true,
                                                            interruptible,
                                                            30 * HZ);
 
@@ -505,13 +546,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                         spin_unlock(&glob->lru_lock);
                         return 0;
                 }
-
-                /*
-                 * remove sync_obj with ttm_bo_wait, the wait should be
-                 * finished, and no new wait object should have been added.
-                 */
-                ret = ttm_bo_wait(bo, false, true);
-                WARN_ON(ret);
         }
 
         if (ret || unlikely(list_empty(&bo->ddestroy))) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 90a6c0b03afc..a7c232dc39cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -136,13 +136,12 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
-                             const char *prefix)
+                             struct drm_printer *printer)
 {
         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
-        struct drm_printer p = drm_debug_printer(prefix);
 
         spin_lock(&rman->lock);
-        drm_mm_print(&rman->mm, &p);
+        drm_mm_print(&rman->mm, printer);
         spin_unlock(&rman->lock);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b442d12f2f7d..a01e5c90fd87 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -294,10 +294,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
         vma->vm_private_data = NULL;
 }
 
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+                                 unsigned long offset,
+                                 void *buf, int len, int write)
+{
+        unsigned long page = offset >> PAGE_SHIFT;
+        unsigned long bytes_left = len;
+        int ret;
+
+        /* Copy a page at a time, that way no extra virtual address
+         * mapping is needed
+         */
+        offset -= page << PAGE_SHIFT;
+        do {
+                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+                struct ttm_bo_kmap_obj map;
+                void *ptr;
+                bool is_iomem;
+
+                ret = ttm_bo_kmap(bo, page, 1, &map);
+                if (ret)
+                        return ret;
+
+                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+                WARN_ON_ONCE(is_iomem);
+                if (write)
+                        memcpy(ptr, buf, bytes);
+                else
+                        memcpy(buf, ptr, bytes);
+                ttm_bo_kunmap(&map);
+
+                page++;
+                bytes_left -= bytes;
+                offset = 0;
+        } while (bytes_left);
+
+        return len;
+}
+
+static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+                            void *buf, int len, int write)
+{
+        unsigned long offset = (addr) - vma->vm_start;
+        struct ttm_buffer_object *bo = vma->vm_private_data;
+        int ret;
+
+        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+                return -EIO;
+
+        ret = ttm_bo_reserve(bo, true, false, NULL);
+        if (ret)
+                return ret;
+
+        switch (bo->mem.mem_type) {
+        case TTM_PL_SYSTEM:
+                if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+                        ret = ttm_tt_swapin(bo->ttm);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+                /* fall through */
+        case TTM_PL_TT:
+                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
+                break;
+        default:
+                if (bo->bdev->driver->access_memory)
+                        ret = bo->bdev->driver->access_memory(
+                                bo, offset, buf, len, write);
+                else
+                        ret = -EIO;
+        }
+
+        ttm_bo_unreserve(bo);
+
+        return ret;
+}
+
 static const struct vm_operations_struct ttm_bo_vm_ops = {
         .fault = ttm_bo_vm_fault,
         .open = ttm_bo_vm_open,
-        .close = ttm_bo_vm_close
+        .close = ttm_bo_vm_close,
+        .access = ttm_bo_vm_access
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index eeddc1e48409..871599826773 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
         } else {
                 pr_err("Failed to fill pool (%p)\n", pool);
                 /* If we have any pages left put them to the pool. */
-                list_for_each_entry(p, &pool->list, lru) {
+                list_for_each_entry(p, &new_pages, lru) {
                         ++cpages;
                 }
                 list_splice(&new_pages, &pool->list);
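
A note on the new access path (commentary, not part of the diff): ttm_bo_vm_access() is what lets ptrace-style access, for example a debugger peeking at a mapped BO through /proc/<pid>/mem, work on TTM mappings. System and GTT placements are copied page by page through ttm_bo_kmap(); every other placement is forwarded to the driver's new access_memory() callback. The sketch below shows what such a callback might look like for a device whose VRAM is fully CPU-visible through an ioremap'ed BAR. All mydrv_* names are invented for illustration; real implementations (amdgpu's amdgpu_ttm_access_memory(), for instance) also have to cope with VRAM that is only partially visible to the CPU.

    /* Hypothetical sketch, not part of this diff: a minimal
     * ttm_bo_driver.access_memory() implementation.  All mydrv_*
     * names are made up; mydrv_device() is an assumed container_of
     * helper and vram_mmio an assumed void __iomem * BAR mapping.
     */
    static int mydrv_access_memory(struct ttm_buffer_object *bo,
                                   unsigned long offset,
                                   void *buf, int len, int write)
    {
            struct mydrv_device *mdev = mydrv_device(bo->bdev);
            /* bo->mem.start is the placement offset in pages */
            unsigned long pos = (bo->mem.start << PAGE_SHIFT) + offset;

            if (bo->mem.mem_type != TTM_PL_VRAM)
                    return -EIO;

            if (write)
                    memcpy_toio(mdev->vram_mmio + pos, buf, len);
            else
                    memcpy_fromio(buf, mdev->vram_mmio + pos, len);

            return len; /* like ttm_bo_vm_access_kmap(), return bytes copied */
    }

The callback runs with the BO reserved and with offset/len already range-checked against bo->num_pages by ttm_bo_vm_access(), so all it has to do is translate the BO-relative offset into an aperture offset and copy.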