Diffstat (limited to 'drivers/gpu/drm/ttm')
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            | 173
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c       | 138
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c         |  29
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  | 169
 4 files changed, 395 insertions, 114 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3ca77dc03915..0b6a55ac2f87 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
         }
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
         int put_count = 0;
 
@@ -224,9 +220,21 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
         int ret;
 
         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-                if (use_sequence && bo->seq_valid &&
-                        (sequence - bo->val_seq < (1 << 31))) {
-                        return -EAGAIN;
+                /**
+                 * Deadlock avoidance for multi-bo reserving.
+                 */
+                if (use_sequence && bo->seq_valid) {
+                        /**
+                         * We've already reserved this one.
+                         */
+                        if (unlikely(sequence == bo->val_seq))
+                                return -EDEADLK;
+                        /**
+                         * Already reserved by a thread that will not back
+                         * off for us. We need to back off.
+                         */
+                        if (unlikely(sequence - bo->val_seq < (1 << 31)))
+                                return -EAGAIN;
                 }
 
                 if (no_wait)
@@ -241,6 +249,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
         }
 
         if (use_sequence) {
+                /**
+                 * Wake up waiters that may need to recheck for deadlock,
+                 * if we decreased the sequence number.
+                 */
+                if (unlikely((bo->val_seq - sequence < (1 << 31))
+                             || !bo->seq_valid))
+                        wake_up_all(&bo->event_queue);
+
                 bo->val_seq = sequence;
                 bo->seq_valid = true;
         } else {
@@ -256,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
         BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                         bool never_free)
+{
+        kref_sub(&bo->list_kref, count,
+                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
                    bool interruptible,
                    bool no_wait, bool use_sequence, uint32_t sequence)
@@ -271,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
                 put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+        ttm_bo_add_to_lru(bo);
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_global *glob = bo->glob;
 
         spin_lock(&glob->lru_lock);
-        ttm_bo_add_to_lru(bo);
-        atomic_set(&bo->reserved, 0);
-        wake_up_all(&bo->event_queue);
+        ttm_bo_unreserve_locked(bo);
         spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -351,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         int ret = 0;
 
         if (old_is_pci || new_is_pci ||
-            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-                ttm_bo_unmap_virtual(bo);
+            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+                ret = ttm_mem_io_lock(old_man, true);
+                if (unlikely(ret != 0))
+                        goto out_err;
+                ttm_bo_unmap_virtual_locked(bo);
+                ttm_mem_io_unlock(old_man);
+        }
 
         /*
          * Create and bind a ttm if required.
@@ -374,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         }
 
         if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+                if (bdev->driver->move_notify)
+                        bdev->driver->move_notify(bo, mem);
                 bo->mem = *mem;
                 mem->mm_node = NULL;
                 goto moved;
         }
-
         }
 
         if (bdev->driver->move_notify)
@@ -405,11 +438,9 @@ moved:
         }
 
         if (bo->mem.mm_node) {
-                spin_lock(&bo->lock);
                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
                     bdev->man[bo->mem.mem_type].gpu_offset;
                 bo->cur_placement = bo->mem.placement;
-                spin_unlock(&bo->lock);
         } else
                 bo->offset = 0;
@@ -441,7 +472,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
                 ttm_tt_destroy(bo->ttm);
                 bo->ttm = NULL;
         }
-
         ttm_bo_mem_put(bo, &bo->mem);
 
         atomic_set(&bo->reserved, 0);
@@ -463,14 +493,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         int put_count;
         int ret;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         (void) ttm_bo_wait(bo, false, false, true);
         if (!bo->sync_obj) {
 
                 spin_lock(&glob->lru_lock);
 
                 /**
-                 * Lock inversion between bo::reserve and bo::lock here,
+                 * Lock inversion between bo:reserve and bdev::fence_lock here,
                  * but that's OK, since we're only trylocking.
                  */
@@ -479,14 +509,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 if (unlikely(ret == -EBUSY))
                         goto queue;
 
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 put_count = ttm_bo_del_from_lru(bo);
 
                 spin_unlock(&glob->lru_lock);
                 ttm_bo_cleanup_memtype_use(bo);
 
-                while (put_count--)
-                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
+                ttm_bo_list_ref_sub(bo, put_count, true);
 
                 return;
         } else {
@@ -501,7 +530,7 @@ queue:
         kref_get(&bo->list_kref);
         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
         spin_unlock(&glob->lru_lock);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (sync_obj) {
                 driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -526,14 +555,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                                bool no_wait_reserve,
                                bool no_wait_gpu)
 {
+        struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_bo_global *glob = bo->glob;
         int put_count;
         int ret = 0;
 
retry:
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (unlikely(ret != 0))
                 return ret;
@@ -569,8 +599,7 @@ retry:
         spin_unlock(&glob->lru_lock);
         ttm_bo_cleanup_memtype_use(bo);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return 0;
 }
@@ -641,6 +670,7 @@ static void ttm_bo_release(struct kref *kref)
         struct ttm_buffer_object *bo =
             container_of(kref, struct ttm_buffer_object, kref);
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
         if (likely(bo->vm_node != NULL)) {
                 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -648,6 +678,9 @@ static void ttm_bo_release(struct kref *kref)
                 bo->vm_node = NULL;
         }
         write_unlock(&bdev->vm_lock);
+        ttm_mem_io_lock(man, false);
+        ttm_mem_io_free_vm(bo);
+        ttm_mem_io_unlock(man);
         ttm_bo_cleanup_refs_or_queue(bo);
         kref_put(&bo->list_kref, ttm_bo_release_list);
         write_lock(&bdev->vm_lock);
@@ -687,9 +720,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         struct ttm_placement placement;
         int ret = 0;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (unlikely(ret != 0)) {
                 if (ret != -ERESTARTSYS) {
@@ -704,7 +737,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         evict_mem = bo->mem;
         evict_mem.mm_node = NULL;
-        evict_mem.bus.io_reserved = false;
+        evict_mem.bus.io_reserved_vm = false;
+        evict_mem.bus.io_reserved_count = 0;
 
         placement.fpfn = 0;
         placement.lpfn = 0;
@@ -791,8 +825,7 @@ retry:
 
         BUG_ON(ret != 0);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
         ttm_bo_unreserve(bo);
@@ -1025,6 +1058,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
         int ret = 0;
         struct ttm_mem_reg mem;
+        struct ttm_bo_device *bdev = bo->bdev;
 
         BUG_ON(!atomic_read(&bo->reserved));
@@ -1033,15 +1067,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
          * Have the driver move function wait for idle when necessary,
          * instead of doing it here.
          */
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
         if (ret)
                 return ret;
         mem.num_pages = bo->num_pages;
         mem.size = mem.num_pages << PAGE_SHIFT;
         mem.page_alignment = bo->mem.page_alignment;
-        mem.bus.io_reserved = false;
+        mem.bus.io_reserved_vm = false;
+        mem.bus.io_reserved_count = 0;
         /*
          * Determine where to move the buffer.
          */
@@ -1152,7 +1187,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         }
         bo->destroy = destroy;
 
-        spin_lock_init(&bo->lock);
         kref_init(&bo->kref);
         kref_init(&bo->list_kref);
         atomic_set(&bo->cpu_writers, 0);
@@ -1161,6 +1195,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         INIT_LIST_HEAD(&bo->lru);
         INIT_LIST_HEAD(&bo->ddestroy);
         INIT_LIST_HEAD(&bo->swap);
+        INIT_LIST_HEAD(&bo->io_reserve_lru);
         bo->bdev = bdev;
         bo->glob = bdev->glob;
         bo->type = type;
@@ -1170,7 +1205,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         bo->mem.num_pages = bo->num_pages;
         bo->mem.mm_node = NULL;
         bo->mem.page_alignment = page_alignment;
-        bo->mem.bus.io_reserved = false;
+        bo->mem.bus.io_reserved_vm = false;
+        bo->mem.bus.io_reserved_count = 0;
         bo->buffer_start = buffer_start & PAGE_MASK;
         bo->priv_flags = 0;
         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1344,6 +1380,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
         BUG_ON(type >= TTM_NUM_MEM_TYPES);
         man = &bdev->man[type];
         BUG_ON(man->has_type);
+        man->io_reserve_fastpath = true;
+        man->use_io_reserve_lru = false;
+        mutex_init(&man->io_reserve_mutex);
+        INIT_LIST_HEAD(&man->io_reserve_lru);
 
         ret = bdev->driver->init_mem_type(bdev, type, man);
         if (ret)
@@ -1461,8 +1501,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
         list_del(&bdev->device_list);
         mutex_unlock(&glob->device_list_mutex);
 
-        if (!cancel_delayed_work(&bdev->wq))
-                flush_scheduled_work();
+        cancel_delayed_work_sync(&bdev->wq);
 
         while (ttm_bo_delayed_delete(bdev, true))
                 ;
@@ -1516,7 +1555,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->dev_mapping = NULL;
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
-
+        bdev->val_seq = 0;
+        spin_lock_init(&bdev->fence_lock);
         mutex_lock(&glob->device_list_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
         mutex_unlock(&glob->device_list_mutex);
@@ -1550,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
         return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1559,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
         if (!bdev->dev_mapping)
                 return;
         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-        ttm_mem_io_free(bdev, &bo->mem);
+        ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+        ttm_mem_io_lock(man, false);
+        ttm_bo_unmap_virtual_locked(bo);
+        ttm_mem_io_unlock(man);
+}
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1640,6 +1692,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 bool lazy, bool interruptible, bool no_wait)
 {
         struct ttm_bo_driver *driver = bo->bdev->driver;
+        struct ttm_bo_device *bdev = bo->bdev;
         void *sync_obj;
         void *sync_obj_arg;
         int ret = 0;
@@ -1653,9 +1706,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 void *tmp_obj = bo->sync_obj;
                 bo->sync_obj = NULL;
                 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 driver->sync_obj_unref(&tmp_obj);
-                spin_lock(&bo->lock);
+                spin_lock(&bdev->fence_lock);
                 continue;
         }
@@ -1664,29 +1717,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
                 sync_obj_arg = bo->sync_obj_arg;
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible);
                 if (unlikely(ret != 0)) {
                         driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                         return ret;
                 }
-                spin_lock(&bo->lock);
+                spin_lock(&bdev->fence_lock);
                 if (likely(bo->sync_obj == sync_obj &&
                            bo->sync_obj_arg == sync_obj_arg)) {
                         void *tmp_obj = bo->sync_obj;
                         bo->sync_obj = NULL;
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                        spin_unlock(&bo->lock);
+                        spin_unlock(&bdev->fence_lock);
                         driver->sync_obj_unref(&sync_obj);
                         driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                 } else {
-                        spin_unlock(&bo->lock);
+                        spin_unlock(&bdev->fence_lock);
                         driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                 }
         }
         return 0;
@@ -1695,6 +1748,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;
 
         /*
@@ -1704,9 +1758,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
         if (unlikely(ret != 0))
                 return ret;
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, true, no_wait);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
         if (likely(ret == 0))
                 atomic_inc(&bo->cpu_writers);
         ttm_bo_unreserve(bo);
@@ -1772,16 +1826,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         /**
          * Wait for GPU, then move to system cached.
          */
-        spin_lock(&bo->lock);
+        spin_lock(&bo->bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bo->bdev->fence_lock);
 
         if (unlikely(ret != 0))
                 goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..77dbf408c0d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-        int ret;
+        if (likely(man->io_reserve_fastpath))
+                return 0;
+
+        if (interruptible)
+                return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+        mutex_lock(&man->io_reserve_mutex);
+        return 0;
+}
 
-        if (!mem->bus.io_reserved) {
-                mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+        if (likely(man->io_reserve_fastpath))
+                return;
+
+        mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+        struct ttm_buffer_object *bo;
+
+        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+                return -EAGAIN;
+
+        bo = list_first_entry(&man->io_reserve_lru,
+                              struct ttm_buffer_object,
+                              io_reserve_lru);
+        list_del_init(&bo->io_reserve_lru);
+        ttm_bo_unmap_virtual_locked(bo);
+
+        return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                              struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+        int ret = 0;
+
+        if (!bdev->driver->io_mem_reserve)
+                return 0;
+        if (likely(man->io_reserve_fastpath))
+                return bdev->driver->io_mem_reserve(bdev, mem);
+
+        if (bdev->driver->io_mem_reserve &&
+            mem->bus.io_reserved_count++ == 0) {
+retry:
                 ret = bdev->driver->io_mem_reserve(bdev, mem);
+                if (ret == -EAGAIN) {
+                        ret = ttm_mem_io_evict(man);
+                        if (ret == 0)
+                                goto retry;
+                }
+        }
+        return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                            struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+        if (likely(man->io_reserve_fastpath))
+                return;
+
+        if (bdev->driver->io_mem_reserve &&
+            --mem->bus.io_reserved_count == 0 &&
+            bdev->driver->io_mem_free)
+                bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+        struct ttm_mem_reg *mem = &bo->mem;
+        int ret;
+
+        if (!mem->bus.io_reserved_vm) {
+                struct ttm_mem_type_manager *man =
+                        &bo->bdev->man[mem->mem_type];
+
+                ret = ttm_mem_io_reserve(bo->bdev, mem);
                 if (unlikely(ret != 0))
                         return ret;
+                mem->bus.io_reserved_vm = true;
+                if (man->use_io_reserve_lru)
+                        list_add_tail(&bo->io_reserve_lru,
+                                      &man->io_reserve_lru);
         }
         return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-        if (bdev->driver->io_mem_reserve) {
-                if (mem->bus.io_reserved) {
-                        mem->bus.io_reserved = false;
-                        bdev->driver->io_mem_free(bdev, mem);
-                }
+        struct ttm_mem_reg *mem = &bo->mem;
+
+        if (mem->bus.io_reserved_vm) {
+                mem->bus.io_reserved_vm = false;
+                list_del_init(&bo->io_reserve_lru);
+                ttm_mem_io_free(bo->bdev, mem);
         }
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void **virtual)
 {
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
         int ret;
         void *addr;
 
         *virtual = NULL;
+        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bdev, mem);
+        ttm_mem_io_unlock(man);
         if (ret || !mem->bus.is_iomem)
                 return ret;
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                 else
                         addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                 if (!addr) {
+                        (void) ttm_mem_io_lock(man, false);
                         ttm_mem_io_free(bdev, mem);
+                        ttm_mem_io_unlock(man);
                         return -ENOMEM;
                 }
         }
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
         if (virtual && mem->bus.addr == NULL)
                 iounmap(virtual);
+        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(bdev, mem);
+        ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
         struct ttm_tt *ttm = bo->ttm;
         struct ttm_mem_reg *old_mem = &bo->mem;
-        struct ttm_mem_reg old_copy = *old_mem;
+        struct ttm_mem_reg old_copy;
         void *old_iomap;
         void *new_iomap;
         int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         }
         mb();
 out2:
-        ttm_bo_free_old_node(bo);
-
+        old_copy = *old_mem;
         *old_mem = *new_mem;
         new_mem->mm_node = NULL;
@@ -292,9 +381,10 @@ out2:
         }
 
 out1:
-        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
         ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+        ttm_bo_mem_put(bo, &old_copy);
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
          * TODO: Explicit member copy would probably be better here.
          */
 
-        spin_lock_init(&fbo->lock);
         init_waitqueue_head(&fbo->event_queue);
         INIT_LIST_HEAD(&fbo->ddestroy);
         INIT_LIST_HEAD(&fbo->lru);
         INIT_LIST_HEAD(&fbo->swap);
+        INIT_LIST_HEAD(&fbo->io_reserve_lru);
         fbo->vm_node = NULL;
         atomic_set(&fbo->cpu_writers, 0);
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
+        struct ttm_mem_type_manager *man =
+                &bo->bdev->man[bo->mem.mem_type];
         unsigned long offset, size;
         int ret;
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
         if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                 return -EPERM;
 #endif
+        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+        ttm_mem_io_unlock(man);
         if (ret)
                 return ret;
         if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+        struct ttm_buffer_object *bo = map->bo;
+        struct ttm_mem_type_manager *man =
+                &bo->bdev->man[bo->mem.mem_type];
+
         if (!map->virtual)
                 return;
         switch (map->bo_kmap_type) {
         case ttm_bo_map_iomap:
                 iounmap(map->virtual);
-                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                 break;
         case ttm_bo_map_vmap:
                 vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
         default:
                 BUG();
         }
+        (void) ttm_mem_io_lock(man, false);
+        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+        ttm_mem_io_unlock(man);
         map->virtual = NULL;
         map->page = NULL;
 }
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         struct ttm_buffer_object *ghost_obj;
         void *tmp_obj = NULL;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj) {
                 tmp_obj = bo->sync_obj;
                 bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         bo->sync_obj_arg = sync_obj_arg;
         if (evict) {
                 ret = ttm_bo_wait(bo, false, false, false);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
                 if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                  */
                 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int i;
         unsigned long address = (unsigned long)vmf->virtual_address;
         int retval = VM_FAULT_NOPAGE;
+        struct ttm_mem_type_manager *man =
+                &bdev->man[bo->mem.mem_type];
 
         /*
          * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
          * move.
          */
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                 ret = ttm_bo_wait(bo, false, true, false);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 if (unlikely(ret != 0)) {
                         retval = (ret != -ERESTARTSYS) ?
                             VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                         goto out_unlock;
                 }
         } else
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
 
-
-        ret = ttm_mem_io_reserve(bdev, &bo->mem);
-        if (ret) {
-                retval = VM_FAULT_SIGBUS;
+        ret = ttm_mem_io_lock(man, true);
+        if (unlikely(ret != 0)) {
+                retval = VM_FAULT_NOPAGE;
                 goto out_unlock;
         }
+        ret = ttm_mem_io_reserve_vm(bo);
+        if (unlikely(ret != 0)) {
+                retval = VM_FAULT_SIGBUS;
+                goto out_io_unlock;
+        }
 
         page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
             bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         if (unlikely(page_offset >= bo->num_pages)) {
                 retval = VM_FAULT_SIGBUS;
-                goto out_unlock;
+                goto out_io_unlock;
         }
 
         /*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                         page = ttm_tt_get_page(ttm, page_offset);
                         if (unlikely(!page && i == 0)) {
                                 retval = VM_FAULT_OOM;
-                                goto out_unlock;
+                                goto out_io_unlock;
                         } else if (unlikely(!page)) {
                                 break;
                         }
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 else if (unlikely(ret != 0)) {
                         retval = (ret == -ENOMEM) ?
                             VM_FAULT_OOM : VM_FAULT_SIGBUS;
-                        goto out_unlock;
+                        goto out_io_unlock;
                 }
 
                 address += PAGE_SIZE;
                 if (unlikely(++page_offset >= page_last))
                         break;
         }
-
+out_io_unlock:
+        ttm_mem_io_unlock(man);
 out_unlock:
         ttm_bo_unreserve(bo);
         return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..3832fe10b4df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
         struct ttm_validate_buffer *entry;
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
                 if (!entry->reserved)
                         continue;
 
+                if (entry->removed) {
+                        ttm_bo_add_to_lru(bo);
+                        entry->removed = false;
+
+                }
                 entry->reserved = false;
-                ttm_bo_unreserve(bo);
+                atomic_set(&bo->reserved, 0);
+                wake_up_all(&bo->event_queue);
+        }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+                if (!entry->reserved)
+                        continue;
+
+                if (!entry->removed) {
+                        entry->put_count = ttm_bo_del_from_lru(bo);
+                        entry->removed = true;
+                }
         }
 }
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+
+                if (entry->put_count) {
+                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
+                        entry->put_count = 0;
+                }
+        }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+                                         struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+        int ret;
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ret = ttm_bo_wait_unreserved(bo, true);
+        spin_lock(&glob->lru_lock);
+        if (unlikely(ret != 0))
+                ttm_eu_backoff_reservation_locked(list);
+        return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+        struct ttm_bo_global *glob;
+
+        if (list_empty(list))
+                return;
+
+        entry = list_first_entry(list, struct ttm_validate_buffer, head);
+        glob = entry->bo->glob;
+        spin_lock(&glob->lru_lock);
+        ttm_eu_backoff_reservation_locked(list);
+        spin_unlock(&glob->lru_lock);
+}
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 /*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
+        struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
+        uint32_t val_seq;
+
+        if (list_empty(list))
+                return 0;
+
+        list_for_each_entry(entry, list, head) {
+                entry->reserved = false;
+                entry->put_count = 0;
+                entry->removed = false;
+        }
+
+        entry = list_first_entry(list, struct ttm_validate_buffer, head);
+        glob = entry->bo->glob;
 
retry:
+        spin_lock(&glob->lru_lock);
+        val_seq = entry->bo->bdev->val_seq++;
+
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
-                entry->reserved = false;
-                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-                if (ret != 0) {
-                        ttm_eu_backoff_reservation(list);
-                        if (ret == -EAGAIN) {
-                                ret = ttm_bo_wait_unreserved(bo, true);
-                                if (unlikely(ret != 0))
-                                        return ret;
-                                goto retry;
-                        } else
+retry_this_bo:
+                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+                switch (ret) {
+                case 0:
+                        break;
+                case -EBUSY:
+                        ret = ttm_eu_wait_unreserved_locked(list, bo);
+                        if (unlikely(ret != 0)) {
+                                spin_unlock(&glob->lru_lock);
+                                ttm_eu_list_ref_sub(list);
                                 return ret;
+                        }
+                        goto retry_this_bo;
+                case -EAGAIN:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        ret = ttm_bo_wait_unreserved(bo, true);
+                        if (unlikely(ret != 0))
+                                return ret;
+                        goto retry;
+                default:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        return ret;
                 }
 
                 entry->reserved = true;
                 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                        ttm_eu_backoff_reservation(list);
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
                         ret = ttm_bo_wait_cpu(bo, false);
                         if (ret)
                                 return ret;
                         goto retry;
                 }
         }
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ttm_eu_list_ref_sub(list);
+
         return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 {
         struct ttm_validate_buffer *entry;
+        struct ttm_buffer_object *bo;
+        struct ttm_bo_global *glob;
+        struct ttm_bo_device *bdev;
+        struct ttm_bo_driver *driver;
 
-        list_for_each_entry(entry, list, head) {
-                struct ttm_buffer_object *bo = entry->bo;
-                struct ttm_bo_driver *driver = bo->bdev->driver;
-                void *old_sync_obj;
+        if (list_empty(list))
+                return;
+
+        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+        bdev = bo->bdev;
+        driver = bdev->driver;
+        glob = bo->glob;
 
-                spin_lock(&bo->lock);
-                old_sync_obj = bo->sync_obj;
+        spin_lock(&bdev->fence_lock);
+        spin_lock(&glob->lru_lock);
+
+        list_for_each_entry(entry, list, head) {
+                bo = entry->bo;
+                entry->old_sync_obj = bo->sync_obj;
                 bo->sync_obj = driver->sync_obj_ref(sync_obj);
                 bo->sync_obj_arg = entry->new_sync_obj_arg;
-                spin_unlock(&bo->lock);
-                ttm_bo_unreserve(bo);
+                ttm_bo_unreserve_locked(bo);
                 entry->reserved = false;
-                if (old_sync_obj)
-                        driver->sync_obj_unref(&old_sync_obj);
+        }
+        spin_unlock(&glob->lru_lock);
+        spin_unlock(&bdev->fence_lock);
+
+        list_for_each_entry(entry, list, head) {
+                if (entry->old_sync_obj)
+                        driver->sync_obj_unref(&entry->old_sync_obj);
         }
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
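
Note: the deadlock-avoidance scheme introduced in ttm_bo_reserve_locked() above rests on a wraparound-safe comparison of 32-bit validation sequence numbers. The sketch below distills just that comparison into standalone C; the function and parameter names are illustrative, not part of the TTM API.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the sequence check in ttm_bo_reserve_locked(): "my_seq" is
 * the reserving transaction's sequence number, "holder_seq" belongs to
 * the thread currently holding the reservation.
 */
static int check_reserve_sequence(uint32_t my_seq, uint32_t holder_seq,
                                  bool holder_seq_valid)
{
        if (!holder_seq_valid)
                return -EBUSY;          /* holder took it without a sequence: just wait */

        if (my_seq == holder_seq)
                return -EDEADLK;        /* this thread already reserved the bo */

        /*
         * Unsigned wraparound arithmetic: the difference is < 2^31
         * exactly when my_seq is the newer (later) sequence, i.e. the
         * holder started earlier and will not back off for us.
         */
        if (my_seq - holder_seq < (1u << 31))
                return -EAGAIN;         /* we are newer: we must back off */

        return -EBUSY;                  /* holder is newer and will back off: wait */
}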
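For context, here is a driver-side usage sketch of the reworked execbuf helpers. This is hypothetical driver code, not part of the patch: my_submit(), my_validate() and the fence argument are placeholders. Note that ttm_eu_reserve_buffers() no longer takes a caller-supplied sequence number; after this patch it is drawn from bdev->val_seq internally under the lru_lock.

#include <linux/list.h>
#include "ttm/ttm_execbuf_util.h"

/* Placeholder for driver-specific placement/validation logic. */
extern int my_validate(struct ttm_buffer_object *bo);

static int my_submit(struct list_head *val_list, void *fence)
{
        struct ttm_validate_buffer *entry;
        int ret;

        /* Reserve every buffer on the list, deadlock-free. */
        ret = ttm_eu_reserve_buffers(val_list);
        if (unlikely(ret != 0))
                return ret;

        list_for_each_entry(entry, val_list, head) {
                ret = my_validate(entry->bo);
                if (unlikely(ret != 0)) {
                        /* Drop every reservation taken so far. */
                        ttm_eu_backoff_reservation(val_list);
                        return ret;
                }
        }

        /* Attach the fence and unreserve all buffers in one pass. */
        ttm_eu_fence_buffer_objects(val_list, fence);
        return 0;
}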
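Finally, on ttm_bo_list_ref_sub(): the point of replacing the kref_put() loops is to collapse N atomic decrements into a single one. A minimal sketch of the two forms, assuming a kernel that provides kref_sub() (the helper names here are illustrative):

#include <linux/kref.h>

static void drop_refs_old(struct kref *r, int count,
                          void (*release)(struct kref *))
{
        while (count--)                 /* one atomic op per reference */
                kref_put(r, release);
}

static void drop_refs_new(struct kref *r, int count,
                          void (*release)(struct kref *))
{
        kref_sub(r, count, release);    /* one atomic_sub_and_test() for the batch */
}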