Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            | 271
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c       |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c         | 169
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  |  20
4 files changed, 295 insertions, 169 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2845fceb2fbd..6953dd264172 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -166,28 +166,35 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
reservation_object_assert_held(bo->resv);
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- BUG_ON(!list_empty(&bo->lru));
+ if (!list_empty(&bo->lru))
+ return;
- man = &bdev->man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
+ if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+ return;
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap,
- &bdev->glob->swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
- }
+ man = &bdev->man[mem->mem_type];
+ list_add_tail(&bo->lru, &man->lru[bo->priority]);
+ kref_get(&bo->list_kref);
+
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+ list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ kref_get(&bo->list_kref);
}
}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
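/*
 * Editor's sketch, not part of the patch: the locking pattern the new
 * ttm_bo_add_mem_to_lru() helper is used with later in this diff (see
 * ttm_bo_mem_placement() below).  The BO must be reserved, and LRU list
 * manipulation needs the global lru_lock; the function name here is
 * illustrative.
 */
static void example_requeue_on_mem(struct ttm_buffer_object *bo,
                                   struct ttm_mem_reg *mem)
{
        reservation_object_assert_held(bo->resv);

        spin_lock(&bo->bdev->glob->lru_lock);
        ttm_bo_del_from_lru(bo);
        ttm_bo_add_mem_to_lru(bo, mem); /* queue on mem's LRU, not bo->mem's */
        spin_unlock(&bo->bdev->glob->lru_lock);
}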
static void ttm_bo_ref_bug(struct kref *list_kref)
@@ -766,32 +773,72 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx, bool *locked)
+ struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
bool ret = false;
- *locked = false;
if (bo->resv == ctx->resv) {
reservation_object_assert_held(bo->resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
+ *locked = false;
+ if (busy)
+ *busy = false;
} else {
- *locked = reservation_object_trylock(bo->resv);
- ret = *locked;
+ ret = reservation_object_trylock(bo->resv);
+ *locked = ret;
+ if (busy)
+ *busy = !ret;
}
return ret;
}
+/**
+ * ttm_mem_evict_wait_busy - wait for a busy BO to become available
+ *
+ * @busy_bo: BO which couldn't be locked with trylock
+ * @ctx: operation context
+ * @ticket: acquire ticket
+ *
+ * Try to lock a busy buffer object to avoid failing eviction.
+ */
+static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
+{
+ int r;
+
+ if (!busy_bo || !ticket)
+ return -EBUSY;
+
+ if (ctx->interruptible)
+ r = reservation_object_lock_interruptible(busy_bo->resv,
+ ticket);
+ else
+ r = reservation_object_lock(busy_bo->resv, ticket);
+
+ /*
+ * TODO: It would be better to keep the BO locked until allocation is at
+ * least tried one more time, but that would mean a much larger rework
+ * of TTM.
+ */
+ if (!r)
+ reservation_object_unlock(busy_bo->resv);
+
+ return r == -EDEADLK ? -EAGAIN : r;
+}
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
- struct ttm_operation_ctx *ctx)
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
{
+ struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_buffer_object *bo = NULL;
bool locked = false;
unsigned i;
int ret;
@@ -799,8 +846,15 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
+ bool busy;
+
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ &busy)) {
+ if (busy && !busy_bo &&
+ bo->resv->lock.ctx != ticket)
+ busy_bo = bo;
continue;
+ }
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
@@ -819,8 +873,13 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
+ if (busy_bo)
+ ttm_bo_get(busy_bo);
spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
+ if (busy_bo)
+ ttm_bo_put(busy_bo);
+ return ret;
}
kref_get(&bo->list_kref);
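/*
 * Editor's sketch, not part of the patch: reservation objects embed a
 * ww_mutex, so the -EDEADLK handled in ttm_mem_evict_wait_busy() follows
 * the generic ww_mutex contract -- the transaction that loses the ticket
 * race must drop the locks it holds and may then sleep on the contended
 * lock with ww_mutex_lock_slow() before retrying.  Translating -EDEADLK
 * into -EAGAIN pushes that restart up to the caller of the eviction path.
 * The two-lock example below is deliberately minimal.
 */
#include <linux/ww_mutex.h>

static int example_lock_pair(struct ww_acquire_ctx *ctx,
                             struct ww_mutex *a, struct ww_mutex *b)
{
        int ret;

        ret = ww_mutex_lock(a, ctx);
        if (ret)
                return ret;     /* a full caller would also restart here */

        ret = ww_mutex_lock(b, ctx);
        if (ret == -EDEADLK) {
                ww_mutex_unlock(a);
                /* Sleep on the contended lock first, then try again. */
                ww_mutex_lock_slow(b, ctx);
                ret = ww_mutex_lock(a, ctx);
                if (ret) {
                        ww_mutex_unlock(b);
                        return ret;
                }
        }
        return ret;
}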
@@ -892,13 +951,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- uint32_t mem_type,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem,
- struct ttm_operation_ctx *ctx)
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
do {
@@ -907,11 +965,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+ ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
+ bo->resv->lock.ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
- mem->mem_type = mem_type;
+
return ttm_bo_add_move_fence(bo, man, mem);
}
@@ -960,6 +1019,59 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
}
/**
+ * ttm_bo_mem_placement - check if placement is compatible
+ * @bo: BO to find memory for
+ * @place: where to search
+ * @mem: the memory object to fill in
+ * @ctx: operation context
+ *
+ * Check if placement is compatible and fill in mem structure.
+ * Returns -EBUSY if the placement is not compatible, 0 when the placement
+ * can be used, or another negative error code on failure.
+ */
+static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ struct ttm_mem_type_manager *man;
+ uint32_t cur_flags = 0;
+ int ret;
+
+ ret = ttm_mem_type_from_place(place, &mem_type);
+ if (ret)
+ return ret;
+
+ man = &bdev->man[mem_type];
+ if (!man->has_type || !man->use_type)
+ return -EBUSY;
+
+ if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ return -EBUSY;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
+
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+
+ if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return 0;
+}
+
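/*
 * Editor's sketch, not part of the patch: the placement entries this
 * helper consumes.  Each ttm_place carries one memory-type flag plus
 * caching/access bits; ttm_bo_mt_compatible() and ttm_bo_select_caching()
 * split them apart again.  The preferred and busy lists map onto the two
 * loops in ttm_bo_mem_space() below.  Values are illustrative.
 */
static const struct ttm_place example_vram = {
        .fpfn = 0,
        .lpfn = 0,                              /* 0 = no range restriction */
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
};

static const struct ttm_place example_gtt = {
        .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
};

static struct ttm_placement example_placement = {
        .num_placement = 1,
        .placement = &example_vram,
        .num_busy_placement = 1,
        .busy_placement = &example_gtt,         /* fallback under pressure */
};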
+/**
* Creates space for memory region @mem according to its type.
*
* This function first searches for free space in compatible memory types in
@@ -973,12 +1085,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
- uint32_t mem_type = TTM_PL_SYSTEM;
- uint32_t cur_flags = 0;
bool type_found = false;
- bool type_ok = false;
- bool has_erestartsys = false;
int i, ret;
ret = reservation_object_reserve_shared(bo->resv, 1);
@@ -988,97 +1095,70 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct ttm_mem_type_manager *man;
- ret = ttm_mem_type_from_place(place, &mem_type);
- if (ret)
- return ret;
- man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- continue;
-
- type_ok = ttm_bo_mt_compatible(man, mem_type, place,
- &cur_flags);
-
- if (!type_ok)
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ if (ret == -EBUSY)
continue;
+ if (ret)
+ goto error;
type_found = true;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, place->flags,
- ~TTM_PL_MASK_MEMTYPE);
-
- if (mem_type == TTM_PL_SYSTEM)
- break;
+ mem->mm_node = NULL;
+ if (mem->mem_type == TTM_PL_SYSTEM)
+ return 0;
+ man = &bdev->man[mem->mem_type];
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
- return ret;
+ goto error;
if (mem->mm_node) {
ret = ttm_bo_add_move_fence(bo, man, mem);
if (unlikely(ret)) {
(*man->func->put_node)(man, mem);
- return ret;
+ goto error;
}
- break;
+ return 0;
}
}
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- return 0;
- }
-
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
- ret = ttm_mem_type_from_place(place, &mem_type);
- if (ret)
- return ret;
- man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- continue;
- if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ if (ret == -EBUSY)
continue;
+ if (ret)
+ goto error;
type_found = true;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, place->flags,
- ~TTM_PL_MASK_MEMTYPE);
-
- if (mem_type == TTM_PL_SYSTEM) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- mem->mm_node = NULL;
+ mem->mm_node = NULL;
+ if (mem->mem_type == TTM_PL_SYSTEM)
return 0;
- }
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
- if (ret == 0 && mem->mm_node) {
- mem->placement = cur_flags;
+ ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
+ if (ret == 0 && mem->mm_node)
return 0;
- }
- if (ret == -ERESTARTSYS)
- has_erestartsys = true;
+
+ if (ret && ret != -EBUSY)
+ goto error;
}
+ ret = -ENOMEM;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ }
+
+error:
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&bo->bdev->glob->lru_lock);
}
- return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
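/*
 * Editor's sketch, not part of the patch: ttm_bo_mem_space() is normally
 * reached through ttm_bo_validate() with a placement like the one sketched
 * above.  With the rewrite, -EBUSY from an individual placement is
 * swallowed; the final result is 0 on success, -ENOMEM when nothing fits,
 * -EINVAL when no compatible memory type exists, or an error such as
 * -ERESTARTSYS/-EAGAIN passed straight through from eviction.  The wrapper
 * name below is illustrative.
 */
static int example_validate(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };

        return ttm_bo_validate(bo, placement, &ctx);
}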
@@ -1401,7 +1481,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
+ NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1658,6 +1739,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&ttm_global_mutex);
+ bdev->vm_ops = &ttm_bo_vm_ops;
return 0;
out_no_sys:
@@ -1772,7 +1854,8 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
+ if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL)) {
ret = 0;
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 895d77d799e4..9f918b992f7e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -539,13 +539,13 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__)
+ defined(__powerpc__) || defined(__mips__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__sparc__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
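/*
 * Editor's sketch, not part of the patch: what the hunk above changes in
 * practice.  On MIPS a write-combined placement now gets a
 * pgprot_writecombine() mapping instead of always being forced uncached.
 * The helper name is illustrative; the call matches the one made by the
 * fault path in ttm_bo_vm.c below.
 */
static pgprot_t example_map_prot(struct ttm_buffer_object *bo,
                                 struct vm_area_struct *vma)
{
        return ttm_io_prot(bo->mem.placement,
                           vm_get_page_prot(vma->vm_flags));
}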
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6dacff49c1cc..0c4576cbafcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#define TTM_BO_VM_NUM_PREFAULT 16
-
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -106,25 +104,30 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mmap_sem to be
+ * dropped during long waits, and after the wait the callback will be
+ * restarted. This is to allow other threads using the same virtual memory
+ * space concurrent access to map() and unmap() completely unrelated buffer
+ * objects. TTM buffer object reservations sometimes wait for the GPU and
+ * should therefore be considered long waits. This function reserves the
+ * buffer object interruptibly, taking this into account. Starvation is
+ * avoided by the vm system not allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ * 0 on success and the bo was reserved.
+ * VM_FAULT_RETRY if a blocking wait was needed.
+ * VM_FAULT_NOPAGE if a blocking wait was needed but retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int err;
- int i;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
- struct vm_area_struct cvma;
-
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -151,14 +154,55 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise() settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ * VM_FAULT_NOPAGE on success or pending signal
+ * VM_FAULT_SIGBUS on unspecified error
+ * VM_FAULT_OOM on out-of-memory
+ * VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct cvma = *vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ pgoff_t i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+ return VM_FAULT_SIGBUS;
if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -169,11 +213,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
+ return VM_FAULT_NOPAGE;
default:
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
+ return VM_FAULT_SIGBUS;
}
if (bo->moving != moving) {
@@ -189,21 +231,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
- if (unlikely(ret != 0)) {
- if (ret == VM_FAULT_RETRY &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* The BO has already been unreserved. */
- return ret;
- }
-
- goto out_unlock;
- }
+ if (unlikely(ret != 0))
+ return ret;
err = ttm_mem_io_lock(man, true);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
+ if (unlikely(err != 0))
+ return VM_FAULT_NOPAGE;
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -220,18 +253,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_io_unlock;
}
- /*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
- */
- cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+ if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -240,24 +263,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
};
ttm = bo->ttm;
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm_tt_populate(ttm, &ctx)) {
+ if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+ for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
- /* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -295,7 +315,26 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
-out_unlock:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
+
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
reservation_object_unlock(bo->resv);
return ret;
}
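/*
 * Editor's sketch, not part of the patch: the kind of driver fault handler
 * the two new exports enable.  The structure mirrors ttm_bo_vm_fault()
 * above; the handler name and the reduced prefault count are illustrative
 * assumptions.
 */
static vm_fault_t example_driver_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        /* A driver can choose its own protection and prefault count,
         * e.g. based on madvise() hints or the size of the object. */
        ret = ttm_bo_vm_fault_reserved(vmf, vm_get_page_prot(vma->vm_flags),
                                       1);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;     /* the BO was already unreserved */

        reservation_object_unlock(bo->resv);
        return ret;
}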
@@ -395,7 +434,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
-static const struct vm_operations_struct ttm_bo_vm_ops = {
+const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
@@ -448,7 +487,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_ops = bdev->vm_ops;
/*
* Note: We're transferring the bo reference to
@@ -480,7 +519,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
ttm_bo_get(bo);
- vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_ops = bo->bdev->vm_ops;
vma->vm_private_data = bo;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
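/*
 * Editor's sketch, not part of the patch: the hook point the new
 * bdev->vm_ops field provides.  ttm_bo_device_init() installs the default
 * &ttm_bo_vm_ops; a driver can substitute its own table afterwards and
 * ttm_bo_mmap()/ttm_fbdev_mmap() will pick it up.  The open/close/access
 * callbacks are still static in this patch, so a complete driver table
 * would need those helpers made available as well; everything below is
 * illustrative.
 */
static const struct vm_operations_struct example_driver_vm_ops = {
        .fault = example_driver_vm_fault,       /* sketched above */
        /* .open/.close/.access should mirror the default ttm_bo_vm_ops,
         * otherwise BO references are not managed across VMA open/close. */
};

static void example_driver_install_vm_ops(struct ttm_bo_device *bdev)
{
        bdev->vm_ops = &example_driver_vm_ops;
}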
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 0075eb9a0b52..957ec375a4ba 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -69,7 +69,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups)
+ struct list_head *dups, bool del_lru)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (ticket)
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
+ if (del_lru) {
+ spin_lock(&glob->lru_lock);
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
+ else
+ ttm_bo_move_to_lru_tail(bo, NULL);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
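/*
 * Editor's sketch, not part of the patch: an execbuf-style flow with the
 * new ttm_eu_reserve_buffers() signature.  del_lru == false keeps the BOs
 * on their LRU lists while reserved; ttm_eu_fence_buffer_objects() then
 * just bumps them to the tail.  ww_acquire_done() is no longer called by
 * the reserve helper, so a caller that wants the annotation marks the end
 * of its acquire phase itself.  All names and the placement parameter are
 * illustrative.
 */
static int example_submit(struct ww_acquire_ctx *ticket,
                          struct list_head *validated,
                          struct ttm_placement *placement,
                          struct dma_fence *fence)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };
        struct ttm_validate_buffer *entry;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, validated, true, NULL, false);
        if (ret)
                return ret;

        list_for_each_entry(entry, validated, head) {
                ret = ttm_bo_validate(entry->bo, placement, &ctx);
                if (ret) {
                        /* -EAGAIN from eviction means a BO owned by another
                         * ticket was waited for; drop everything and restart
                         * the submission. */
                        ttm_eu_backoff_reservation(ticket, validated);
                        return ret;
                }
        }

        ww_acquire_done(ticket);

        /* ... build and submit the command stream that @fence signals ... */

        ttm_eu_fence_buffer_objects(ticket, validated, fence);
        return 0;
}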