Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 514
1 file changed, 299 insertions(+), 215 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b63f1cb..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_bo_list_entry *list;
unsigned i, idx;
- mutex_lock(&vm->mutex);
list = drm_malloc_ab(vm->max_pde_used + 2,
sizeof(struct amdgpu_bo_list_entry));
if (!list) {
- mutex_unlock(&vm->mutex);
return NULL;
}
@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
list[idx].tv.shared = true;
list_add(&list[idx++].tv.head, head);
}
- mutex_unlock(&vm->mutex);
return list;
}
@@ -127,18 +124,18 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
- * @ring: ring we want to submit job to
* @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
*
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
*
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
*/
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync)
{
- struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
+ struct fence *best[AMDGPU_MAX_RINGS] = {};
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct amdgpu_device *adev = ring->adev;
@@ -146,41 +143,55 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
unsigned i;
/* check if the id is still valid */
- if (vm_id->id && vm_id->last_id_use &&
- vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
- return NULL;
+ if (vm_id->id) {
+ unsigned id = vm_id->id;
+ long owner;
+
+ owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+ if (owner == (long)vm) {
+ trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+ return 0;
+ }
+ }
/* we definately need to flush */
vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.nvm; ++i) {
- struct amdgpu_fence *fence = adev->vm_manager.active[i];
+ struct fence *fence = adev->vm_manager.ids[i].active;
+ struct amdgpu_ring *fring;
if (fence == NULL) {
/* found a free one */
vm_id->id = i;
trace_amdgpu_vm_grab_id(i, ring->idx);
- return NULL;
+ return 0;
}
- if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
- best[fence->ring->idx] = fence;
- choices[fence->ring == ring ? 0 : 1] = i;
+ fring = amdgpu_ring_from_fence(fence);
+ if (best[fring->idx] == NULL ||
+ fence_is_later(best[fring->idx], fence)) {
+ best[fring->idx] = fence;
+ choices[fring == ring ? 0 : 1] = i;
}
}
for (i = 0; i < 2; ++i) {
if (choices[i]) {
+ struct fence *fence;
+
+ fence = adev->vm_manager.ids[choices[i]].active;
vm_id->id = choices[i];
+
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
- return adev->vm_manager.active[choices[i]];
+ return amdgpu_sync_fence(ring->adev, sync, fence);
}
}
/* should never happen */
BUG();
- return NULL;
+ return -EINVAL;
}
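
The reworked amdgpu_vm_grab_id() no longer returns a fence: it tags each VMID with the VM that last used it, reuses the previous ID when the tag still matches, hands out a free ID when one exists, and otherwise picks the ID protected by the oldest fence (preferring IDs last used on the submitting ring) and adds that fence to the caller's sync object. A minimal standalone sketch of that selection policy follows; the types and names are illustrative stand-ins, not the real amdgpu structures.

#define NUM_IDS   8	/* VMID 0 is reserved for the system VM */
#define NUM_RINGS 4

struct id_slot {
	const void *owner;	/* VM that last used this ID          */
	unsigned long seq;	/* seqno of the fence protecting it   */
	int ring;		/* ring that fence belongs to         */
	int busy;		/* 0: ID is free, no fence to wait on */
};

/* Pick a VMID for @vm submitting on @ring; @prev_id is the ID the VM used
 * last time (0 if none).  Unless the slot was free or still owned, the
 * caller must sync to the chosen slot's fence before using the ID. */
static int pick_vmid(struct id_slot ids[NUM_IDS], const void *vm,
		     int ring, int prev_id)
{
	unsigned long best_seq[NUM_RINGS];
	int have_best[NUM_RINGS] = { 0 };
	int choices[2] = { 0, 0 };	/* [0] same ring, [1] other ring */
	int i;

	/* Fast path: we still own our old ID, reuse it without a flush. */
	if (prev_id && ids[prev_id].owner == vm)
		return prev_id;

	for (i = 1; i < NUM_IDS; ++i) {
		if (!ids[i].busy)
			return i;	/* free slot, nothing to sync to */

		/* Remember the slot with the oldest fence on each ring. */
		if (!have_best[ids[i].ring] ||
		    ids[i].seq < best_seq[ids[i].ring]) {
			have_best[ids[i].ring] = 1;
			best_seq[ids[i].ring] = ids[i].seq;
			choices[ids[i].ring == ring ? 0 : 1] = i;
		}
	}

	/* Prefer a slot last used on our own ring, then any other. */
	return choices[0] ? choices[0] : choices[1];
}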
/**
@@ -196,17 +207,26 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
- struct amdgpu_fence *updates)
+ struct fence *updates)
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+ struct fence *flushed_updates = vm_id->flushed_updates;
+ bool is_later;
- if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
- amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+ if (!flushed_updates)
+ is_later = true;
+ else if (!updates)
+ is_later = false;
+ else
+ is_later = fence_is_later(updates, flushed_updates);
+ if (pd_addr != vm_id->pd_gpu_addr || is_later) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
- amdgpu_fence_unref(&vm_id->flushed_updates);
- vm_id->flushed_updates = amdgpu_fence_ref(updates);
+ if (is_later) {
+ vm_id->flushed_updates = fence_get(updates);
+ fence_put(flushed_updates);
+ }
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
}
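
amdgpu_vm_flush() now works on plain struct fence pointers, so the "are the updates newer than what we last flushed?" question has to tolerate a missing fence on either side. The three-way test above reduces to the following standalone sketch, with sequence numbers standing in for the fences (0 meaning "none"); the real code uses fence_is_later() on the fence objects.

static int need_vm_flush(unsigned long long pd_addr,
			 unsigned long long current_pd_addr,
			 unsigned long flushed_seq,	/* 0: never flushed */
			 unsigned long updates_seq)	/* 0: no updates    */
{
	int is_later;

	if (!flushed_seq)
		is_later = 1;			/* nothing flushed yet      */
	else if (!updates_seq)
		is_later = 0;			/* nothing newer to wait on */
	else
		is_later = updates_seq > flushed_seq;

	return pd_addr != current_pd_addr || is_later;
}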
@@ -226,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
- unsigned ridx = fence->ring->idx;
- unsigned vm_id = vm->ids[ridx].id;
-
- amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
- adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
+ struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+ unsigned vm_id = vm->ids[ring->idx].id;
- amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
- vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
+ fence_put(adev->vm_manager.ids[vm_id].active);
+ adev->vm_manager.ids[vm_id].active = fence_get(fence);
+ atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
}
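
The owner tag written here pairs with the read in amdgpu_vm_grab_id() and the conditional clear in amdgpu_vm_fini(). Collected from the hunks of this patch, with comments added for orientation:

	/* amdgpu_vm_fence(): remember which VM the ID now belongs to */
	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);

	/* amdgpu_vm_grab_id(): reuse the ID only if we still own it */
	owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
	if (owner == (long)vm)
		return 0;

	/* amdgpu_vm_fini(): drop the tag, but only if it still points at us */
	atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, (long)vm, 0);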
/**
@@ -300,57 +318,72 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
+int amdgpu_vm_free_job(struct amdgpu_job *job)
+{
+ int i;
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
+ return 0;
+}
+
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
* @adev: amdgpu_device pointer
* @bo: bo to clear
+ *
+ * need to reserve bo first before calling it.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
- struct amdgpu_ib ib;
+ struct fence *fence = NULL;
+ struct amdgpu_ib *ib;
unsigned entries;
uint64_t addr;
int r;
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- return r;
-
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
- goto error_unreserve;
+ goto error;
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
- r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
- if (r)
- goto error_unreserve;
-
- ib.length_dw = 0;
-
- amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > 64);
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ goto error;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)
goto error_free;
- amdgpu_bo_fence(bo, ib.fence, true);
+ ib->length_dw = 0;
+
+ amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > 64);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (!r)
+ amdgpu_bo_fence(bo, fence, true);
+ fence_put(fence);
+ if (amdgpu_enable_scheduler)
+ return 0;
error_free:
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+error:
return r;
}
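
The control flow above is the pattern this patch applies to every VM update: the IB is heap-allocated, submitted through amdgpu_sched_ib_submit_kernel_helper() with amdgpu_vm_free_job() as the completion callback, and freed inline only on error or when the scheduler is disabled (presumably because the helper then executes the IB directly and the free_job callback never runs). Reduced to its skeleton, reusing the identifiers visible in this diff rather than self-contained code:

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw, ib);
	if (r)
		goto error_free;

	/* ... fill the IB with page table updates ... */

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (r)
		goto error_free;		/* still ours, free it below */

	/* use/publish the fence, then drop our local reference */
	fence_put(fence);

	if (amdgpu_enable_scheduler)
		return 0;			/* free_job will free the IB */

error_free:
	amdgpu_ib_free(adev, ib);		/* no scheduler, or error    */
	kfree(ib);
	return r;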
@@ -400,7 +433,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct fence *fence = NULL;
+
int r;
/* padding, etc. */
@@ -413,10 +448,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
- if (r)
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+ if (r) {
+ kfree(ib);
return r;
- ib.length_dw = 0;
+ }
+ ib->length_dw = 0;
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +477,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, &ib, last_pde,
+ amdgpu_vm_update_pages(adev, ib, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID, 0);
}
@@ -450,23 +491,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+ amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
incr, AMDGPU_PTE_VALID, 0);
- if (ib.length_dw != 0) {
- amdgpu_vm_pad_ib(adev, &ib);
- amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib.length_dw > ndw);
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
- }
- amdgpu_bo_fence(pd, ib.fence, true);
+ if (ib->length_dw != 0) {
+ amdgpu_vm_pad_ib(adev, ib);
+ amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (r)
+ goto error_free;
+
+ amdgpu_bo_fence(pd, fence, true);
+ fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = fence_get(fence);
+ fence_put(fence);
+ }
+
+ if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
}
- amdgpu_ib_free(adev, &ib);
return 0;
+
+error_free:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
}
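
For reference, the loop this hunk feeds (only partly visible in the context above) batches contiguous page-directory entries so that a whole run is written with one amdgpu_vm_update_pages() call. A minimal standalone model of that batching, with write_pdes() standing in for the real update function and the "skip unchanged entries" bookkeeping omitted:

#include <stdint.h>
#include <stdio.h>

static void write_pdes(uint64_t pde, uint64_t pt, unsigned count,
		       uint64_t incr)
{
	printf("%u PDEs at %#llx -> page tables at %#llx (stride %#llx)\n",
	       count, (unsigned long long)pde,
	       (unsigned long long)pt, (unsigned long long)incr);
}

static void update_directory(uint64_t pd_addr, const uint64_t *pt_addr,
			     unsigned num_pdes, uint64_t incr)
{
	uint64_t last_pde = ~0ull, last_pt = ~0ull;
	unsigned count = 0, pt_idx;

	for (pt_idx = 0; pt_idx < num_pdes; ++pt_idx) {
		uint64_t pde = pd_addr + pt_idx * 8;
		uint64_t pt = pt_addr[pt_idx];

		if (!pt)
			continue;	/* no page table allocated here */

		if (last_pde + 8 * count != pde ||
		    last_pt + incr * count != pt) {
			if (count)	/* flush the previous run */
				write_pdes(last_pde, last_pt, count, incr);
			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;	/* extends the current run */
		}
	}
	if (count)
		write_pdes(last_pde, last_pt, count, incr);
}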
/**
@@ -572,9 +627,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
{
uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
+ void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned count = 0;
uint64_t addr;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -583,8 +643,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
uint64_t pte;
int r;
- amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
r = reservation_object_reserve_shared(pt->tbo.resv);
if (r)
return r;
@@ -627,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
- uint64_t start, uint64_t end,
- struct amdgpu_fence *fence)
-{
- unsigned i;
-
- start >>= amdgpu_vm_block_size;
- end >>= amdgpu_vm_block_size;
-
- for (i = start; i <= end; ++i)
- amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
-/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
@@ -670,12 +704,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, uint32_t gtt_flags,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
unsigned nptes, ncmds, ndw;
uint32_t flags = gtt_flags;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct fence *f = NULL;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -722,46 +757,53 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
- if (r)
- return r;
- ib.length_dw = 0;
-
- if (!(flags & AMDGPU_PTE_VALID)) {
- unsigned i;
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_fence *f = vm->ids[i].last_id_use;
- amdgpu_sync_fence(&ib.sync, f);
- }
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+ if (r) {
+ kfree(ib);
+ return r;
}
- r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+ ib->length_dw = 0;
+
+ r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
mapping->it.last + 1, addr + mapping->offset,
flags, gtt_flags);
if (r) {
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
return r;
}
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > ndw);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &f);
+ if (r)
+ goto error_free;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
- }
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, ib.fence);
+ amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(ib.fence);
+ fence_put(*fence);
+ *fence = fence_get(f);
+ }
+ fence_put(f);
+ if (!amdgpu_enable_scheduler) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
}
- amdgpu_ib_free(adev, &ib);
-
return 0;
+
+error_free:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
}
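
With the page tables now created against the page directory's reservation object (see the amdgpu_vm_bo_map() hunk below), a single amdgpu_bo_fence() on the page directory protects every page table the update touched, which is presumably why the per-range amdgpu_vm_fence_pts() walk could be removed. The caller-supplied fence pointer is then refreshed with plain refcounting; the tail of the function, annotated:

	amdgpu_bo_fence(vm->page_directory, f, true);	/* covers the PTs too */
	if (fence) {
		fence_put(*fence);	/* drop the caller's old fence       */
		*fence = fence_get(f);	/* hand the caller a fresh reference */
	}
	fence_put(f);			/* drop the submit's local reference */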
/**
@@ -787,28 +829,40 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
int r;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_TT)
addr += adev->vm_manager.vram_base_offset;
} else {
addr = 0;
}
- if (addr == bo_va->addr)
- return 0;
-
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ spin_lock(&vm->status_lock);
+ if (!list_empty(&bo_va->vm_status))
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ spin_unlock(&vm->status_lock);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
flags, &bo_va->last_pt_update);
if (r)
return r;
}
- bo_va->addr = addr;
+ if (trace_amdgpu_vm_bo_mapping_enabled()) {
+ list_for_each_entry(mapping, &bo_va->valids, list)
+ trace_amdgpu_vm_bo_mapping(mapping);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list)
+ trace_amdgpu_vm_bo_mapping(mapping);
+ }
+
spin_lock(&vm->status_lock);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
+ if (!mem)
+ list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
return 0;
@@ -831,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;
+ spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
-
+ spin_unlock(&vm->freed_lock);
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
kfree(mapping);
if (r)
return r;
+ spin_lock(&vm->freed_lock);
}
+ spin_unlock(&vm->freed_lock);
+
return 0;
}
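
amdgpu_vm_clear_freed() (and, in the next hunk, amdgpu_vm_clear_invalids() with status_lock) now follow the same idiom: pop one entry under the spinlock, drop the lock while the page-table update runs (that path can sleep and allocate), then retake it to fetch the next entry. In outline, with the error return omitted:

	spin_lock(&vm->freed_lock);
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
					   struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		spin_unlock(&vm->freed_lock);	/* update may sleep          */

		/* clear the mapping's PTEs, then free it */

		spin_lock(&vm->freed_lock);	/* re-acquire for the next one */
	}
	spin_unlock(&vm->freed_lock);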
@@ -861,15 +919,16 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
- int r;
+ int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
-
+ mutex_lock(&bo_va->mutex);
r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+ mutex_unlock(&bo_va->mutex);
if (r)
return r;
@@ -878,8 +937,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
if (bo_va)
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
- return 0;
+ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+ return r;
}
/**
@@ -907,15 +967,13 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->mappings);
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
-
- mutex_lock(&vm->mutex);
+ mutex_init(&bo_va->mutex);
list_add_tail(&bo_va->bo_list, &bo->va);
- mutex_unlock(&vm->mutex);
return bo_va;
}
@@ -932,7 +990,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
* Add a mapping of the BO at the specefied addr into the VM.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -948,32 +1006,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
- amdgpu_bo_unreserve(bo_va->bo);
+ size == 0 || size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
- }
/* make sure object fit at this offset */
eaddr = saddr + size;
- if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
- amdgpu_bo_unreserve(bo_va->bo);
+ if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
- }
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
- amdgpu_bo_unreserve(bo_va->bo);
return -EINVAL;
}
- mutex_lock(&vm->mutex);
-
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+ spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -981,16 +1034,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
tmp->it.start, tmp->it.last + 1);
- amdgpu_bo_unreserve(bo_va->bo);
r = -EINVAL;
- goto error_unlock;
+ goto error;
}
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
- amdgpu_bo_unreserve(bo_va->bo);
r = -ENOMEM;
- goto error_unlock;
+ goto error;
}
INIT_LIST_HEAD(&mapping->list);
@@ -999,12 +1050,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->mappings);
+ mutex_lock(&bo_va->mutex);
+ list_add(&mapping->list, &bo_va->invalids);
+ mutex_unlock(&bo_va->mutex);
+ spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_map(bo_va, mapping);
- bo_va->addr = 0;
-
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
eaddr >>= amdgpu_vm_block_size;
@@ -1014,56 +1067,48 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (eaddr > vm->max_pde_used)
vm->max_pde_used = eaddr;
- amdgpu_bo_unreserve(bo_va->bo);
-
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+ struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo)
continue;
- /* drop mutex to allocate and clear page table */
- mutex_unlock(&vm->mutex);
-
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, resv, &pt);
if (r)
goto error_free;
+ /* Keep a reference to the page table to avoid freeing
+ * them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(vm->page_directory);
+
r = amdgpu_vm_clear_bo(adev, pt);
if (r) {
amdgpu_bo_unref(&pt);
goto error_free;
}
- /* aquire mutex again */
- mutex_lock(&vm->mutex);
- if (vm->page_tables[pt_idx].bo) {
- /* someone else allocated the pt in the meantime */
- mutex_unlock(&vm->mutex);
- amdgpu_bo_unref(&pt);
- mutex_lock(&vm->mutex);
- continue;
- }
-
vm->page_tables[pt_idx].addr = 0;
vm->page_tables[pt_idx].bo = pt;
}
- mutex_unlock(&vm->mutex);
return 0;
error_free:
- mutex_lock(&vm->mutex);
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
-error_unlock:
- mutex_unlock(&vm->mutex);
+error:
return r;
}
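
Two details in the page-table allocation above are worth calling out: each table is created against the page directory's reservation object, so it is implicitly reserved whenever the directory is (and one fence on the directory covers it), and each table takes a reference on the directory so teardown cannot release the directory before its tables. The relevant lines, annotated:

	struct reservation_object *resv = vm->page_directory->tbo.resv;

	r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
			     AMDGPU_GPU_PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, resv, &pt);	/* share the PD's resv */

	/* keep the page directory alive as long as this table exists */
	pt->parent = amdgpu_bo_ref(vm->page_directory);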
@@ -1077,7 +1122,7 @@ error_unlock:
* Remove a mapping of the BO at the specefied addr from the VM.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1085,32 +1130,42 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
+ bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
-
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ mutex_lock(&bo_va->mutex);
+ list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
}
- if (&mapping->list == &bo_va->mappings) {
- amdgpu_bo_unreserve(bo_va->bo);
- return -ENOENT;
- }
+ if (&mapping->list == &bo_va->valids) {
+ valid = false;
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
+ if (mapping->it.start == saddr)
+ break;
+ }
- mutex_lock(&vm->mutex);
+ if (&mapping->list == &bo_va->invalids) {
+ mutex_unlock(&bo_va->mutex);
+ return -ENOENT;
+ }
+ }
+ mutex_unlock(&bo_va->mutex);
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr) {
- /* clear the old address */
+ if (valid) {
+ spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
+ spin_unlock(&vm->freed_lock);
} else {
kfree(mapping);
}
- mutex_unlock(&vm->mutex);
- amdgpu_bo_unreserve(bo_va->bo);
return 0;
}
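
The valid/invalid distinction decides what happens to the removed mapping: one found on the valids list has live page-table entries and is queued on vm->freed so a later amdgpu_vm_clear_freed() can clear them, while one still on invalids never reached the page tables and is simply freed. Annotated:

	if (valid) {
		/* PTEs were written for this range: queue it so the next
		 * amdgpu_vm_clear_freed() call can clear them */
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	} else {
		/* never committed to the page tables, nothing to clear */
		kfree(mapping);
	}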
@@ -1133,25 +1188,30 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_del(&bo_va->bo_list);
- mutex_lock(&vm->mutex);
-
spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
- list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr)
- list_add(&mapping->list, &vm->freed);
- else
- kfree(mapping);
+ spin_lock(&vm->freed_lock);
+ list_add(&mapping->list, &vm->freed);
+ spin_unlock(&vm->freed_lock);
}
- amdgpu_fence_unref(&bo_va->last_pt_update);
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
+ interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
+ kfree(mapping);
+ }
+ fence_put(bo_va->last_pt_update);
+ mutex_destroy(&bo_va->mutex);
kfree(bo_va);
-
- mutex_unlock(&vm->mutex);
}
/**
@@ -1169,12 +1229,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
+ if (list_empty(&bo_va->vm_status))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
}
}
@@ -1196,14 +1254,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL;
- vm->ids[i].last_id_use = NULL;
}
- mutex_init(&vm->mutex);
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
-
+ spin_lock_init(&vm->it_lock);
+ spin_lock_init(&vm->freed_lock);
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1215,13 +1273,22 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return -ENOMEM;
}
+ vm->page_directory_fence = NULL;
+
r = amdgpu_bo_create(adev, pd_size, align, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, &vm->page_directory);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, NULL, &vm->page_directory);
if (r)
return r;
-
+ r = amdgpu_bo_reserve(vm->page_directory, false);
+ if (r) {
+ amdgpu_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
+ return r;
+ }
r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+ amdgpu_bo_unreserve(vm->page_directory);
if (r) {
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
@@ -1263,11 +1330,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
-
+ fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- amdgpu_fence_unref(&vm->ids[i].flushed_updates);
- amdgpu_fence_unref(&vm->ids[i].last_id_use);
+ unsigned id = vm->ids[i].id;
+
+ atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+ (long)vm, 0);
+ fence_put(vm->ids[i].flushed_updates);
}
- mutex_destroy(&vm->mutex);
+}
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < AMDGPU_NUM_VM; ++i)
+ fence_put(adev->vm_manager.ids[i].active);
}