Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 607
1 file changed, 322 insertions, 285 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9599f7559b3d..264c5968a1d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -55,7 +55,7 @@
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the number of page directory entries (cayman+).
+ * Calculate the number of page directory entries.
  */
 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
 {
@@ -67,7 +67,7 @@ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the size of the page directory in bytes (cayman+).
+ * Calculate the size of the page directory in bytes.
  */
 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
 {
@@ -89,8 +89,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                          struct amdgpu_bo_list_entry *entry)
 {
         entry->robj = vm->page_directory;
-        entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
-        entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
         entry->priority = 0;
         entry->tv.bo = &vm->page_directory->tbo;
         entry->tv.shared = true;
@@ -154,29 +152,34 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
- *
- * Global mutex must be locked!
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync)
+                      struct amdgpu_sync *sync, struct fence *fence)
 {
-        struct fence *best[AMDGPU_MAX_RINGS] = {};
         struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
         struct amdgpu_device *adev = ring->adev;
+        struct amdgpu_vm_manager_id *id;
+        int r;
 
-        unsigned choices[2] = {};
-        unsigned i;
+        mutex_lock(&adev->vm_manager.lock);
 
         /* check if the id is still valid */
         if (vm_id->id) {
-                unsigned id = vm_id->id;
                 long owner;
 
-                owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+                id = &adev->vm_manager.ids[vm_id->id];
+                owner = atomic_long_read(&id->owner);
                 if (owner == (long)vm) {
-                        trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+                        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+                        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+
+                        fence_put(id->active);
+                        id->active = fence_get(fence);
+
+                        mutex_unlock(&adev->vm_manager.lock);
                         return 0;
                 }
         }
@@ -184,41 +187,24 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         /* we definately need to flush */
         vm_id->pd_gpu_addr = ~0ll;
 
-        /* skip over VMID 0, since it is the system VM */
-        for (i = 1; i < adev->vm_manager.nvm; ++i) {
-                struct fence *fence = adev->vm_manager.ids[i].active;
-                struct amdgpu_ring *fring;
-
-                if (fence == NULL) {
-                        /* found a free one */
-                        vm_id->id = i;
-                        trace_amdgpu_vm_grab_id(i, ring->idx);
-                        return 0;
-                }
-
-                fring = amdgpu_ring_from_fence(fence);
-                if (best[fring->idx] == NULL ||
-                    fence_is_later(best[fring->idx], fence)) {
-                        best[fring->idx] = fence;
-                        choices[fring == ring ? 0 : 1] = i;
-                }
-        }
+        id = list_first_entry(&adev->vm_manager.ids_lru,
+                              struct amdgpu_vm_manager_id,
+                              list);
+        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+        atomic_long_set(&id->owner, (long)vm);
 
-        for (i = 0; i < 2; ++i) {
-                if (choices[i]) {
-                        struct fence *fence;
+        vm_id->id = id - adev->vm_manager.ids;
+        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
 
-                        fence = adev->vm_manager.ids[choices[i]].active;
-                        vm_id->id = choices[i];
+        r = amdgpu_sync_fence(ring->adev, sync, id->active);
 
-                        trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-                        return amdgpu_sync_fence(ring->adev, sync, fence);
-                }
+        if (!r) {
+                fence_put(id->active);
+                id->active = fence_get(fence);
         }
 
-        /* should never happen */
-        BUG();
-        return -EINVAL;
+        mutex_unlock(&adev->vm_manager.lock);
+        return r;
 }
 
 /**
@@ -228,9 +214,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  * @vm: vm we want to flush
  * @updates: last vm update that we waited for
  *
- * Flush the vm (cayman+).
- *
- * Global and local mutex must be locked!
+ * Flush the vm.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                      struct amdgpu_vm *vm,
@@ -260,36 +244,12 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_vm_fence - remember fence for vm
- *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-                     struct amdgpu_vm *vm,
-                     struct fence *fence)
-{
-        struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
-        unsigned vm_id = vm->ids[ring->idx].id;
-
-        fence_put(adev->vm_manager.ids[vm_id].active);
-        adev->vm_manager.ids[vm_id].active = fence_get(fence);
-        atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
-}
-
-/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
  * @bo: requested buffer object
  *
- * Find @bo inside the requested vm (cayman+).
+ * Find @bo inside the requested vm.
  * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
@@ -312,32 +272,40 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw access flags
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
  * @flags: hw access flags
- * @gtt_flags: GTT hw access flags
  *
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
+                                   struct amdgpu_gart *gtt,
+                                   uint32_t gtt_flags,
                                    struct amdgpu_ib *ib,
                                    uint64_t pe, uint64_t addr,
                                    unsigned count, uint32_t incr,
-                                   uint32_t flags, uint32_t gtt_flags)
+                                   uint32_t flags)
 {
         trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-        if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
-                uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
+        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+                uint64_t src = gtt->table_addr + (addr >> 12) * 8;
                 amdgpu_vm_copy_pte(adev, ib, pe, src, count);
 
-        } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
-                amdgpu_vm_write_pte(adev, ib, pe, addr,
-                                    count, incr, flags);
+        } else if (gtt) {
+                dma_addr_t *pages_addr = gtt->pages_addr;
+                amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
+                                    count, incr, flags);
+
+        } else if (count < 3) {
+                amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+                                    count, incr, flags);
 
         } else {
                 amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
@@ -345,15 +313,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
         }
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *job)
-{
-        int i;
-        for (i = 0; i < job->num_ibs; i++)
-                amdgpu_ib_free(job->adev, &job->ibs[i]);
-        kfree(job->ibs);
-        return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -363,15 +322,18 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
  * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+                              struct amdgpu_vm *vm,
                               struct amdgpu_bo *bo)
 {
-        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+        struct amdgpu_ring *ring;
         struct fence *fence = NULL;
-        struct amdgpu_ib *ib;
+        struct amdgpu_job *job;
         unsigned entries;
         uint64_t addr;
         int r;
 
+        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
         r = reservation_object_reserve_shared(bo->tbo.resv);
         if (r)
                 return r;
@@ -383,56 +345,57 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         addr = amdgpu_bo_gpu_offset(bo);
         entries = amdgpu_bo_size(bo) / 8;
 
-        ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-        if (!ib)
+        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+        if (r)
                 goto error;
 
-        r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
+        amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+                               0, 0);
+        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+        WARN_ON(job->ibs[0].length_dw > 64);
+        r = amdgpu_job_submit(job, ring, &vm->entity,
+                              AMDGPU_FENCE_OWNER_VM, &fence);
         if (r)
                 goto error_free;
 
-        ib->length_dw = 0;
-
-        amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
-        amdgpu_vm_pad_ib(adev, ib);
-        WARN_ON(ib->length_dw > 64);
-        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-                                                 &amdgpu_vm_free_job,
-                                                 AMDGPU_FENCE_OWNER_VM,
-                                                 &fence);
-        if (!r)
-                amdgpu_bo_fence(bo, fence, true);
+        amdgpu_bo_fence(bo, fence, true);
         fence_put(fence);
-        if (amdgpu_enable_scheduler)
-                return 0;
+        return 0;
 
 error_free:
-        amdgpu_ib_free(adev, ib);
-        kfree(ib);
+        amdgpu_job_free(job);
 error:
         return r;
 }
 
 /**
- * amdgpu_vm_map_gart - get the physical address of a gart page
+ * amdgpu_vm_map_gart - Resolve gart mapping of addr
  *
- * @adev: amdgpu_device pointer
+ * @pages_addr: optional DMA address to use for lookup
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
+ * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
         uint64_t result;
 
-        /* page table offset */
-        result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+        if (pages_addr) {
+                /* page table offset */
+                result = pages_addr[addr >> PAGE_SHIFT];
 
-        /* in case cpu page size != gpu page size*/
-        result |= addr & (~PAGE_MASK);
+                /* in case cpu page size != gpu page size*/
+                result |= addr & (~PAGE_MASK);
+
+        } else {
+                /* No mapping required */
+                result = addr;
+        }
+
+        result &= 0xFFFFFFFFFFFFF000ULL;
 
         return result;
 }
@@ -446,45 +409,37 @@ uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
  * @end: end of GPU address range
  *
  * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
+ * and updates the page directory.
  * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
  */
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm)
 {
-        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+        struct amdgpu_ring *ring;
         struct amdgpu_bo *pd = vm->page_directory;
         uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
         uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
         uint64_t last_pde = ~0, last_pt = ~0;
         unsigned count = 0, pt_idx, ndw;
+        struct amdgpu_job *job;
         struct amdgpu_ib *ib;
         struct fence *fence = NULL;
         int r;
 
+        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
         /* padding, etc. */
         ndw = 64;
 
         /* assume the worst case */
         ndw += vm->max_pde_used * 6;
 
-        /* update too big for an IB */
-        if (ndw > 0xfffff)
-                return -ENOMEM;
-
-        ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-        if (!ib)
-                return -ENOMEM;
-
-        r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-        if (r) {
-                kfree(ib);
+        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+        if (r)
                 return r;
-        }
-        ib->length_dw = 0;
+
+        ib = &job->ibs[0];
 
         /* walk over the address space and update the page directory */
         for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -504,9 +459,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                     ((last_pt + incr * count) != pt)) {
 
                         if (count) {
-                                amdgpu_vm_update_pages(adev, ib, last_pde,
-                                                       last_pt, count, incr,
-                                                       AMDGPU_PTE_VALID, 0);
+                                amdgpu_vm_update_pages(adev, NULL, 0, ib,
+                                                       last_pde, last_pt,
+                                                       count, incr,
+                                                       AMDGPU_PTE_VALID);
                         }
 
                         count = 1;
@@ -518,17 +474,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
         }
 
         if (count)
-                amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
-                                       incr, AMDGPU_PTE_VALID, 0);
+                amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
+                                       count, incr, AMDGPU_PTE_VALID);
 
         if (ib->length_dw != 0) {
-                amdgpu_vm_pad_ib(adev, ib);
-                amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+                amdgpu_ring_pad_ib(ring, ib);
+                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+                                 AMDGPU_FENCE_OWNER_VM);
                 WARN_ON(ib->length_dw > ndw);
-                r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-                                                         &amdgpu_vm_free_job,
-                                                         AMDGPU_FENCE_OWNER_VM,
-                                                         &fence);
+                r = amdgpu_job_submit(job, ring, &vm->entity,
+                                      AMDGPU_FENCE_OWNER_VM, &fence);
                 if (r)
                         goto error_free;
@@ -536,18 +491,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                 fence_put(vm->page_directory_fence);
                 vm->page_directory_fence = fence_get(fence);
                 fence_put(fence);
-        }
 
-        if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
-                amdgpu_ib_free(adev, ib);
-                kfree(ib);
+        } else {
+                amdgpu_job_free(job);
         }
 
         return 0;
 
 error_free:
-        amdgpu_ib_free(adev, ib);
-        kfree(ib);
+        amdgpu_job_free(job);
         return r;
 }
 
@@ -555,20 +507,20 @@ error_free:
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
  * @ib: IB for the update
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
- * @gtt_flags: GTT hw mapping flags
- *
- * Global and local mutex must be locked!
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
+                                struct amdgpu_gart *gtt,
+                                uint32_t gtt_flags,
                                 struct amdgpu_ib *ib,
                                 uint64_t pe_start, uint64_t pe_end,
-                                uint64_t addr, uint32_t flags,
-                                uint32_t gtt_flags)
+                                uint64_t addr, uint32_t flags)
 {
         /**
          * The MC L1 TLB supports variable sized pages, based on a fragment
@@ -598,36 +550,39 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 
         unsigned count;
 
+        /* Abort early if there isn't anything to do */
+        if (pe_start == pe_end)
+                return;
+
         /* system pages are non continuously */
-        if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
-            (frag_start >= frag_end)) {
+        if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
                 count = (pe_end - pe_start) / 8;
-                amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
-                                       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+                amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+                                       addr, count, AMDGPU_GPU_PAGE_SIZE,
+                                       flags);
                 return;
         }
 
         /* handle the 4K area at the beginning */
         if (pe_start != frag_start) {
                 count = (frag_start - pe_start) / 8;
-                amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
-                                       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+                amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
                 addr += AMDGPU_GPU_PAGE_SIZE * count;
         }
 
         /* handle the area in the middle */
         count = (frag_end - frag_start) / 8;
-        amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
-                               AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
-                               gtt_flags);
+        amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+                               AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
         /* handle the 4K area at the end */
         if (frag_end != pe_end) {
                 addr += AMDGPU_GPU_PAGE_SIZE * count;
                 count = (pe_end - frag_end) / 8;
-                amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
-                                       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+                amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
         }
 }
 
@@ -635,122 +590,105 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
  * @dst: destination address to map to
  * @flags: mapping flags
  *
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
+ * Update the page tables in the range @start - @end.
  */
-static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-                                 struct amdgpu_vm *vm,
-                                 struct amdgpu_ib *ib,
-                                 uint64_t start, uint64_t end,
-                                 uint64_t dst, uint32_t flags,
-                                 uint32_t gtt_flags)
+static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
+                                  struct amdgpu_gart *gtt,
+                                  uint32_t gtt_flags,
+                                  struct amdgpu_vm *vm,
+                                  struct amdgpu_ib *ib,
+                                  uint64_t start, uint64_t end,
+                                  uint64_t dst, uint32_t flags)
 {
-        uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
-        uint64_t last_pte = ~0, last_dst = ~0;
-        void *owner = AMDGPU_FENCE_OWNER_VM;
-        unsigned count = 0;
-        uint64_t addr;
+        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
-        /* sync to everything on unmapping */
-        if (!(flags & AMDGPU_PTE_VALID))
-                owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+        uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
+        uint64_t addr;
 
         /* walk over the address space and update the page tables */
         for (addr = start; addr < end; ) {
                 uint64_t pt_idx = addr >> amdgpu_vm_block_size;
                 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
                 unsigned nptes;
-                uint64_t pte;
-                int r;
-
-                amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
-                r = reservation_object_reserve_shared(pt->tbo.resv);
-                if (r)
-                        return r;
+                uint64_t pe_start;
 
                 if ((addr & ~mask) == (end & ~mask))
                         nptes = end - addr;
                 else
                         nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 
-                pte = amdgpu_bo_gpu_offset(pt);
-                pte += (addr & mask) * 8;
+                pe_start = amdgpu_bo_gpu_offset(pt);
+                pe_start += (addr & mask) * 8;
 
-                if ((last_pte + 8 * count) != pte) {
+                if (last_pe_end != pe_start) {
 
-                        if (count) {
-                                amdgpu_vm_frag_ptes(adev, ib, last_pte,
-                                                    last_pte + 8 * count,
-                                                    last_dst, flags,
-                                                    gtt_flags);
-                        }
+                        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
                                            last_pe_start, last_pe_end,
                                            last_dst, flags);
 
-                        count = nptes;
-                        last_pte = pte;
+                        last_pe_start = pe_start;
+                        last_pe_end = pe_start + 8 * nptes;
                         last_dst = dst;
                 } else {
-                        count += nptes;
+                        last_pe_end += 8 * nptes;
                 }
 
                 addr += nptes;
                 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
         }
 
-        if (count) {
-                amdgpu_vm_frag_ptes(adev, ib, last_pte,
-                                    last_pte + 8 * count,
-                                    last_dst, flags, gtt_flags);
-        }
-
-        return 0;
+        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+                            last_pe_start, last_pe_end,
+                            last_dst, flags);
 }
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: flags as they are used for GTT
  * @vm: requested vm
- * @mapping: mapped range and flags to use for the update
+ * @start: start of mapped range
+ * @last: last mapped entry
+ * @flags: flags for the entries
  * @addr: addr to set the area to
- * @gtt_flags: flags as they are used for GTT
  * @fence: optional resulting fence
 *
- * Fill in the page table entries for @mapping.
+ * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+                                       struct amdgpu_gart *gtt,
+                                       uint32_t gtt_flags,
                                        struct amdgpu_vm *vm,
-                                       struct amdgpu_bo_va_mapping *mapping,
-                                       uint64_t addr, uint32_t gtt_flags,
+                                       uint64_t start, uint64_t last,
+                                       uint32_t flags, uint64_t addr,
                                        struct fence **fence)
 {
-        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+        struct amdgpu_ring *ring;
+        void *owner = AMDGPU_FENCE_OWNER_VM;
         unsigned nptes, ncmds, ndw;
-        uint32_t flags = gtt_flags;
+        struct amdgpu_job *job;
         struct amdgpu_ib *ib;
         struct fence *f = NULL;
         int r;
 
-        /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
-         * but in case of something, we filter the flags in first place
-         */
-        if (!(mapping->flags & AMDGPU_PTE_READABLE))
-                flags &= ~AMDGPU_PTE_READABLE;
-        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
-                flags &= ~AMDGPU_PTE_WRITEABLE;
+        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
-        trace_amdgpu_vm_bo_update(mapping);
+        /* sync to everything on unmapping */
+        if (!(flags & AMDGPU_PTE_VALID))
+                owner = AMDGPU_FENCE_OWNER_UNDEFINED;
 
-        nptes = mapping->it.last - mapping->it.start + 1;
+        nptes = last - start + 1;
 
         /*
          * reserve space for one command every (1 << BLOCK_SIZE)
@@ -761,11 +699,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
         /* padding, etc. */
         ndw = 64;
 
-        if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
+        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
                 /* only copy commands needed */
                 ndw += ncmds * 7;
 
-        } else if (flags & AMDGPU_PTE_SYSTEM) {
+        } else if (gtt) {
                 /* header for write data commands */
                 ndw += ncmds * 4;
 
@@ -780,38 +718,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                 ndw += 2 * 10;
         }
 
-        /* update too big for an IB */
-        if (ndw > 0xfffff)
-                return -ENOMEM;
-
-        ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-        if (!ib)
-                return -ENOMEM;
-
-        r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-        if (r) {
-                kfree(ib);
+        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+        if (r)
                 return r;
-        }
 
-        ib->length_dw = 0;
+        ib = &job->ibs[0];
 
-        r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
-                                  mapping->it.last + 1, addr + mapping->offset,
-                                  flags, gtt_flags);
+        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+                             owner);
+        if (r)
+                goto error_free;
 
-        if (r) {
-                amdgpu_ib_free(adev, ib);
-                kfree(ib);
-                return r;
-        }
+        r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
+        if (r)
+                goto error_free;
+
+        amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
+                              addr, flags);
 
-        amdgpu_vm_pad_ib(adev, ib);
+        amdgpu_ring_pad_ib(ring, ib);
         WARN_ON(ib->length_dw > ndw);
-        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-                                                 &amdgpu_vm_free_job,
-                                                 AMDGPU_FENCE_OWNER_VM,
-                                                 &f);
+        r = amdgpu_job_submit(job, ring, &vm->entity,
                              AMDGPU_FENCE_OWNER_VM, &f);
         if (r)
                 goto error_free;
@@ -821,19 +749,76 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                 *fence = fence_get(f);
         }
         fence_put(f);
-        if (!amdgpu_enable_scheduler) {
-                amdgpu_ib_free(adev, ib);
-                kfree(ib);
-        }
         return 0;
 
 error_free:
-        amdgpu_ib_free(adev, ib);
-        kfree(ib);
+        amdgpu_job_free(job);
         return r;
 }
 
 /**
+ * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
+ *
+ * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @vm: requested vm
+ * @mapping: mapped range and flags to use for the update
+ * @addr: addr to set the area to
+ * @gtt_flags: flags as they are used for GTT
+ * @fence: optional resulting fence
+ *
+ * Split the mapping into smaller chunks so that each update fits
+ * into a SDMA IB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+                                      struct amdgpu_gart *gtt,
+                                      uint32_t gtt_flags,
+                                      struct amdgpu_vm *vm,
+                                      struct amdgpu_bo_va_mapping *mapping,
+                                      uint64_t addr, struct fence **fence)
+{
+        const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
+
+        uint64_t start = mapping->it.start;
+        uint32_t flags = gtt_flags;
+        int r;
+
+        /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
+         * but in case of something, we filter the flags in first place
+         */
+        if (!(mapping->flags & AMDGPU_PTE_READABLE))
+                flags &= ~AMDGPU_PTE_READABLE;
+        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+                flags &= ~AMDGPU_PTE_WRITEABLE;
+
+        trace_amdgpu_vm_bo_update(mapping);
+
+        addr += mapping->offset;
+
+        if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
+                return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+                                                   start, mapping->it.last,
+                                                   flags, addr, fence);
+
+        while (start != mapping->it.last + 1) {
+                uint64_t last;
+
+                last = min((uint64_t)mapping->it.last, start + max_size);
+                r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+                                                start, last, flags, addr,
+                                                fence);
+                if (r)
+                        return r;
+
+                start = last + 1;
+                addr += max_size;
+        }
+
+        return 0;
+}
+
+/**
  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
  *
  * @adev: amdgpu_device pointer
@@ -851,14 +836,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 {
         struct amdgpu_vm *vm = bo_va->vm;
         struct amdgpu_bo_va_mapping *mapping;
+        struct amdgpu_gart *gtt = NULL;
         uint32_t flags;
         uint64_t addr;
         int r;
 
         if (mem) {
                 addr = (u64)mem->start << PAGE_SHIFT;
-                if (mem->mem_type != TTM_PL_TT)
+                switch (mem->mem_type) {
+                case TTM_PL_TT:
+                        gtt = &bo_va->bo->adev->gart;
+                        break;
+
+                case TTM_PL_VRAM:
                         addr += adev->vm_manager.vram_base_offset;
+                        break;
+
+                default:
+                        break;
+                }
         } else {
                 addr = 0;
         }
@@ -871,8 +867,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
         spin_unlock(&vm->status_lock);
 
         list_for_each_entry(mapping, &bo_va->invalids, list) {
-                r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
-                                                flags, &bo_va->last_pt_update);
+                r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+                                               &bo_va->last_pt_update);
                 if (r)
                         return r;
         }
@@ -918,7 +914,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                         struct amdgpu_bo_va_mapping, list);
                 list_del(&mapping->list);
                 spin_unlock(&vm->freed_lock);
-                r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
+                r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
+                                               0, NULL);
                 kfree(mapping);
                 if (r)
                         return r;
@@ -976,7 +973,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
  * @vm: requested vm
  * @bo: amdgpu buffer object
  *
- * Add @bo into the requested vm (cayman+).
+ * Add @bo into the requested vm.
  * Add @bo to the list of bos associated with the vm
  * Returns newly added bo_va or NULL for failure
  *
@@ -1117,15 +1114,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                  */
                 pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-                r = amdgpu_vm_clear_bo(adev, pt);
+                r = amdgpu_vm_clear_bo(adev, vm, pt);
                 if (r) {
                         amdgpu_bo_unref(&pt);
                         goto error_free;
                 }
 
                 entry->robj = pt;
-                entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
-                entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
                 entry->priority = 0;
                 entry->tv.bo = &entry->robj->tbo;
                 entry->tv.shared = true;
@@ -1210,7 +1205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @bo_va: requested bo_va
  *
- * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the requested vm.
  *
  * Object have to be reserved!
  */
@@ -1255,7 +1250,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  * @vm: requested vm
  * @bo: amdgpu buffer object
  *
- * Mark @bo as invalid (cayman+).
+ * Mark @bo as invalid.
  */
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                              struct amdgpu_bo *bo)
@@ -1276,13 +1271,16 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Init @vm fields (cayman+).
+ * Init @vm fields.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                 AMDGPU_VM_PTE_COUNT * 8);
         unsigned pd_size, pd_entries;
+        unsigned ring_instance;
+        struct amdgpu_ring *ring;
+        struct amd_sched_rq *rq;
         int i, r;
 
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1306,6 +1304,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 return -ENOMEM;
         }
 
+        /* create scheduler entity for page table updates */
+
+        ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
+        ring_instance %= adev->vm_manager.vm_pte_num_rings;
+        ring = adev->vm_manager.vm_pte_rings[ring_instance];
+        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+        r = amd_sched_entity_init(&ring->sched, &vm->entity,
+                                  rq, amdgpu_sched_jobs);
+        if (r)
+                return r;
+
         vm->page_directory_fence = NULL;
 
         r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1313,22 +1322,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                              NULL, NULL, &vm->page_directory);
         if (r)
-                return r;
+                goto error_free_sched_entity;
+
         r = amdgpu_bo_reserve(vm->page_directory, false);
-        if (r) {
-                amdgpu_bo_unref(&vm->page_directory);
-                vm->page_directory = NULL;
-                return r;
-        }
-        r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+        if (r)
+                goto error_free_page_directory;
+
+        r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
         amdgpu_bo_unreserve(vm->page_directory);
-        if (r) {
-                amdgpu_bo_unref(&vm->page_directory);
-                vm->page_directory = NULL;
-                return r;
-        }
+        if (r)
+                goto error_free_page_directory;
 
         return 0;
+
+error_free_page_directory:
+        amdgpu_bo_unref(&vm->page_directory);
+        vm->page_directory = NULL;
+
+error_free_sched_entity:
+        amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+        return r;
 }
 
 /**
@@ -1337,7 +1351,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Tear down @vm (cayman+).
+ * Tear down @vm.
  * Unbind the VM and remove all bos from the vm bo list
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
@@ -1345,6 +1359,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         struct amdgpu_bo_va_mapping *mapping, *tmp;
         int i;
 
+        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+
         if (!RB_EMPTY_ROOT(&vm->va)) {
                 dev_err(adev->dev, "still active bo inside vm\n");
         }
@@ -1375,6 +1391,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 }
 
 /**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+        /* skip over VMID 0, since it is the system VM */
+        for (i = 1; i < adev->vm_manager.num_ids; ++i)
+                list_add_tail(&adev->vm_manager.ids[i].list,
+                              &adev->vm_manager.ids_lru);
+
+        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
+}
+
+/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
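
The amdgpu_vm_grab_id() hunks above replace the per-ring search for a free VMID with a single LRU list of hardware IDs: if a VM still owns its previous ID the entry is simply refreshed, otherwise the least recently used entry is recycled and the fence passed in protects it from premature reuse. A minimal userspace sketch of that reuse pattern follows; struct vmid, lru_touch() and grab_id() are hypothetical stand-ins, not the kernel API.

/* simplified illustration of LRU-based ID reuse (hypothetical types) */
#include <stdio.h>

#define NUM_IDS 8

struct vmid {
        int owner;                /* client that last used this hardware ID, -1 = none */
        struct vmid *prev, *next; /* position in the LRU list */
};

static struct vmid ids[NUM_IDS];
static struct vmid lru = { .prev = &lru, .next = &lru }; /* list sentinel */

/* move @id to the "most recently used" end of the list */
static void lru_touch(struct vmid *id)
{
        id->prev->next = id->next;     /* unlink */
        id->next->prev = id->prev;
        id->prev = lru.prev;           /* relink just before the sentinel */
        id->next = &lru;
        lru.prev->next = id;
        lru.prev = id;
}

/* reuse the client's previous ID if it still owns it, else recycle the LRU one */
static int grab_id(int client, int old_id)
{
        struct vmid *id;

        if (old_id >= 0 && ids[old_id].owner == client) {
                lru_touch(&ids[old_id]);   /* still ours: refresh, no flush needed */
                return old_id;
        }

        id = lru.next;                     /* least recently used entry */
        lru_touch(id);
        id->owner = client;                /* new owner: caller must flush */
        return (int)(id - ids);
}

int main(void)
{
        int i, a = -1, b = -1;

        for (i = 0; i < NUM_IDS; i++) {
                ids[i].owner = -1;
                ids[i].prev = ids[i].next = &ids[i];
                lru_touch(&ids[i]);        /* populate the list in index order */
        }

        a = grab_id(1, a);  /* client 1 takes the oldest entry */
        b = grab_id(2, b);  /* client 2 takes the next one */
        a = grab_id(1, a);  /* client 1 reuses its ID without recycling */
        printf("client 1 -> id %d, client 2 -> id %d\n", a, b);
        return 0;
}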
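
amdgpu_vm_bo_split_mapping(), added above, caps each page-table update at 64MB worth of GPU pages (64MB / 4KB pages = 16384 PTEs) so the generated commands fit into a single SDMA IB. Below is a self-contained sketch of that chunking loop under simplified assumptions; update_range() is a hypothetical stand-in for the real update call, and addresses are advanced in plain bytes.

/* standalone sketch of splitting a [start, last] page interval into chunks */
#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SIZE 4096ULL

/* 64MB of mappings per submission, expressed in GPU pages */
static const uint64_t max_pages = 64ULL * 1024ULL * 1024ULL / GPU_PAGE_SIZE;

/* stand-in for the real page-table update; just reports the chunk */
static void update_range(uint64_t first, uint64_t last, uint64_t dst)
{
        printf("update PTEs %llu..%llu -> dst 0x%llx\n",
               (unsigned long long)first, (unsigned long long)last,
               (unsigned long long)dst);
}

static void split_mapping(uint64_t start, uint64_t last, uint64_t dst)
{
        while (start <= last) {
                uint64_t end = start + max_pages - 1;

                if (end > last)
                        end = last;    /* final, possibly short, chunk */

                update_range(start, end, dst);

                dst += (end - start + 1) * GPU_PAGE_SIZE;
                start = end + 1;
        }
}

int main(void)
{
        /* 40000 pages (~156MB) split into two full 64MB chunks plus a remainder */
        split_mapping(0, 39999, 0x100000000ULL);
        return 0;
}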