From d1f6dc1a9a106a73510181cfad9b4a7a0b140990 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 19 Oct 2017 14:29:46 -0400 Subject: drm/amdgpu: Avoid accessing job->entity after the job is scheduled. Bug: amdgpu_job_free_cb was accessing s_job->s_entity when the allocated amdgpu_ctx (and the entity inside it) had already been deallocated in amdgpu_cs_parser_fini. Fix: Save the job's priority at its creation instead of accessing it through s_entity later on. Signed-off-by: Andrey Grodzovsky Reviewed-by: Andres Rodriguez Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 0cfc68db575b..a58e3c5dd84b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -104,7 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); - amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job)); + amdgpu_ring_priority_put(job->ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->dep_sync); @@ -141,8 +141,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, job->fence_ctx = entity->fence_context; *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); - amdgpu_ring_priority_get(job->ring, - amd_sched_get_job_priority(&job->base)); + amdgpu_ring_priority_get(job->ring, job->base.s_priority); amd_sched_entity_push_job(&job->base); return 0; -- cgit v1.2.3 From a4176cb484ac457a08b44c93da06fce09c6e281c Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 24 Oct 2017 13:30:16 -0400 Subject: drm/amdgpu: Remove job->s_entity to avoid keeping a reference to a stale pointer.
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7 ++++--- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 9 ++++----- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 19 +++++++++---------- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 7 ++++--- 5 files changed, 22 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 16947bad5b49..bf1aad00bb8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1203,7 +1203,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_ring_priority_get(job->ring, job->base.s_priority); trace_amdgpu_cs_ioctl(job); - amd_sched_entity_push_job(&job->base); + amd_sched_entity_push_job(&job->base, entity); ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); amdgpu_mn_unlock(p->mn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index a58e3c5dd84b..f60662e03761 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -142,12 +142,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amdgpu_ring_priority_get(job->ring, job->base.s_priority); - amd_sched_entity_push_job(&job->base); + amd_sched_entity_push_job(&job->base, entity); return 0; } -static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) +static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, + struct amd_sched_entity *s_entity) { struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; @@ -155,7 +156,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync); int r; - if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) { + if (amd_sched_dependency_optimized(fence, s_entity)) { r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence); if (r) DRM_ERROR("Error adding fence to sync (%d)\n", r); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 705380eb693c..eebe323c7159 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -13,8 +13,8 @@ #define TRACE_INCLUDE_FILE gpu_sched_trace TRACE_EVENT(amd_sched_job, - TP_PROTO(struct amd_sched_job *sched_job), - TP_ARGS(sched_job), + TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity), + TP_ARGS(sched_job, entity), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) __field(struct dma_fence *, fence) @@ -25,12 +25,11 @@ TRACE_EVENT(amd_sched_job, ), TP_fast_assign( - __entry->entity = sched_job->s_entity; + __entry->entity = entity; __entry->id = sched_job->id; __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; - __entry->job_count = spsc_queue_count( - &sched_job->s_entity->job_queue); + __entry->job_count = spsc_queue_count(&entity->job_queue); __entry->hw_job_count = atomic_read( &sched_job->sched->hw_rq_count); ), diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 1a2267ce62a8..f116de798204 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -341,11 +341,10 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) if (!sched_job) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job))) + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) if (amd_sched_entity_add_dependency_cb(entity)) return NULL; - sched_job->s_entity = NULL; spsc_queue_pop(&entity->job_queue); return sched_job; } @@ -357,13 +356,13 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) * * Returns 0 for success, negative error code otherwise. */ -void amd_sched_entity_push_job(struct amd_sched_job *sched_job) +void amd_sched_entity_push_job(struct amd_sched_job *sched_job, + struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = sched_job->sched; - struct amd_sched_entity *entity = sched_job->s_entity; bool first = false; - trace_amd_sched_job(sched_job); + trace_amd_sched_job(sched_job, entity); spin_lock(&entity->queue_lock); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); @@ -442,11 +441,12 @@ static void amd_sched_job_timedout(struct work_struct *work) job->sched->ops->timedout_job(job); } -static void amd_sched_set_guilty(struct amd_sched_job *s_job) +static void amd_sched_set_guilty(struct amd_sched_job *s_job, + struct amd_sched_entity *s_entity) { if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit) - if (s_job->s_entity->guilty) - atomic_set(s_job->s_entity->guilty, 1); + if (s_entity->guilty) + atomic_set(s_entity->guilty, 1); } void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) @@ -477,7 +477,7 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo list_for_each_entry_safe(entity, tmp, &rq->entities, list) { if (bad->s_fence->scheduled.context == entity->fence_context) { found = true; - amd_sched_set_guilty(bad); + amd_sched_set_guilty(bad, entity); break; } } @@ -541,7 +541,6 @@ int amd_sched_job_init(struct amd_sched_job *job, void *owner) { job->sched = sched; - job->s_entity = entity; job->s_priority = entity->rq - sched->sched_rq; job->s_fence = amd_sched_fence_create(entity, owner); if (!job->s_fence) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index f9e3a83cddc6..b590fcc2786a 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -91,7 +91,6 @@ struct amd_sched_fence { struct amd_sched_job { struct spsc_node queue_node; struct amd_gpu_scheduler *sched; - struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; struct dma_fence_cb finish_cb; struct work_struct finish_work; @@ -125,7 +124,8 @@ static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int thr * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - struct dma_fence *(*dependency)(struct amd_sched_job *sched_job); + struct dma_fence *(*dependency)(struct amd_sched_job *sched_job, + struct amd_sched_entity *s_entity); struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); void (*timedout_job)(struct amd_sched_job *sched_job); void (*free_job)(struct amd_sched_job *sched_job); @@ -161,7 +161,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, uint32_t jobs, atomic_t* guilty); void amd_sched_entity_fini(struct amd_gpu_scheduler 
*sched, struct amd_sched_entity *entity); -void amd_sched_entity_push_job(struct amd_sched_job *sched_job); +void amd_sched_entity_push_job(struct amd_sched_job *sched_job, + struct amd_sched_entity *entity); void amd_sched_entity_set_rq(struct amd_sched_entity *entity, struct amd_sched_rq *rq); -- cgit v1.2.3 From 48f05f2955e4a3183b219d6dfdb1c28e17d03da7 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 25 Oct 2017 16:21:08 +0800 Subject: amd/scheduler: implement job skip feature (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Jobs are skipped in two cases: 1) when the entity behind a job is marked guilty, the job popped from that entity's queue is dropped in the sched_main loop; 2) in job_recovery(), a job is skipped from scheduling if its karma is detected above the limit, and other jobs sharing the same fence context are skipped as well. This approach is needed because job_recovery() cannot access job->entity, since the entity may already be dead. v2: some logic fixes. v3: when the entity is detected as guilty, don't drop the job at the popping stage; instead set its fence error to -ECANCELED. In run_job(), skip the scheduling if either 1) fence->error < 0 or 2) a VRAM loss occurred for this job. This way the job-skipping logic is unified. With this feature we can introduce the new GPU recover feature. Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 13 +++++---- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 39 ++++++++++++++++----------- 2 files changed, 31 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index f60662e03761..0a90c768dbc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -180,7 +180,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) { - struct dma_fence *fence = NULL; + struct dma_fence *fence = NULL, *finished; struct amdgpu_device *adev; struct amdgpu_job *job; int r; @@ -190,15 +190,18 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) return NULL; } job = to_amdgpu_job(sched_job); + finished = &job->base.s_fence->finished; adev = job->adev; BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); - /* skip ib schedule when vram is lost */ - if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) { - dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED); - DRM_ERROR("Skip scheduling IBs!\n"); + + if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) + dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */ + + if (finished->error < 0) { + DRM_INFO("Skip scheduling IBs!\n"); } else { r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index f116de798204..941b5920b97b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -345,6 +345,10 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) if (amd_sched_entity_add_dependency_cb(entity)) return NULL; + /* skip jobs from entity that marked guilty */ + if (entity->guilty && atomic_read(entity->guilty)) + dma_fence_set_error(&sched_job->s_fence->finished,
-ECANCELED); + spsc_queue_pop(&entity->job_queue); return sched_job; } @@ -441,14 +445,6 @@ static void amd_sched_job_timedout(struct work_struct *work) job->sched->ops->timedout_job(job); } -static void amd_sched_set_guilty(struct amd_sched_job *s_job, - struct amd_sched_entity *s_entity) -{ - if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit) - if (s_entity->guilty) - atomic_set(s_entity->guilty, 1); -} - void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) { struct amd_sched_job *s_job; @@ -468,21 +464,24 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo spin_unlock(&sched->job_list_lock); if (bad) { - bool found = false; - - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++ ) { + /* don't increase @bad's karma if it's from KERNEL RQ, + * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs) + * corrupt but keep in mind that kernel jobs always considered good. + */ + for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) { struct amd_sched_rq *rq = &sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry_safe(entity, tmp, &rq->entities, list) { if (bad->s_fence->scheduled.context == entity->fence_context) { - found = true; - amd_sched_set_guilty(bad, entity); + if (atomic_inc_return(&bad->karma) > bad->sched->hang_limit) + if (entity->guilty) + atomic_set(entity->guilty, 1); break; } } spin_unlock(&rq->lock); - if (found) + if (&entity->list != &rq->entities) break; } } @@ -500,6 +499,7 @@ void amd_sched_job_kickout(struct amd_sched_job *s_job) void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) { struct amd_sched_job *s_job, *tmp; + bool found_guilty = false; int r; spin_lock(&sched->job_list_lock); @@ -511,6 +511,15 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { struct amd_sched_fence *s_fence = s_job->s_fence; struct dma_fence *fence; + uint64_t guilty_context; + + if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { + found_guilty = true; + guilty_context = s_job->s_fence->scheduled.context; + } + + if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) + dma_fence_set_error(&s_fence->finished, -ECANCELED); spin_unlock(&sched->job_list_lock); fence = sched->ops->run_job(s_job); @@ -526,7 +535,6 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) r); dma_fence_put(fence); } else { - DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); } spin_lock(&sched->job_list_lock); @@ -664,7 +672,6 @@ static int amd_sched_main(void *param) r); dma_fence_put(fence); } else { - DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); } -- cgit v1.2.3 From 5740682e66cef57626a328d237698cad329c0449 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 25 Oct 2017 16:37:02 +0800 Subject: drm/amdgpu: implement new GPU recover (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1) The new implementation is named amdgpu_gpu_recover, which gives a better hint of what it does compared with gpu_reset. 2) gpu_recover unifies bare-metal and SR-IOV; only the ASIC reset part is implemented differently. 3) gpu_recover increases the hanging job's karma and marks its entity/context as guilty if it exceeds the limit. V2: 4) In the scheduler main routine, a job from a guilty context is immediately fake-signaled after it is popped from the queue, and its fence is set with an "-ECANCELED" error. 5) In the scheduler
recovery routine, all jobs from the guilty entity are dropped. 6) In the run_job() routine, the real IB submission is skipped if the @skip parameter equals true or a VRAM loss occurred. V3: 7) Replace the deprecated gpu reset; use the new gpu recover. Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 322 ++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 - drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 8 files changed, 166 insertions(+), 184 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 88fa19b1a802..5714b7e8cb09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -178,6 +178,10 @@ extern int amdgpu_cik_support; #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 +/* GPU RESET flags */ +#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0) +#define AMDGPU_RESET_INFO_FULLRESET (1 << 1) + struct amdgpu_device; struct amdgpu_ib; struct amdgpu_cs_parser; @@ -1833,7 +1837,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_reset(struct amdgpu_device *adev); +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e521850e9409..e287eeda2dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2827,163 +2827,172 @@ err: return r; } -/** - * amdgpu_sriov_gpu_reset - reset the asic +/* + * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough * * @adev: amdgpu device pointer - * @job: which job trigger hang + * @reset_flags: output param tells caller the reset result * - * Attempt the reset the GPU if it has hung (all asics). - * for SRIOV case. - * Returns 0 for success or an error on failure. - */ -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) + * attempt to do soft-reset or full-reset and reinitialize Asic + * return 0 means successed otherwise failed +*/ +static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) { - int i, j, r = 0; - int resched; - struct amdgpu_bo *bo, *tmp; - struct amdgpu_ring *ring; - struct dma_fence *fence = NULL, *next = NULL; + bool need_full_reset, vram_lost = 0; + int r; - mutex_lock(&adev->virt.lock_reset); - atomic_inc(&adev->gpu_reset_counter); - adev->in_sriov_reset = true; + need_full_reset = amdgpu_need_full_reset(adev); - /* block TTM */ - resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + if (!need_full_reset) { + amdgpu_pre_soft_reset(adev); + r = amdgpu_soft_reset(adev); + amdgpu_post_soft_reset(adev); + if (r || amdgpu_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } - /* we start from the ring trigger GPU hang */ - j = job ?
job->ring->idx : 0; + } - /* block scheduler */ - for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { - ring = adev->rings[i % AMDGPU_MAX_RINGS]; - if (!ring || !ring->sched.thread) - continue; + if (need_full_reset) { + r = amdgpu_suspend(adev); - kthread_park(ring->sched.thread); +retry: + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_asic_reset(adev); + amdgpu_atombios_scratch_regs_restore(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); - if (job && j != i) - continue; + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume_phase1(adev); + if (r) + goto out; - /* here give the last chance to check if job removed from mirror-list - * since we already pay some time on kthread_park */ - if (job && list_empty(&job->base.node)) { - kthread_unpark(ring->sched.thread); - goto give_up_reset; + vram_lost = amdgpu_check_vram_lost(adev); + if (vram_lost) { + DRM_ERROR("VRAM is lost!\n"); + atomic_inc(&adev->vram_lost_counter); + } + + r = amdgpu_ttm_recover_gart(adev); + if (r) + goto out; + + r = amdgpu_resume_phase2(adev); + if (r) + goto out; + + if (vram_lost) + amdgpu_fill_reset_magic(adev); } + } - if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit)) - amd_sched_job_kickout(&job->base); +out: + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_suspend(adev); + need_full_reset = true; + goto retry; + } + } - /* only do job_reset on the hang ring if @job not NULL */ - amd_sched_hw_job_reset(&ring->sched, NULL); + if (reset_flags) { + if (vram_lost) + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ - amdgpu_fence_driver_force_completion(ring); + if (need_full_reset) + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; } - /* request to take full control of GPU before re-initialization */ - if (job) - amdgpu_virt_reset_gpu(adev); - else - amdgpu_virt_request_full_gpu(adev, true); + return r; +} + +/* + * amdgpu_reset_sriov - reset ASIC for SR-IOV vf + * + * @adev: amdgpu device pointer + * @reset_flags: output param tells caller the reset result + * + * do VF FLR and reinitialize Asic + * return 0 means successed otherwise failed +*/ +static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor) +{ + int r; + if (from_hypervisor) + r = amdgpu_virt_request_full_gpu(adev, true); + else + r = amdgpu_virt_reset_gpu(adev); + if (r) + return r; /* Resume IP prior to SMC */ - amdgpu_sriov_reinit_early(adev); + r = amdgpu_sriov_reinit_early(adev); + if (r) + goto error; /* we need recover gart prior to run SMC/CP/SDMA resume */ amdgpu_ttm_recover_gart(adev); /* now we are okay to resume SMC/CP/SDMA */ - amdgpu_sriov_reinit_late(adev); + r = amdgpu_sriov_reinit_late(adev); + if (r) + goto error; amdgpu_irq_gpu_reset_resume_helper(adev); - - if (amdgpu_ib_ring_tests(adev)) + r = amdgpu_ib_ring_tests(adev); + if (r) dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r); +error: /* release full control of GPU after ib test */ amdgpu_virt_release_full_gpu(adev, true); - DRM_INFO("recover vram bo from shadow\n"); - - ring = adev->mman.buffer_funcs_ring; - mutex_lock(&adev->shadow_list_lock); - list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { - next = NULL; - amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); - if (fence) { - r = dma_fence_wait(fence, 
false); - if (r) { - WARN(r, "recovery from shadow isn't completed\n"); - break; - } - } - - dma_fence_put(fence); - fence = next; - } - mutex_unlock(&adev->shadow_list_lock); - - if (fence) { - r = dma_fence_wait(fence, false); - if (r) - WARN(r, "recovery from shadow isn't completed\n"); - } - dma_fence_put(fence); - - for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { - ring = adev->rings[i % AMDGPU_MAX_RINGS]; - if (!ring || !ring->sched.thread) - continue; - - if (job && j != i) { - kthread_unpark(ring->sched.thread); - continue; - } - - amd_sched_job_recovery(&ring->sched); - kthread_unpark(ring->sched.thread); - } + if (reset_flags) { + /* will get vram_lost from GIM in future, now all + * reset request considered VRAM LOST + */ + (*reset_flags) |= ~AMDGPU_RESET_INFO_VRAM_LOST; + atomic_inc(&adev->vram_lost_counter); - drm_helper_resume_force_mode(adev->ddev); -give_up_reset: - ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) { - /* bad news, how to tell it to userspace ? */ - dev_info(adev->dev, "GPU reset failed\n"); - } else { - dev_info(adev->dev, "GPU reset successed!\n"); + /* VF FLR or hotlink reset is always full-reset */ + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; } - adev->in_sriov_reset = false; - mutex_unlock(&adev->virt.lock_reset); return r; } /** - * amdgpu_gpu_reset - reset the asic + * amdgpu_gpu_recover - reset the asic and recover scheduler * * @adev: amdgpu device pointer + * @job: which job trigger hang * - * Attempt the reset the GPU if it has hung (all asics). + * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. */ -int amdgpu_gpu_reset(struct amdgpu_device *adev) +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) { struct drm_atomic_state *state = NULL; - int i, r; - int resched; - bool need_full_reset, vram_lost = false; + uint64_t reset_flags = 0; + int i, r, resched; if (!amdgpu_check_soft_reset(adev)) { DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); return 0; } + dev_info(adev->dev, "GPU reset begin!\n"); + + mutex_lock(&adev->virt.lock_reset); atomic_inc(&adev->gpu_reset_counter); + adev->in_sriov_reset = 1; /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); @@ -2997,69 +3006,26 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) if (!ring || !ring->sched.thread) continue; + + /* only focus on the ring hit timeout if &job not NULL */ + if (job && job->ring->idx != i) + continue; + kthread_park(ring->sched.thread); - amd_sched_hw_job_reset(&ring->sched, NULL); + amd_sched_hw_job_reset(&ring->sched, &job->base); + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } - need_full_reset = amdgpu_need_full_reset(adev); - - if (!need_full_reset) { - amdgpu_pre_soft_reset(adev); - r = amdgpu_soft_reset(adev); - amdgpu_post_soft_reset(adev); - if (r || amdgpu_check_soft_reset(adev)) { - DRM_INFO("soft reset failed, will fallback to full reset!\n"); - need_full_reset = true; - } - } - - if (need_full_reset) { - r = amdgpu_suspend(adev); - -retry: - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_asic_reset(adev); - amdgpu_atombios_scratch_regs_restore(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (amdgpu_sriov_vf(adev)) + r = amdgpu_reset_sriov(adev, &reset_flags, job ? 
false : true); + else + r = amdgpu_reset(adev, &reset_flags); - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume_phase1(adev); - if (r) - goto out; - vram_lost = amdgpu_check_vram_lost(adev); - if (vram_lost) { - DRM_ERROR("VRAM is lost!\n"); - atomic_inc(&adev->vram_lost_counter); - } - r = amdgpu_ttm_recover_gart(adev); - if (r) - goto out; - r = amdgpu_resume_phase2(adev); - if (r) - goto out; - if (vram_lost) - amdgpu_fill_reset_magic(adev); - } - } -out: if (!r) { - amdgpu_irq_gpu_reset_resume_helper(adev); - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - r = amdgpu_suspend(adev); - need_full_reset = true; - goto retry; - } - /** - * recovery vm page tables, since we cannot depend on VRAM is - * consistent after gpu full reset. - */ - if (need_full_reset && amdgpu_need_backup(adev)) { + if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || + (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *bo, *tmp; struct dma_fence *fence = NULL, *next = NULL; @@ -3088,40 +3054,56 @@ out: } dma_fence_put(fence); } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->sched.thread) continue; + /* only focus on the ring hit timeout if &job not NULL */ + if (job && job->ring->idx != i) + continue; + amd_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } } else { - dev_err(adev->dev, "asic resume failed (%d).\n", r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (adev->rings[i] && adev->rings[i]->sched.thread) { - kthread_unpark(adev->rings[i]->sched.thread); - } + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + /* only focus on the ring hit timeout if &job not NULL */ + if (job && job->ring->idx != i) + continue; + + kthread_unpark(adev->rings[i]->sched.thread); } } if (amdgpu_device_has_dc_support(adev)) { - r = drm_atomic_helper_resume(adev->ddev, state); + if (drm_atomic_helper_resume(adev->ddev, state)) + dev_info(adev->dev, "drm resume failed:%d\n", r); amdgpu_dm_display_resume(adev); - } else + } else { drm_helper_resume_force_mode(adev->ddev); + } ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); + if (r) { /* bad news, how to tell it to userspace ? */ - dev_info(adev->dev, "GPU reset failed\n"); - } - else { - dev_info(adev->dev, "GPU reset successed!\n"); + dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } else { + dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter)); } amdgpu_vf_error_trans_all(adev); + adev->in_sriov_reset = 0; + mutex_unlock(&adev->virt.lock_reset); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index eda89dfdef5b..604ac03a42e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -694,25 +694,25 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) } /** - * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset + * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover * * Manually trigger a gpu reset at the next fence wait. 
*/ -static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) +static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - seq_printf(m, "gpu reset\n"); - amdgpu_gpu_reset(adev); + seq_printf(m, "gpu recover\n"); + amdgpu_gpu_recover(adev, NULL); return 0; } static const struct drm_info_list amdgpu_debugfs_fence_list[] = { {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, - {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} + {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL} }; static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 32590e4f9f7a..c340774082ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_reset(adev); + amdgpu_gpu_recover(adev, NULL); } /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 0a90c768dbc1..18770a880393 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - if (amdgpu_sriov_vf(job->adev)) - amdgpu_sriov_gpu_reset(job->adev, job); - else - amdgpu_gpu_reset(job->adev); + amdgpu_gpu_recover(job->adev, job); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d149aca71a44..20bdb8fb0b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -288,7 +288,6 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); int amdgpu_virt_wait_reset(struct amdgpu_device *adev); -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index f91aab38637c..c32d0b0868e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -254,7 +254,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_sriov_gpu_reset(adev, NULL); + amdgpu_gpu_recover(adev, NULL); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 27b03c773b1b..818ec0fe2f51 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -519,7 +519,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_sriov_gpu_reset(adev, NULL); + amdgpu_gpu_recover(adev, NULL); } static int 
xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From cebb52b7bc325863600aff930407bba773010938 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 13 Nov 2017 14:47:52 -0500 Subject: drm/amdgpu: Get rid of dep_sync as a separate object. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead, mark the fence as explicit in its amdgpu_sync_entry. v2: Fix use-after-free bug and add new parameter description. Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 24 +++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 ++++++------ 7 files changed, 37 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ecc2e60e5f0c..5e2958a79928 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1121,7 +1121,6 @@ struct amdgpu_job { struct amdgpu_vm *vm; struct amdgpu_ring *ring; struct amdgpu_sync sync; - struct amdgpu_sync dep_sync; struct amdgpu_sync sched_sync; struct amdgpu_ib *ibs; struct dma_fence *fence; /* the hw fence */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 93d3cef66503..4cea9ab237ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -786,7 +786,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) return r; r = amdgpu_sync_fence(adev, &p->job->sync, - fpriv->prt_va->last_pt_update); + fpriv->prt_va->last_pt_update, false); if (r) return r; @@ -800,7 +800,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) return r; f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f); + r = amdgpu_sync_fence(adev, &p->job->sync, f, false); if (r) return r; } @@ -823,7 +823,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) return r; f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f); + r = amdgpu_sync_fence(adev, &p->job->sync, f, false); if (r) return r; } @@ -834,7 +834,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update); + r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false); if (r) return r; @@ -1038,8 +1038,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, amdgpu_ctx_put(ctx); return r; } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->dep_sync, - fence); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, + true); dma_fence_put(fence); amdgpu_ctx_put(ctx); if (r) @@ -1058,7 +1058,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, if (r) return r; - r = amdgpu_sync_fence(p->adev, &p->job->dep_sync, fence); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); dma_fence_put(fence); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 659997bfff30..0cf86eb357d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -164,7 +164,7 @@ int
amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } if (ring->funcs->emit_pipeline_sync && job && - ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || + ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; dma_fence_put(tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 18770a880393..bdc210ac74f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -60,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->num_ibs = num_ibs; amdgpu_sync_create(&(*job)->sync); - amdgpu_sync_create(&(*job)->dep_sync); amdgpu_sync_create(&(*job)->sched_sync); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); @@ -104,7 +103,6 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) amdgpu_ring_priority_put(job->ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); - amdgpu_sync_free(&job->dep_sync); amdgpu_sync_free(&job->sched_sync); kfree(job); } @@ -115,7 +113,6 @@ void amdgpu_job_free(struct amdgpu_job *job) dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); - amdgpu_sync_free(&job->dep_sync); amdgpu_sync_free(&job->sched_sync); kfree(job); } @@ -149,17 +146,18 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, { struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; - - struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync); + bool explicit = false; int r; - - if (amd_sched_dependency_optimized(fence, s_entity)) { - r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence); - if (r) - DRM_ERROR("Error adding fence to sync (%d)\n", r); + struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit); + + if (fence && explicit) { + if (amd_sched_dependency_optimized(fence, s_entity)) { + r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false); + if (r) + DRM_ERROR("Error adding fence to sync (%d)\n", r); + } } - if (!fence) - fence = amdgpu_sync_get_fence(&job->sync); + while (fence == NULL && vm && !job->vm_id) { struct amdgpu_ring *ring = job->ring; @@ -169,7 +167,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); - fence = amdgpu_sync_get_fence(&job->sync); + fence = amdgpu_sync_get_fence(&job->sync, NULL); } return fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a4bf21f8f1c1..f3d1a25b660f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -35,6 +35,7 @@ struct amdgpu_sync_entry { struct hlist_node node; struct dma_fence *fence; + bool explicit; }; static struct kmem_cache *amdgpu_sync_slab; @@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) * */ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f) + struct dma_fence *f, bool explicit) { struct amdgpu_sync_entry *e; @@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, if (!e) return -ENOMEM; + e->explicit = explicit; + hash_add(sync->fences, &e->node, f->context); e->fence = dma_fence_get(f); return 0; @@ -189,7 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, /* always sync to the exclusive fence */ f = reservation_object_get_excl(resv); - r = 
amdgpu_sync_fence(adev, sync, f); + r = amdgpu_sync_fence(adev, sync, f, false); if (explicit_sync) return r; @@ -220,7 +223,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, continue; } - r = amdgpu_sync_fence(adev, sync, f); + r = amdgpu_sync_fence(adev, sync, f, false); if (r) break; } @@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, * amdgpu_sync_get_fence - get the next fence from the sync object * * @sync: sync object to use + * @explicit: true if the next fence is explicit * * Get and removes the next fence from the sync object not signaled yet. */ -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; struct dma_fence *f; int i; - hash_for_each_safe(sync->fences, i, tmp, e, node) { f = e->fence; + if (explicit) + *explicit = e->explicit; hash_del(&e->node); kmem_cache_free(amdgpu_sync_slab, e); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index 70d7e3a279a0..7aba38d5c9df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -41,7 +41,7 @@ struct amdgpu_sync { void amdgpu_sync_create(struct amdgpu_sync *sync); int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f); + struct dma_fence *f, bool explicit); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, @@ -49,7 +49,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, bool explicit_sync); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7de519b86b78..3ecdbdfb04dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -488,7 +488,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, id->pd_gpu_addr = 0; tmp = amdgpu_sync_peek_fence(&id->active, ring); if (tmp) { - r = amdgpu_sync_fence(adev, sync, tmp); + r = amdgpu_sync_fence(adev, sync, tmp, false); return r; } } @@ -496,7 +496,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence); + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); if (r) goto out; @@ -583,7 +583,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } - r = amdgpu_sync_fence(ring->adev, sync, &array->base); + r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); dma_fence_put(&array->base); if (r) goto error; @@ -626,7 +626,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Good we can use this VMID. Remember this submission as * user of the VMID. 
*/ - r = amdgpu_sync_fence(ring->adev, &id->active, fence); + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); if (r) goto error; @@ -646,7 +646,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id = idle; /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence); + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); if (r) goto error; @@ -1657,7 +1657,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, addr = 0; } - r = amdgpu_sync_fence(adev, &job->sync, exclusive); + r = amdgpu_sync_fence(adev, &job->sync, exclusive, false); if (r) goto error_free; -- cgit v1.2.3 From 1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 6 Dec 2017 17:49:39 +0100 Subject: drm: move amd_gpu_scheduler into common location MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This moves and renames the AMDGPU scheduler to a common location in DRM in order to facilitate re-use by other drivers. This is mostly a straightforward rename with no code changes. One notable exception is the function to_drm_sched_fence(), which is no longer an inline header function to avoid the need to export the drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures. Reviewed-by: Chunming Zhou Tested-by: Dieter Nützel Acked-by: Alex Deucher Signed-off-by: Lucas Stach Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 5 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/Makefile | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 38 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 20 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 20 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 8 +- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 82 --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 744 ------------------------ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 186 ------ drivers/gpu/drm/amd/scheduler/sched_fence.c | 173 ------ drivers/gpu/drm/amd/scheduler/spsc_queue.h | 121 ---- drivers/gpu/drm/scheduler/Makefile | 4 + drivers/gpu/drm/scheduler/gpu_scheduler.c | 744 ++++++++++++++++++++++++ drivers/gpu/drm/scheduler/sched_fence.c | 187 ++++++ include/drm/gpu_scheduler.h | 176 ++++++ include/drm/gpu_scheduler_trace.h | 82 +++ include/drm/spsc_queue.h | 122 ++++ 39 files changed, 1440 insertions(+), 1427 deletions(-) delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h delete mode 100644
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h delete mode 100644 drivers/gpu/drm/amd/scheduler/sched_fence.c delete mode 100644 drivers/gpu/drm/amd/scheduler/spsc_queue.h create mode 100644 drivers/gpu/drm/scheduler/Makefile create mode 100644 drivers/gpu/drm/scheduler/gpu_scheduler.c create mode 100644 drivers/gpu/drm/scheduler/sched_fence.c create mode 100644 include/drm/gpu_scheduler.h create mode 100644 include/drm/gpu_scheduler_trace.h create mode 100644 include/drm/spsc_queue.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4d9f21831741..ee38a3db1890 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -149,6 +149,10 @@ config DRM_VM bool depends on DRM && MMU +config DRM_SCHED + tristate + depends on DRM + source "drivers/gpu/drm/i2c/Kconfig" source "drivers/gpu/drm/arm/Kconfig" @@ -178,6 +182,7 @@ config DRM_AMDGPU depends on DRM && PCI && MMU select FW_LOADER select DRM_KMS_HELPER + select DRM_SCHED select DRM_TTM select POWER_SUPPLY select HWMON diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index e9500844333e..1f6ba9e34e31 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -101,3 +101,4 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_SCHED) += scheduler/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 90202cf4cd1e..a7391d49ad40 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -135,10 +135,7 @@ amdgpu-y += \ amdgpu-y += amdgpu_cgs.o # GPU scheduler -amdgpu-y += \ - ../scheduler/gpu_scheduler.o \ - ../scheduler/sched_fence.o \ - amdgpu_job.o +amdgpu-y += amdgpu_job.o # ACP componet ifneq ($(CONFIG_DRM_AMD_ACP),) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5e2958a79928..5c8648ec2cd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include "dm_pp_interface.h" @@ -68,7 +69,6 @@ #include "amdgpu_vcn.h" #include "amdgpu_mn.h" #include "amdgpu_dm.h" -#include "gpu_scheduler.h" #include "amdgpu_virt.h" #include "amdgpu_gart.h" @@ -689,7 +689,7 @@ struct amdgpu_ib { uint32_t flags; }; -extern const struct amd_sched_backend_ops amdgpu_sched_ops; +extern const struct drm_sched_backend_ops amdgpu_sched_ops; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job, struct amdgpu_vm *vm); @@ -699,7 +699,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct amd_sched_entity *entity, void *owner, + struct drm_sched_entity *entity, void *owner, struct dma_fence **f); /* @@ -732,7 +732,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_ctx_ring { uint64_t sequence; struct dma_fence **fences; - struct amd_sched_entity entity; + struct drm_sched_entity entity; }; struct amdgpu_ctx { @@ -746,8 +746,8 @@ struct amdgpu_ctx { struct dma_fence **fences; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; bool preamble_presented; - enum amd_sched_priority init_priority; - enum amd_sched_priority 
override_priority; + enum drm_sched_priority init_priority; + enum drm_sched_priority override_priority; struct mutex lock; atomic_t guilty; }; @@ -767,7 +767,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, uint64_t seq); void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum amd_sched_priority priority); + enum drm_sched_priority priority); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -1116,7 +1116,7 @@ struct amdgpu_cs_parser { #define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */ struct amdgpu_job { - struct amd_sched_job base; + struct drm_sched_job base; struct amdgpu_device *adev; struct amdgpu_vm *vm; struct amdgpu_ring *ring; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4cea9ab237ac..44523a88ebb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1150,7 +1150,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_ring *ring = p->job->ring; - struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; + struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; unsigned i; uint64_t seq; @@ -1173,7 +1173,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job = p->job; p->job = NULL; - r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); + r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp); if (r) { amdgpu_job_free(job); amdgpu_mn_unlock(p->mn); @@ -1202,7 +1202,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_ring_priority_get(job->ring, job->base.s_priority); trace_amdgpu_cs_ioctl(job); - amd_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base, entity); ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); amdgpu_mn_unlock(p->mn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index d71dc164b469..09d35051fdd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -28,10 +28,10 @@ #include "amdgpu_sched.h" static int amdgpu_ctx_priority_permit(struct drm_file *filp, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { /* NORMAL and below are accessible by everyone */ - if (priority <= AMD_SCHED_PRIORITY_NORMAL) + if (priority <= DRM_SCHED_PRIORITY_NORMAL) return 0; if (capable(CAP_SYS_NICE)) @@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, } static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum amd_sched_priority priority, + enum drm_sched_priority priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { unsigned i, j; int r; - if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX) + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) return -EINVAL; r = amdgpu_ctx_priority_permit(filp, priority); @@ -78,19 +78,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->reset_counter_query = ctx->reset_counter; ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; - ctx->override_priority = AMD_SCHED_PRIORITY_UNSET; + ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; /* create context entity for each ring */ for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; - 
struct amd_sched_rq *rq; + struct drm_sched_rq *rq; rq = &ring->sched.sched_rq[priority]; if (ring == &adev->gfx.kiq.ring) continue; - r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, + r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity, rq, amdgpu_sched_jobs, &ctx->guilty); if (r) goto failed; @@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, failed: for (j = 0; j < i; j++) - amd_sched_entity_fini(&adev->rings[j]->sched, + drm_sched_entity_fini(&adev->rings[j]->sched, &ctx->rings[j].entity); kfree(ctx->fences); ctx->fences = NULL; @@ -126,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) ctx->fences = NULL; for (i = 0; i < adev->num_rings; i++) - amd_sched_entity_fini(&adev->rings[i]->sched, + drm_sched_entity_fini(&adev->rings[i]->sched, &ctx->rings[i].entity); amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); @@ -137,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, - enum amd_sched_priority priority, + enum drm_sched_priority priority, uint32_t *id) { struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; @@ -266,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, { int r; uint32_t id; - enum amd_sched_priority priority; + enum drm_sched_priority priority; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = dev->dev_private; @@ -278,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (priority == AMD_SCHED_PRIORITY_INVALID) - priority = AMD_SCHED_PRIORITY_NORMAL; + if (priority == DRM_SCHED_PRIORITY_INVALID) + priority = DRM_SCHED_PRIORITY_NORMAL; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: @@ -385,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, } void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { int i; struct amdgpu_device *adev = ctx->adev; - struct amd_sched_rq *rq; - struct amd_sched_entity *entity; + struct drm_sched_rq *rq; + struct drm_sched_entity *entity; struct amdgpu_ring *ring; - enum amd_sched_priority ctx_prio; + enum drm_sched_priority ctx_prio; ctx->override_priority = priority; - ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ? + ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
ctx->init_priority : ctx->override_priority; for (i = 0; i < adev->num_rings; i++) { @@ -407,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) continue; - amd_sched_entity_set_rq(entity, rq); + drm_sched_entity_set_rq(entity, rq); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 70c9e5756b02..98cc4df02b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3058,7 +3058,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) continue; kthread_park(ring->sched.thread); - amd_sched_hw_job_reset(&ring->sched, &job->base); + drm_sched_hw_job_reset(&ring->sched, &job->base); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); @@ -3111,7 +3111,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) if (job && job->ring->idx != i) continue; - amd_sched_job_recovery(&ring->sched); + drm_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 31383e004947..1d8011bca182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -912,7 +912,7 @@ static int __init amdgpu_init(void) if (r) goto error_fence; - r = amd_sched_fence_slab_init(); + r = drm_sched_fence_slab_init(); if (r) goto error_sched; @@ -944,7 +944,7 @@ static void __exit amdgpu_exit(void) pci_unregister_driver(pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); - amd_sched_fence_slab_fini(); + drm_sched_fence_slab_fini(); amdgpu_fence_slab_fini(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 604ac03a42e4..14699637913a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -445,7 +445,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, */ timeout = MAX_SCHEDULE_TIMEOUT; } - r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, timeout, ring->name); if (r) { @@ -503,7 +503,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) } amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); - amd_sched_fini(&ring->sched); + drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) dma_fence_put(ring->fence_drv.fences[j]); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index bdc210ac74f8..013c0a8cfb60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,7 +28,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_timedout(struct amd_sched_job *s_job) +static void amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); @@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i], f); } -static void amdgpu_job_free_cb(struct amd_sched_job *s_job) +static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); @@ -118,7 +118,7 @@ void 
amdgpu_job_free(struct amdgpu_job *job) } int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct amd_sched_entity *entity, void *owner, + struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { int r; @@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, if (!f) return -EINVAL; - r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); + r = drm_sched_job_init(&job->base, &ring->sched, entity, owner); if (r) return r; @@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amdgpu_ring_priority_get(job->ring, job->base.s_priority); - amd_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base, entity); return 0; } -static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, - struct amd_sched_entity *s_entity) +static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) { struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; @@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit); if (fence && explicit) { - if (amd_sched_dependency_optimized(fence, s_entity)) { + if (drm_sched_dependency_optimized(fence, s_entity)) { r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false); if (r) DRM_ERROR("Error adding fence to sync (%d)\n", r); @@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, return fence; } -static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) +static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) { struct dma_fence *fence = NULL, *finished; struct amdgpu_device *adev; @@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) return fence; } -const struct amd_sched_backend_ops amdgpu_sched_ops = { +const struct drm_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, .timedout_job = amdgpu_job_timedout, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index a98fbbb4739f..41c75f9632dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) * Release a request for executing at @priority */ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { int i; @@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, return; /* no need to restore if the job is already at the lowest priority */ - if (priority == AMD_SCHED_PRIORITY_NORMAL) + if (priority == DRM_SCHED_PRIORITY_NORMAL) return; mutex_lock(&ring->priority_mutex); @@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, goto out_unlock; /* decay priority to the next level with a job available */ - for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) { - if (i == AMD_SCHED_PRIORITY_NORMAL + for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { + if (i == DRM_SCHED_PRIORITY_NORMAL || atomic_read(&ring->num_jobs[i])) { ring->priority = i; ring->funcs->set_priority(ring, i); @@ -206,7 +206,7 @@ out_unlock: * Request a ring's priority to be raised to 
@priority (refcounted). */ void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { if (!ring->funcs->set_priority) return; @@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->max_dw = max_dw; - ring->priority = AMD_SCHED_PRIORITY_NORMAL; + ring->priority = DRM_SCHED_PRIORITY_NORMAL; mutex_init(&ring->priority_mutex); INIT_LIST_HEAD(&ring->lru_list); amdgpu_ring_lru_touch(adev, ring); - for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i) + for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) atomic_set(&ring->num_jobs[i], 0); if (amdgpu_debugfs_ring_init(adev, ring)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index a6b89e3932a5..641e3fd7ba3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -25,7 +25,7 @@ #define __AMDGPU_RING_H__ #include -#include "gpu_scheduler.h" +#include /* max number of rings */ #define AMDGPU_MAX_RINGS 18 @@ -154,14 +154,14 @@ struct amdgpu_ring_funcs { void (*emit_tmz)(struct amdgpu_ring *ring, bool start); /* priority functions */ void (*set_priority) (struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); }; struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; - struct amd_gpu_scheduler sched; + struct drm_gpu_scheduler sched; struct list_head lru_list; struct amdgpu_bo *ring_obj; @@ -196,7 +196,7 @@ struct amdgpu_ring { unsigned vm_inv_eng; bool has_compute_vm_bug; - atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX]; + atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; struct mutex priority_mutex; /* protected by priority_mutex */ int priority; @@ -212,9 +212,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 290cc3f9c433..86a0715d9431 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -29,29 +29,29 @@ #include "amdgpu_vm.h" -enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) +enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) { switch (amdgpu_priority) { case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return AMD_SCHED_PRIORITY_HIGH_HW; + return DRM_SCHED_PRIORITY_HIGH_HW; case AMDGPU_CTX_PRIORITY_HIGH: - return AMD_SCHED_PRIORITY_HIGH_SW; + return DRM_SCHED_PRIORITY_HIGH_SW; case AMDGPU_CTX_PRIORITY_NORMAL: - return AMD_SCHED_PRIORITY_NORMAL; + return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW: - return AMD_SCHED_PRIORITY_LOW; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_UNSET: - return AMD_SCHED_PRIORITY_UNSET; + return DRM_SCHED_PRIORITY_UNSET; default: WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return AMD_SCHED_PRIORITY_INVALID; + return 
DRM_SCHED_PRIORITY_INVALID; } } static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { struct file *filp = fcheck(fd); struct drm_file *file; @@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_sched *args = data; struct amdgpu_device *adev = dev->dev_private; - enum amd_sched_priority priority; + enum drm_sched_priority priority; int r; priority = amdgpu_to_sched_priority(args->in.priority); - if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID) + if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID) return -EINVAL; switch (args->in.op) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h index b28c067d3822..2a1a0c734bdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h @@ -27,7 +27,7 @@ #include -enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); +enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index bb79fd3f3c36..df65c66dc956 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -64,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct dma_fence *f) { - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (s_fence) { struct amdgpu_ring *ring; @@ -85,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, */ static void *amdgpu_sync_get_owner(struct dma_fence *f) { - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (s_fence) return s_fence->owner; @@ -248,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, hash_for_each_safe(sync->fences, i, tmp, e, node) { struct dma_fence *f = e->fence; - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (dma_fence_is_signaled(f)) { hash_del(&e->node); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 952e0bf3bc84..7db9556b389b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -76,7 +76,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int r; adev->mman.mem_global_referenced = false; @@ -108,8 +108,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); ring = adev->mman.buffer_funcs_ring; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; - r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move run queue.\n"); @@ -131,7 +131,7 @@ error_mem: static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) { if (adev->mman.mem_global_referenced) { - amd_sched_entity_fini(adev->mman.entity.sched, + 
drm_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity); mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 4f9433e61406..167856f6080f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -25,7 +25,7 @@ #define __AMDGPU_TTM_H__ #include "amdgpu.h" -#include "gpu_scheduler.h" +#include #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -55,7 +55,7 @@ struct amdgpu_mman { struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ - struct amd_sched_entity entity; + struct drm_sched_entity entity; }; struct amdgpu_copy_mem { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f2a9e17fdb4..916e51670bfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } ring = &adev->uvd.ring; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); @@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int i; kfree(adev->uvd.saved_bo); - amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, &adev->uvd.gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 845eea993f75..32ea20b99e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -51,8 +51,8 @@ struct amdgpu_uvd { struct amdgpu_irq_src irq; bool address_64_bit; bool use_ctx_buf; - struct amd_sched_entity entity; - struct amd_sched_entity entity_enc; + struct drm_sched_entity entity; + struct drm_sched_entity entity_enc; uint32_t srbm_soft_reset; unsigned num_enc_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ba6d846b08ff..641deb0527ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work); int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; const char *fw_name; const struct common_firmware_header *hdr; unsigned ucode_version, version_major, version_minor, binary_id; @@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) } ring = &adev->vce.ring[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, rq, amdgpu_sched_jobs, NULL); if (r 
!= 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); @@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; - amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); + drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, (void **)&adev->vce.cpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 5ce54cde472d..162cae94e3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -46,7 +46,7 @@ struct amdgpu_vce { struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; unsigned harvest_config; - struct amd_sched_entity entity; + struct drm_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index d7ba048c2f80..88e204d537f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -51,7 +51,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -104,8 +104,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } ring = &adev->vcn.ring_dec; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN dec run queue.\n"); @@ -113,8 +113,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } ring = &adev->vcn.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN enc run queue.\n"); @@ -130,9 +130,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) kfree(adev->vcn.saved_bo); - amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); + drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); - amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc); + drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc); amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, &adev->vcn.gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d50ba0657854..2fd7db891689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -56,8 +56,8 @@ struct amdgpu_vcn { struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_irq_src irq; - struct amd_sched_entity entity_dec; - struct amd_sched_entity entity_enc; + struct drm_sched_entity entity_dec; + struct drm_sched_entity entity_enc; unsigned num_enc_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3ecdbdfb04dd..dbe37d621796 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ 
-2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int r, i; u64 flags; uint64_t init_pde_value = 0; @@ -2663,8 +2663,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); ring_instance %= adev->vm_manager.vm_pte_num_rings; ring = adev->vm_manager.vm_pte_rings[ring_instance]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; - r = amd_sched_entity_init(&ring->sched, &vm->entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + r = drm_sched_entity_init(&ring->sched, &vm->entity, rq, amdgpu_sched_jobs, NULL); if (r) return r; @@ -2744,7 +2744,7 @@ error_free_root: vm->root.base.bo = NULL; error_free_sched_entity: - amd_sched_entity_fini(&ring->sched, &vm->entity); + drm_sched_entity_fini(&ring->sched, &vm->entity); return r; } @@ -2803,7 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } - amd_sched_entity_fini(vm->entity.sched, &vm->entity); + drm_sched_entity_fini(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 43ea131dd411..159980414964 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -24,10 +24,11 @@ #ifndef __AMDGPU_VM_H__ #define __AMDGPU_VM_H__ -#include #include +#include +#include +#include -#include "gpu_scheduler.h" #include "amdgpu_sync.h" #include "amdgpu_ring.h" @@ -175,7 +176,7 @@ struct amdgpu_vm { spinlock_t freed_lock; /* Scheduler entity for page table updates */ - struct amd_sched_entity entity; + struct drm_sched_entity entity; /* client id and PASID (TODO: replace client_id with PASID) */ u64 client_id; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d02493cf9175..c7dc69031fb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6472,10 +6472,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, mutex_unlock(&adev->srbm_mutex); } static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { struct amdgpu_device *adev = ring->adev; - bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW; + bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) return; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 0e8b887cf03e..86123448a8ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; ring = &adev->uvd.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void 
*handle) return r; if (uvd_v6_0_enc_support(adev)) { - amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.ring_enc[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 660fa41dc877..416611150edd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -385,7 +385,7 @@ static int uvd_v7_0_early_init(void *handle) static int uvd_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -416,8 +416,8 @@ static int uvd_v7_0_sw_init(void *handle) } ring = &adev->uvd.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -472,7 +472,7 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.ring_enc[i]); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h deleted file mode 100644 index b42a78922505..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
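As an aside, the driver-side pattern repeated across the UVD/VCE/VCN/TTM/VM hunks above is always the same pairing: initialize an entity against one of the scheduler's per-priority run queues, and tear it down again when the block is removed. A minimal sketch against the renamed API as it stands in this series; the example_* names are placeholders, not code from the patch:

#include <drm/gpu_scheduler.h>

static struct drm_sched_entity example_entity;

static int example_entity_setup(struct drm_gpu_scheduler *sched, uint32_t queue_depth)
{
	/* Attach the entity to the run queue of the desired priority level. */
	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* Last argument: optional shared "guilty" flag, unused here. */
	return drm_sched_entity_init(sched, &example_entity, rq, queue_depth, NULL);
}

static void example_entity_teardown(struct drm_gpu_scheduler *sched)
{
	/* Waits for jobs still queued on the entity to drain (or discards
	 * them if the process was killed). */
	drm_sched_entity_fini(sched, &example_entity);
}
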
- * - */ - -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _GPU_SCHED_TRACE_H_ - -#include -#include -#include - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM gpu_sched -#define TRACE_INCLUDE_FILE gpu_sched_trace - -TRACE_EVENT(amd_sched_job, - TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity), - TP_ARGS(sched_job, entity), - TP_STRUCT__entry( - __field(struct amd_sched_entity *, entity) - __field(struct dma_fence *, fence) - __field(const char *, name) - __field(uint64_t, id) - __field(u32, job_count) - __field(int, hw_job_count) - ), - - TP_fast_assign( - __entry->entity = entity; - __entry->id = sched_job->id; - __entry->fence = &sched_job->s_fence->finished; - __entry->name = sched_job->sched->name; - __entry->job_count = spsc_queue_count(&entity->job_queue); - __entry->hw_job_count = atomic_read( - &sched_job->sched->hw_rq_count); - ), - TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->id, - __entry->fence, __entry->name, - __entry->job_count, __entry->hw_job_count) -); - -TRACE_EVENT(amd_sched_process_job, - TP_PROTO(struct amd_sched_fence *fence), - TP_ARGS(fence), - TP_STRUCT__entry( - __field(struct dma_fence *, fence) - ), - - TP_fast_assign( - __entry->fence = &fence->finished; - ), - TP_printk("fence=%p signaled", __entry->fence) -); - -#endif - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#include diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c deleted file mode 100644 index dcb987e6d94a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ /dev/null @@ -1,744 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
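The amdgpu_ctx_priority_override() hunk earlier reduces to the same small operation for every ring: look up the run queue that matches the new priority and re-home the entity onto it with drm_sched_entity_set_rq(). A hedged sketch with illustrative names:

#include <drm/gpu_scheduler.h>

static void example_apply_priority(struct drm_gpu_scheduler *sched,
				   struct drm_sched_entity *entity,
				   enum drm_sched_priority prio)
{
	/* One run queue per priority level inside each scheduler. */
	struct drm_sched_rq *rq = &sched->sched_rq[prio];

	/* Future jobs from this entity are picked from the new queue. */
	drm_sched_entity_set_rq(entity, rq);
}
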
- * - * - */ -#include -#include -#include -#include -#include -#include "gpu_scheduler.h" - -#include "spsc_queue.h" - -#define CREATE_TRACE_POINTS -#include "gpu_sched_trace.h" - -#define to_amd_sched_job(sched_job) \ - container_of((sched_job), struct amd_sched_job, queue_node) - -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); - -/* Initialize a given run queue struct */ -static void amd_sched_rq_init(struct amd_sched_rq *rq) -{ - spin_lock_init(&rq->lock); - INIT_LIST_HEAD(&rq->entities); - rq->current_entity = NULL; -} - -static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (!list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_add_tail(&entity->list, &rq->entities); - spin_unlock(&rq->lock); -} - -static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_del_init(&entity->list); - if (rq->current_entity == entity) - rq->current_entity = NULL; - spin_unlock(&rq->lock); -} - -/** - * Select an entity which could provide a job to run - * - * @rq The run queue to check. - * - * Try to find a ready entity, returns NULL if none found. - */ -static struct amd_sched_entity * -amd_sched_rq_select_entity(struct amd_sched_rq *rq) -{ - struct amd_sched_entity *entity; - - spin_lock(&rq->lock); - - entity = rq->current_entity; - if (entity) { - list_for_each_entry_continue(entity, &rq->entities, list) { - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - } - } - - list_for_each_entry(entity, &rq->entities, list) { - - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - - if (entity == rq->current_entity) - break; - } - - spin_unlock(&rq->lock); - - return NULL; -} - -/** - * Init a context entity used by scheduler when submit to HW ring. - * - * @sched The pointer to the scheduler - * @entity The pointer to a valid amd_sched_entity - * @rq The run queue this entity belongs - * @kernel If this is an entity for the kernel - * @jobs The max number of jobs in the job queue - * - * return 0 if succeed. 
negative error code on failure -*/ -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t *guilty) -{ - if (!(sched && entity && rq)) - return -EINVAL; - - memset(entity, 0, sizeof(struct amd_sched_entity)); - INIT_LIST_HEAD(&entity->list); - entity->rq = rq; - entity->sched = sched; - entity->guilty = guilty; - - spin_lock_init(&entity->rq_lock); - spin_lock_init(&entity->queue_lock); - spsc_queue_init(&entity->job_queue); - - atomic_set(&entity->fence_seq, 0); - entity->fence_context = dma_fence_context_alloc(2); - - return 0; -} - -/** - * Query if entity is initialized - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * return true if entity is initialized, false otherwise -*/ -static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - return entity->sched == sched && - entity->rq != NULL; -} - -/** - * Check if entity is idle - * - * @entity The pointer to a valid scheduler entity - * - * Return true if entity don't has any unscheduled jobs. - */ -static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) -{ - rmb(); - if (spsc_queue_peek(&entity->job_queue) == NULL) - return true; - - return false; -} - -/** - * Check if entity is ready - * - * @entity The pointer to a valid scheduler entity - * - * Return true if entity could provide a job. - */ -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - -/** - * Destroy a context entity - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * Cleanup and free the allocated resources. - */ -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - int r; - - if (!amd_sched_entity_is_initialized(sched, entity)) - return; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL - */ - if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) - r = -ERESTARTSYS; - else - r = wait_event_killable(sched->job_scheduled, - amd_sched_entity_is_idle(entity)); - amd_sched_entity_set_rq(entity, NULL); - if (r) { - struct amd_sched_job *job; - - /* Park the kernel for a moment to make sure it isn't processing - * our enity. 
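drm_sched_dependency_optimized(), used by the amdgpu_job_dependency hunk earlier in this patch, answers one narrow question: does a dependency fence come from the same scheduler (or from the entity itself), so the new job only has to wait for it to be scheduled rather than to finish on the hardware? A thin illustrative wrapper, assuming nothing beyond that call:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static bool example_can_pipeline(struct dma_fence *fence,
				 struct drm_sched_entity *s_entity)
{
	/* Returns false for NULL or already-signalled fences, true when the
	 * fence comes from s_entity's scheduler (or from s_entity itself). */
	return drm_sched_dependency_optimized(fence, s_entity);
}
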
- */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct amd_sched_fence *s_fence = job->s_fence; - amd_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - amd_sched_fence_finished(s_fence); - WARN_ON(s_fence->parent); - dma_fence_put(&s_fence->finished); - sched->ops->free_job(job); - } - } -} - -static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); - amd_sched_wakeup(entity->sched); -} - -static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); -} - -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq) -{ - if (entity->rq == rq) - return; - - spin_lock(&entity->rq_lock); - - if (entity->rq) - amd_sched_rq_remove_entity(entity->rq, entity); - - entity->rq = rq; - if (rq) - amd_sched_rq_add_entity(rq, entity); - - spin_unlock(&entity->rq_lock); -} - -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_fence *s_fence; - - if (!fence || dma_fence_is_signaled(fence)) - return false; - if (fence->context == entity->fence_context) - return true; - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) - return true; - - return false; -} - -static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct dma_fence * fence = entity->dependency; - struct amd_sched_fence *s_fence; - - if (fence->context == entity->fence_context) { - /* We can ignore fences from ourself */ - dma_fence_put(entity->dependency); - return false; - } - - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { - - /* - * Fence is from the same scheduler, only need to wait for - * it to be scheduled - */ - fence = dma_fence_get(&s_fence->scheduled); - dma_fence_put(entity->dependency); - entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - amd_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; - } - - if (!dma_fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - return true; - - dma_fence_put(entity->dependency); - return false; -} - -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_job *sched_job = to_amd_sched_job( - spsc_queue_peek(&entity->job_queue)); - - if (!sched_job) - return NULL; - - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) - if (amd_sched_entity_add_dependency_cb(entity)) - return NULL; - - /* skip jobs from entity that marked guilty */ - if (entity->guilty && atomic_read(entity->guilty)) - dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); - - spsc_queue_pop(&entity->job_queue); - return sched_job; -} - -/** - * Submit a job to the job 
queue - * - * @sched_job The pointer to job required to submit - * - * Returns 0 for success, negative error code otherwise. - */ -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = sched_job->sched; - bool first = false; - - trace_amd_sched_job(sched_job, entity); - - spin_lock(&entity->queue_lock); - first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); - - spin_unlock(&entity->queue_lock); - - /* first job wakes up scheduler */ - if (first) { - /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); - amd_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - amd_sched_wakeup(sched); - } -} - -/* job_finish is called after hw fence signaled - */ -static void amd_sched_job_finish(struct work_struct *work) -{ - struct amd_sched_job *s_job = container_of(work, struct amd_sched_job, - finish_work); - struct amd_gpu_scheduler *sched = s_job->sched; - - /* remove job from ring_mirror_list */ - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - struct amd_sched_job *next; - - spin_unlock(&sched->job_list_lock); - cancel_delayed_work_sync(&s_job->work_tdr); - spin_lock(&sched->job_list_lock); - - /* queue TDR for next job */ - next = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - - if (next) - schedule_delayed_work(&next->work_tdr, sched->timeout); - } - spin_unlock(&sched->job_list_lock); - dma_fence_put(&s_job->s_fence->finished); - sched->ops->free_job(s_job); -} - -static void amd_sched_job_finish_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct amd_sched_job *job = container_of(cb, struct amd_sched_job, - finish_cb); - schedule_work(&job->finish_work); -} - -static void amd_sched_job_begin(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb, - amd_sched_job_finish_cb); - - spin_lock(&sched->job_list_lock); - list_add_tail(&s_job->node, &sched->ring_mirror_list); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node) == s_job) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - spin_unlock(&sched->job_list_lock); -} - -static void amd_sched_job_timedout(struct work_struct *work) -{ - struct amd_sched_job *job = container_of(work, struct amd_sched_job, - work_tdr.work); - - job->sched->ops->timedout_job(job); -} - -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) -{ - struct amd_sched_job *s_job; - struct amd_sched_entity *entity, *tmp; - int i;; - - spin_lock(&sched->job_list_lock); - list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { - if (s_job->s_fence->parent && - dma_fence_remove_callback(s_job->s_fence->parent, - &s_job->s_fence->cb)) { - dma_fence_put(s_job->s_fence->parent); - s_job->s_fence->parent = NULL; - atomic_dec(&sched->hw_rq_count); - } - } - spin_unlock(&sched->job_list_lock); - - if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) { - atomic_inc(&bad->karma); - /* don't increase @bad's karma if it's from KERNEL RQ, - * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs) - * corrupt but keep in mind that kernel jobs always considered good. 
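Taken together with the push/wake logic above, driver-side submission in the renamed world is a two-step flow: drm_sched_job_init() binds the job to a scheduler and entity (creating its scheduled/finished fence pair), then drm_sched_entity_push_job() queues it on the entity and wakes the scheduler if it was the first job. A sketch mirroring the amdgpu_job_submit() hunk earlier; struct example_job and its fields are illustrative only:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

struct example_job {
	struct drm_sched_job base;	/* scheduler bookkeeping */
	/* driver-private payload would follow */
};

static int example_submit(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct example_job *job, void *owner,
			  struct dma_fence **out_finished)
{
	int r;

	r = drm_sched_job_init(&job->base, sched, entity, owner);
	if (r)
		return r;

	/* Take a reference on the "finished" fence before handing the job over. */
	*out_finished = dma_fence_get(&job->base.s_fence->finished);

	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}
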
- */ - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) { - struct amd_sched_rq *rq = &sched->sched_rq[i]; - - spin_lock(&rq->lock); - list_for_each_entry_safe(entity, tmp, &rq->entities, list) { - if (bad->s_fence->scheduled.context == entity->fence_context) { - if (atomic_read(&bad->karma) > bad->sched->hang_limit) - if (entity->guilty) - atomic_set(entity->guilty, 1); - break; - } - } - spin_unlock(&rq->lock); - if (&entity->list != &rq->entities) - break; - } - } -} - -void amd_sched_job_kickout(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - spin_unlock(&sched->job_list_lock); -} - -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_job *s_job, *tmp; - bool found_guilty = false; - int r; - - spin_lock(&sched->job_list_lock); - s_job = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { - struct amd_sched_fence *s_fence = s_job->s_fence; - struct dma_fence *fence; - uint64_t guilty_context; - - if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { - found_guilty = true; - guilty_context = s_job->s_fence->scheduled.context; - } - - if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) - dma_fence_set_error(&s_fence->finished, -ECANCELED); - - spin_unlock(&sched->job_list_lock); - fence = sched->ops->run_job(s_job); - atomic_inc(&sched->hw_rq_count); - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - spin_lock(&sched->job_list_lock); - } - spin_unlock(&sched->job_list_lock); -} - -/* init a sched_job with basic field */ -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner) -{ - job->sched = sched; - job->s_priority = entity->rq - sched->sched_rq; - job->s_fence = amd_sched_fence_create(entity, owner); - if (!job->s_fence) - return -ENOMEM; - job->id = atomic64_inc_return(&sched->job_id_count); - - INIT_WORK(&job->finish_work, amd_sched_job_finish); - INIT_LIST_HEAD(&job->node); - INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout); - - return 0; -} - -/** - * Return ture if we can push more jobs to the hw. 
- */ -static bool amd_sched_ready(struct amd_gpu_scheduler *sched) -{ - return atomic_read(&sched->hw_rq_count) < - sched->hw_submission_limit; -} - -/** - * Wake up the scheduler when it is ready - */ -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) -{ - if (amd_sched_ready(sched)) - wake_up_interruptible(&sched->wake_up_worker); -} - -/** - * Select next entity to process -*/ -static struct amd_sched_entity * -amd_sched_select_entity(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_entity *entity; - int i; - - if (!amd_sched_ready(sched)) - return NULL; - - /* Kernel run queue has higher priority than normal run queue*/ - for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) { - entity = amd_sched_rq_select_entity(&sched->sched_rq[i]); - if (entity) - break; - } - - return entity; -} - -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_fence *s_fence = - container_of(cb, struct amd_sched_fence, cb); - struct amd_gpu_scheduler *sched = s_fence->sched; - - dma_fence_get(&s_fence->finished); - atomic_dec(&sched->hw_rq_count); - amd_sched_fence_finished(s_fence); - - trace_amd_sched_process_job(s_fence); - dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); -} - -static bool amd_sched_blocked(struct amd_gpu_scheduler *sched) -{ - if (kthread_should_park()) { - kthread_parkme(); - return true; - } - - return false; -} - -static int amd_sched_main(void *param) -{ - struct sched_param sparam = {.sched_priority = 1}; - struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; - int r; - - sched_setscheduler(current, SCHED_FIFO, &sparam); - - while (!kthread_should_stop()) { - struct amd_sched_entity *entity = NULL; - struct amd_sched_fence *s_fence; - struct amd_sched_job *sched_job; - struct dma_fence *fence; - - wait_event_interruptible(sched->wake_up_worker, - (!amd_sched_blocked(sched) && - (entity = amd_sched_select_entity(sched))) || - kthread_should_stop()); - - if (!entity) - continue; - - sched_job = amd_sched_entity_pop_job(entity); - if (!sched_job) - continue; - - s_fence = sched_job->s_fence; - - atomic_inc(&sched->hw_rq_count); - amd_sched_job_begin(sched_job); - - fence = sched->ops->run_job(sched_job); - amd_sched_fence_scheduled(s_fence); - - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - - wake_up(&sched->job_scheduled); - } - return 0; -} - -/** - * Init a gpu scheduler instance - * - * @sched The pointer to the scheduler - * @ops The backend operations for this scheduler. - * @hw_submissions Number of hw submissions to do. - * @name Name used for debugging - * - * Return 0 on success, otherwise error code. 
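The @ops passed to the init call documented above are the driver's half of the contract: resolve job dependencies, run the job on the hardware, react to a timeout, and free the job once its finished fence has signalled. A minimal skeleton showing only the expected signatures; every example_* symbol is illustrative:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *example_dependency(struct drm_sched_job *job,
					    struct drm_sched_entity *s_entity)
{
	return NULL;	/* no outstanding dependency fences */
}

static struct dma_fence *example_run_job(struct drm_sched_job *job)
{
	return NULL;	/* normally the hardware fence of the submission */
}

static void example_timedout_job(struct drm_sched_job *job)
{
	/* kick the driver's reset/recovery path */
}

static void example_free_job(struct drm_sched_job *job)
{
	/* release the driver-side job structure */
}

static const struct drm_sched_backend_ops example_sched_ops = {
	.dependency	= example_dependency,
	.run_job	= example_run_job,
	.timedout_job	= example_timedout_job,
	.free_job	= example_free_job,
};
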
-*/ -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - unsigned hw_submission, - unsigned hang_limit, - long timeout, - const char *name) -{ - int i; - sched->ops = ops; - sched->hw_submission_limit = hw_submission; - sched->name = name; - sched->timeout = timeout; - sched->hang_limit = hang_limit; - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) - amd_sched_rq_init(&sched->sched_rq[i]); - - init_waitqueue_head(&sched->wake_up_worker); - init_waitqueue_head(&sched->job_scheduled); - INIT_LIST_HEAD(&sched->ring_mirror_list); - spin_lock_init(&sched->job_list_lock); - atomic_set(&sched->hw_rq_count, 0); - atomic64_set(&sched->job_id_count, 0); - - /* Each scheduler will run on a seperate kernel thread */ - sched->thread = kthread_run(amd_sched_main, sched, sched->name); - if (IS_ERR(sched->thread)) { - DRM_ERROR("Failed to create scheduler for %s.\n", name); - return PTR_ERR(sched->thread); - } - - return 0; -} - -/** - * Destroy a gpu scheduler - * - * @sched The pointer to the scheduler - */ -void amd_sched_fini(struct amd_gpu_scheduler *sched) -{ - if (sched->thread) - kthread_stop(sched->thread); -} diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h deleted file mode 100644 index b590fcc2786a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _GPU_SCHEDULER_H_ -#define _GPU_SCHEDULER_H_ - -#include -#include -#include "spsc_queue.h" - -struct amd_gpu_scheduler; -struct amd_sched_rq; - -enum amd_sched_priority { - AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_NORMAL, - AMD_SCHED_PRIORITY_HIGH_SW, - AMD_SCHED_PRIORITY_HIGH_HW, - AMD_SCHED_PRIORITY_KERNEL, - AMD_SCHED_PRIORITY_MAX, - AMD_SCHED_PRIORITY_INVALID = -1, - AMD_SCHED_PRIORITY_UNSET = -2 -}; - - -/** - * A scheduler entity is a wrapper around a job queue or a group - * of other entities. Entities take turns emitting jobs from their - * job queues to corresponding hardware ring based on scheduling - * policy. 
-*/ -struct amd_sched_entity { - struct list_head list; - struct amd_sched_rq *rq; - spinlock_t rq_lock; - struct amd_gpu_scheduler *sched; - - spinlock_t queue_lock; - struct spsc_queue job_queue; - - atomic_t fence_seq; - uint64_t fence_context; - - struct dma_fence *dependency; - struct dma_fence_cb cb; - atomic_t *guilty; /* points to ctx's guilty */ -}; - -/** - * Run queue is a set of entities scheduling command submissions for - * one specific ring. It implements the scheduling policy that selects - * the next entity to emit commands from. -*/ -struct amd_sched_rq { - spinlock_t lock; - struct list_head entities; - struct amd_sched_entity *current_entity; -}; - -struct amd_sched_fence { - struct dma_fence scheduled; - struct dma_fence finished; - struct dma_fence_cb cb; - struct dma_fence *parent; - struct amd_gpu_scheduler *sched; - spinlock_t lock; - void *owner; -}; - -struct amd_sched_job { - struct spsc_node queue_node; - struct amd_gpu_scheduler *sched; - struct amd_sched_fence *s_fence; - struct dma_fence_cb finish_cb; - struct work_struct finish_work; - struct list_head node; - struct delayed_work work_tdr; - uint64_t id; - atomic_t karma; - enum amd_sched_priority s_priority; -}; - -extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; -extern const struct dma_fence_ops amd_sched_fence_ops_finished; -static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) -{ - if (f->ops == &amd_sched_fence_ops_scheduled) - return container_of(f, struct amd_sched_fence, scheduled); - - if (f->ops == &amd_sched_fence_ops_finished) - return container_of(f, struct amd_sched_fence, finished); - - return NULL; -} - -static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold) -{ - return (s_job && atomic_inc_return(&s_job->karma) > threshold); -} - -/** - * Define the backend operations called by the scheduler, - * these functions should be implemented in driver side -*/ -struct amd_sched_backend_ops { - struct dma_fence *(*dependency)(struct amd_sched_job *sched_job, - struct amd_sched_entity *s_entity); - struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); - void (*timedout_job)(struct amd_sched_job *sched_job); - void (*free_job)(struct amd_sched_job *sched_job); -}; - -/** - * One scheduler is implemented for each hardware ring -*/ -struct amd_gpu_scheduler { - const struct amd_sched_backend_ops *ops; - uint32_t hw_submission_limit; - long timeout; - const char *name; - struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX]; - wait_queue_head_t wake_up_worker; - wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; - atomic64_t job_id_count; - struct task_struct *thread; - struct list_head ring_mirror_list; - spinlock_t job_list_lock; - int hang_limit; -}; - -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); -void amd_sched_fini(struct amd_gpu_scheduler *sched); - -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t* guilty); -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity); -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity); -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq); - -int amd_sched_fence_slab_init(void); -void amd_sched_fence_slab_fini(void); - -struct 
amd_sched_fence *amd_sched_fence_create( - struct amd_sched_entity *s_entity, void *owner); -void amd_sched_fence_scheduled(struct amd_sched_fence *fence); -void amd_sched_fence_finished(struct amd_sched_fence *fence); -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner); -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job); -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity); -void amd_sched_job_kickout(struct amd_sched_job *s_job); - -#endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c deleted file mode 100644 index 33f54d0a5c4f..000000000000 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#include -#include -#include -#include -#include "gpu_scheduler.h" - -static struct kmem_cache *sched_fence_slab; - -int amd_sched_fence_slab_init(void) -{ - sched_fence_slab = kmem_cache_create( - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sched_fence_slab) - return -ENOMEM; - - return 0; -} - -void amd_sched_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(sched_fence_slab); -} - -struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, - void *owner) -{ - struct amd_sched_fence *fence = NULL; - unsigned seq; - - fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); - if (fence == NULL) - return NULL; - - fence->owner = owner; - fence->sched = entity->sched; - spin_lock_init(&fence->lock); - - seq = atomic_inc_return(&entity->fence_seq); - dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, - &fence->lock, entity->fence_context, seq); - dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, - &fence->lock, entity->fence_context + 1, seq); - - return fence; -} - -void amd_sched_fence_scheduled(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->scheduled); - - if (!ret) - DMA_FENCE_TRACE(&fence->scheduled, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->scheduled, - "was already signaled\n"); -} - -void amd_sched_fence_finished(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->finished); - - if (!ret) - DMA_FENCE_TRACE(&fence->finished, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->finished, - "was already signaled\n"); -} - -static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) -{ - return "amd_sched"; -} - -static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - return (const char *)fence->sched->name; -} - -static bool amd_sched_fence_enable_signaling(struct dma_fence *f) -{ - return true; -} - -/** - * amd_sched_fence_free - free up the fence memory - * - * @rcu: RCU callback head - * - * Free up the fence memory after the RCU grace period. - */ -static void amd_sched_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(fence->parent); - kmem_cache_free(sched_fence_slab, fence); -} - -/** - * amd_sched_fence_release_scheduled - callback that fence can be freed - * - * @fence: fence - * - * This function is called when the reference count becomes zero. - * It just RCU schedules freeing up the fence. - */ -static void amd_sched_fence_release_scheduled(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - call_rcu(&fence->finished.rcu, amd_sched_fence_free); -} - -/** - * amd_sched_fence_release_finished - drop extra reference - * - * @f: fence - * - * Drop the extra reference from the scheduled fence to the base fence. 
- */ -static void amd_sched_fence_release_finished(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(&fence->scheduled); -} - -const struct dma_fence_ops amd_sched_fence_ops_scheduled = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_scheduled, -}; - -const struct dma_fence_ops amd_sched_fence_ops_finished = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_finished, -}; diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h deleted file mode 100644 index 5902f35ce759..000000000000 --- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_ -#define AMD_SCHEDULER_SPSC_QUEUE_H_ - -#include - -/** SPSC lockless queue */ - -struct spsc_node { - - /* Stores spsc_node* */ - struct spsc_node *next; -}; - -struct spsc_queue { - - struct spsc_node *head; - - /* atomic pointer to struct spsc_node* */ - atomic_long_t tail; - - atomic_t job_count; -}; - -static inline void spsc_queue_init(struct spsc_queue *queue) -{ - queue->head = NULL; - atomic_long_set(&queue->tail, (long)&queue->head); - atomic_set(&queue->job_count, 0); -} - -static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) -{ - return queue->head; -} - -static inline int spsc_queue_count(struct spsc_queue *queue) -{ - return atomic_read(&queue->job_count); -} - -static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) -{ - struct spsc_node **tail; - - node->next = NULL; - - preempt_disable(); - - tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); - WRITE_ONCE(*tail, node); - atomic_inc(&queue->job_count); - - /* - * In case of first element verify new node will be visible to the consumer - * thread when we ping the kernel thread that there is new work to do. 
- */ - smp_wmb(); - - preempt_enable(); - - return tail == &queue->head; -} - - -static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) -{ - struct spsc_node *next, *node; - - /* Verify reading from memory and not the cache */ - smp_rmb(); - - node = READ_ONCE(queue->head); - - if (!node) - return NULL; - - next = READ_ONCE(node->next); - WRITE_ONCE(queue->head, next); - - if (unlikely(!next)) { - /* slowpath for the last element in the queue */ - - if (atomic_long_cmpxchg(&queue->tail, - (long)&node->next, (long) &queue->head) != (long)&node->next) { - /* Updating tail failed wait for new next to appear */ - do { - smp_rmb(); - } while (unlikely(!(queue->head = READ_ONCE(node->next)))); - } - } - - atomic_dec(&queue->job_count); - return node; -} - - - -#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile new file mode 100644 index 000000000000..ed877912d06d --- /dev/null +++ b/drivers/gpu/drm/scheduler/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Iinclude/drm +gpu-sched-y := gpu_scheduler.o sched_fence.o + +obj-$(CONFIG_DRM_SCHED) += gpu-sched.o diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c new file mode 100644 index 000000000000..2c18996d59c5 --- /dev/null +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -0,0 +1,744 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); +static void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); + +/* Initialize a given run queue struct */ +static void drm_sched_rq_init(struct drm_sched_rq *rq) +{ + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->entities); + rq->current_entity = NULL; +} + +static void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) +{ + if (!list_empty(&entity->list)) + return; + spin_lock(&rq->lock); + list_add_tail(&entity->list, &rq->entities); + spin_unlock(&rq->lock); +} + +static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) +{ + if (list_empty(&entity->list)) + return; + spin_lock(&rq->lock); + list_del_init(&entity->list); + if (rq->current_entity == entity) + rq->current_entity = NULL; + spin_unlock(&rq->lock); +} + +/** + * Select an entity which could provide a job to run + * + * @rq The run queue to check. + * + * Try to find a ready entity, returns NULL if none found. + */ +static struct drm_sched_entity * +drm_sched_rq_select_entity(struct drm_sched_rq *rq) +{ + struct drm_sched_entity *entity; + + spin_lock(&rq->lock); + + entity = rq->current_entity; + if (entity) { + list_for_each_entry_continue(entity, &rq->entities, list) { + if (drm_sched_entity_is_ready(entity)) { + rq->current_entity = entity; + spin_unlock(&rq->lock); + return entity; + } + } + } + + list_for_each_entry(entity, &rq->entities, list) { + + if (drm_sched_entity_is_ready(entity)) { + rq->current_entity = entity; + spin_unlock(&rq->lock); + return entity; + } + + if (entity == rq->current_entity) + break; + } + + spin_unlock(&rq->lock); + + return NULL; +} + +/** + * Init a context entity used by scheduler when submit to HW ring. + * + * @sched The pointer to the scheduler + * @entity The pointer to a valid drm_sched_entity + * @rq The run queue this entity belongs + * @kernel If this is an entity for the kernel + * @jobs The max number of jobs in the job queue + * + * return 0 if succeed. 
negative error code on failure +*/ +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty) +{ + if (!(sched && entity && rq)) + return -EINVAL; + + memset(entity, 0, sizeof(struct drm_sched_entity)); + INIT_LIST_HEAD(&entity->list); + entity->rq = rq; + entity->sched = sched; + entity->guilty = guilty; + + spin_lock_init(&entity->rq_lock); + spin_lock_init(&entity->queue_lock); + spsc_queue_init(&entity->job_queue); + + atomic_set(&entity->fence_seq, 0); + entity->fence_context = dma_fence_context_alloc(2); + + return 0; +} +EXPORT_SYMBOL(drm_sched_entity_init); + +/** + * Query if entity is initialized + * + * @sched Pointer to scheduler instance + * @entity The pointer to a valid scheduler entity + * + * return true if entity is initialized, false otherwise +*/ +static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity) +{ + return entity->sched == sched && + entity->rq != NULL; +} + +/** + * Check if entity is idle + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity don't has any unscheduled jobs. + */ +static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) +{ + rmb(); + if (spsc_queue_peek(&entity->job_queue) == NULL) + return true; + + return false; +} + +/** + * Check if entity is ready + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity could provide a job. + */ +static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) +{ + if (spsc_queue_peek(&entity->job_queue) == NULL) + return false; + + if (READ_ONCE(entity->dependency)) + return false; + + return true; +} + +/** + * Destroy a context entity + * + * @sched Pointer to scheduler instance + * @entity The pointer to a valid scheduler entity + * + * Cleanup and free the allocated resources. + */ +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity) +{ + int r; + + if (!drm_sched_entity_is_initialized(sched, entity)) + return; + /** + * The client will not queue more IBs during this fini, consume existing + * queued IBs or discard them on SIGKILL + */ + if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) + r = -ERESTARTSYS; + else + r = wait_event_killable(sched->job_scheduled, + drm_sched_entity_is_idle(entity)); + drm_sched_entity_set_rq(entity, NULL); + if (r) { + struct drm_sched_job *job; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + if (entity->dependency) { + dma_fence_remove_callback(entity->dependency, + &entity->cb); + dma_fence_put(entity->dependency); + entity->dependency = NULL; + } + + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + drm_sched_fence_scheduled(s_fence); + dma_fence_set_error(&s_fence->finished, -ESRCH); + drm_sched_fence_finished(s_fence); + WARN_ON(s_fence->parent); + dma_fence_put(&s_fence->finished); + sched->ops->free_job(job); + } + } +} +EXPORT_SYMBOL(drm_sched_entity_fini); + +static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); + drm_sched_wakeup(entity->sched); +} + +static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); +} + +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) +{ + if (entity->rq == rq) + return; + + spin_lock(&entity->rq_lock); + + if (entity->rq) + drm_sched_rq_remove_entity(entity->rq, entity); + + entity->rq = rq; + if (rq) + drm_sched_rq_add_entity(rq, entity); + + spin_unlock(&entity->rq_lock); +} +EXPORT_SYMBOL(drm_sched_entity_set_rq); + +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct drm_sched_fence *s_fence; + + if (!fence || dma_fence_is_signaled(fence)) + return false; + if (fence->context == entity->fence_context) + return true; + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) + return true; + + return false; +} +EXPORT_SYMBOL(drm_sched_dependency_optimized); + +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct dma_fence * fence = entity->dependency; + struct drm_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + dma_fence_put(entity->dependency); + return false; + } + + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); + entity->dependency = fence; + if (!dma_fence_add_callback(fence, &entity->cb, + drm_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + dma_fence_put(fence); + return false; + } + + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + drm_sched_entity_wakeup)) + return true; + + dma_fence_put(entity->dependency); + return false; +} + +static struct drm_sched_job * +drm_sched_entity_pop_job(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct drm_sched_job *sched_job = to_drm_sched_job( + spsc_queue_peek(&entity->job_queue)); + + if (!sched_job) + return NULL; + + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) + if (drm_sched_entity_add_dependency_cb(entity)) + return NULL; + + /* skip jobs from entity that marked guilty */ + if (entity->guilty && atomic_read(entity->guilty)) + 
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); + + spsc_queue_pop(&entity->job_queue); + return sched_job; +} + +/** + * Submit a job to the job queue + * + * @sched_job The pointer to job required to submit + * + * Returns 0 for success, negative error code otherwise. + */ +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = sched_job->sched; + bool first = false; + + trace_drm_sched_job(sched_job, entity); + + spin_lock(&entity->queue_lock); + first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); + + spin_unlock(&entity->queue_lock); + + /* first job wakes up scheduler */ + if (first) { + /* Add the entity to the run queue */ + spin_lock(&entity->rq_lock); + drm_sched_rq_add_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + drm_sched_wakeup(sched); + } +} +EXPORT_SYMBOL(drm_sched_entity_push_job); + +/* job_finish is called after hw fence signaled + */ +static void drm_sched_job_finish(struct work_struct *work) +{ + struct drm_sched_job *s_job = container_of(work, struct drm_sched_job, + finish_work); + struct drm_gpu_scheduler *sched = s_job->sched; + + /* remove job from ring_mirror_list */ + spin_lock(&sched->job_list_lock); + list_del_init(&s_job->node); + if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { + struct drm_sched_job *next; + + spin_unlock(&sched->job_list_lock); + cancel_delayed_work_sync(&s_job->work_tdr); + spin_lock(&sched->job_list_lock); + + /* queue TDR for next job */ + next = list_first_entry_or_null(&sched->ring_mirror_list, + struct drm_sched_job, node); + + if (next) + schedule_delayed_work(&next->work_tdr, sched->timeout); + } + spin_unlock(&sched->job_list_lock); + dma_fence_put(&s_job->s_fence->finished); + sched->ops->free_job(s_job); +} + +static void drm_sched_job_finish_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + schedule_work(&job->finish_work); +} + +static void drm_sched_job_begin(struct drm_sched_job *s_job) +{ + struct drm_gpu_scheduler *sched = s_job->sched; + + dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb, + drm_sched_job_finish_cb); + + spin_lock(&sched->job_list_lock); + list_add_tail(&s_job->node, &sched->ring_mirror_list); + if (sched->timeout != MAX_SCHEDULE_TIMEOUT && + list_first_entry_or_null(&sched->ring_mirror_list, + struct drm_sched_job, node) == s_job) + schedule_delayed_work(&s_job->work_tdr, sched->timeout); + spin_unlock(&sched->job_list_lock); +} + +static void drm_sched_job_timedout(struct work_struct *work) +{ + struct drm_sched_job *job = container_of(work, struct drm_sched_job, + work_tdr.work); + + job->sched->ops->timedout_job(job); +} + +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) +{ + struct drm_sched_job *s_job; + struct drm_sched_entity *entity, *tmp; + int i;; + + spin_lock(&sched->job_list_lock); + list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { + if (s_job->s_fence->parent && + dma_fence_remove_callback(s_job->s_fence->parent, + &s_job->s_fence->cb)) { + dma_fence_put(s_job->s_fence->parent); + s_job->s_fence->parent = NULL; + atomic_dec(&sched->hw_rq_count); + } + } + spin_unlock(&sched->job_list_lock); + + if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { + atomic_inc(&bad->karma); + /* don't increase @bad's karma if it's from KERNEL RQ, + * becuase sometimes GPU hang would cause kernel jobs (like VM 
updating jobs) + * corrupt but keep in mind that kernel jobs always considered good. + */ + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) { + struct drm_sched_rq *rq = &sched->sched_rq[i]; + + spin_lock(&rq->lock); + list_for_each_entry_safe(entity, tmp, &rq->entities, list) { + if (bad->s_fence->scheduled.context == entity->fence_context) { + if (atomic_read(&bad->karma) > bad->sched->hang_limit) + if (entity->guilty) + atomic_set(entity->guilty, 1); + break; + } + } + spin_unlock(&rq->lock); + if (&entity->list != &rq->entities) + break; + } + } +} +EXPORT_SYMBOL(drm_sched_hw_job_reset); + +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *s_job, *tmp; + bool found_guilty = false; + int r; + + spin_lock(&sched->job_list_lock); + s_job = list_first_entry_or_null(&sched->ring_mirror_list, + struct drm_sched_job, node); + if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) + schedule_delayed_work(&s_job->work_tdr, sched->timeout); + + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + struct drm_sched_fence *s_fence = s_job->s_fence; + struct dma_fence *fence; + uint64_t guilty_context; + + if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { + found_guilty = true; + guilty_context = s_job->s_fence->scheduled.context; + } + + if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) + dma_fence_set_error(&s_fence->finished, -ECANCELED); + + spin_unlock(&sched->job_list_lock); + fence = sched->ops->run_job(s_job); + atomic_inc(&sched->hw_rq_count); + if (fence) { + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + drm_sched_process_job); + if (r == -ENOENT) + drm_sched_process_job(fence, &s_fence->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", + r); + dma_fence_put(fence); + } else { + drm_sched_process_job(NULL, &s_fence->cb); + } + spin_lock(&sched->job_list_lock); + } + spin_unlock(&sched->job_list_lock); +} +EXPORT_SYMBOL(drm_sched_job_recovery); + +/* init a sched_job with basic field */ +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner) +{ + job->sched = sched; + job->s_priority = entity->rq - sched->sched_rq; + job->s_fence = drm_sched_fence_create(entity, owner); + if (!job->s_fence) + return -ENOMEM; + job->id = atomic64_inc_return(&sched->job_id_count); + + INIT_WORK(&job->finish_work, drm_sched_job_finish); + INIT_LIST_HEAD(&job->node); + INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout); + + return 0; +} +EXPORT_SYMBOL(drm_sched_job_init); + +/** + * Return ture if we can push more jobs to the hw. 
+ */ +static bool drm_sched_ready(struct drm_gpu_scheduler *sched) +{ + return atomic_read(&sched->hw_rq_count) < + sched->hw_submission_limit; +} + +/** + * Wake up the scheduler when it is ready + */ +static void drm_sched_wakeup(struct drm_gpu_scheduler *sched) +{ + if (drm_sched_ready(sched)) + wake_up_interruptible(&sched->wake_up_worker); +} + +/** + * Select next entity to process +*/ +static struct drm_sched_entity * +drm_sched_select_entity(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_entity *entity; + int i; + + if (!drm_sched_ready(sched)) + return NULL; + + /* Kernel run queue has higher priority than normal run queue*/ + for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); + if (entity) + break; + } + + return entity; +} + +static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_fence *s_fence = + container_of(cb, struct drm_sched_fence, cb); + struct drm_gpu_scheduler *sched = s_fence->sched; + + dma_fence_get(&s_fence->finished); + atomic_dec(&sched->hw_rq_count); + drm_sched_fence_finished(s_fence); + + trace_drm_sched_process_job(s_fence); + dma_fence_put(&s_fence->finished); + wake_up_interruptible(&sched->wake_up_worker); +} + +static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) +{ + if (kthread_should_park()) { + kthread_parkme(); + return true; + } + + return false; +} + +static int drm_sched_main(void *param) +{ + struct sched_param sparam = {.sched_priority = 1}; + struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; + int r; + + sched_setscheduler(current, SCHED_FIFO, &sparam); + + while (!kthread_should_stop()) { + struct drm_sched_entity *entity = NULL; + struct drm_sched_fence *s_fence; + struct drm_sched_job *sched_job; + struct dma_fence *fence; + + wait_event_interruptible(sched->wake_up_worker, + (!drm_sched_blocked(sched) && + (entity = drm_sched_select_entity(sched))) || + kthread_should_stop()); + + if (!entity) + continue; + + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) + continue; + + s_fence = sched_job->s_fence; + + atomic_inc(&sched->hw_rq_count); + drm_sched_job_begin(sched_job); + + fence = sched->ops->run_job(sched_job); + drm_sched_fence_scheduled(s_fence); + + if (fence) { + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + drm_sched_process_job); + if (r == -ENOENT) + drm_sched_process_job(fence, &s_fence->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", + r); + dma_fence_put(fence); + } else { + drm_sched_process_job(NULL, &s_fence->cb); + } + + wake_up(&sched->job_scheduled); + } + return 0; +} + +/** + * Init a gpu scheduler instance + * + * @sched The pointer to the scheduler + * @ops The backend operations for this scheduler. + * @hw_submissions Number of hw submissions to do. + * @name Name used for debugging + * + * Return 0 on success, otherwise error code. 
+*/ +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + unsigned hw_submission, + unsigned hang_limit, + long timeout, + const char *name) +{ + int i; + sched->ops = ops; + sched->hw_submission_limit = hw_submission; + sched->name = name; + sched->timeout = timeout; + sched->hang_limit = hang_limit; + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) + drm_sched_rq_init(&sched->sched_rq[i]); + + init_waitqueue_head(&sched->wake_up_worker); + init_waitqueue_head(&sched->job_scheduled); + INIT_LIST_HEAD(&sched->ring_mirror_list); + spin_lock_init(&sched->job_list_lock); + atomic_set(&sched->hw_rq_count, 0); + atomic64_set(&sched->job_id_count, 0); + + /* Each scheduler will run on a seperate kernel thread */ + sched->thread = kthread_run(drm_sched_main, sched, sched->name); + if (IS_ERR(sched->thread)) { + DRM_ERROR("Failed to create scheduler for %s.\n", name); + return PTR_ERR(sched->thread); + } + + return 0; +} +EXPORT_SYMBOL(drm_sched_init); + +/** + * Destroy a gpu scheduler + * + * @sched The pointer to the scheduler + */ +void drm_sched_fini(struct drm_gpu_scheduler *sched) +{ + if (sched->thread) + kthread_stop(sched->thread); +} +EXPORT_SYMBOL(drm_sched_fini); diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c new file mode 100644 index 000000000000..f6f2955890c4 --- /dev/null +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -0,0 +1,187 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include + +static struct kmem_cache *sched_fence_slab; + +int drm_sched_fence_slab_init(void) +{ + sched_fence_slab = kmem_cache_create( + "drm_sched_fence", sizeof(struct drm_sched_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sched_fence_slab) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init); + +void drm_sched_fence_slab_fini(void) +{ + rcu_barrier(); + kmem_cache_destroy(sched_fence_slab); +} +EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini); + +void drm_sched_fence_scheduled(struct drm_sched_fence *fence) +{ + int ret = dma_fence_signal(&fence->scheduled); + + if (!ret) + DMA_FENCE_TRACE(&fence->scheduled, + "signaled from irq context\n"); + else + DMA_FENCE_TRACE(&fence->scheduled, + "was already signaled\n"); +} + +void drm_sched_fence_finished(struct drm_sched_fence *fence) +{ + int ret = dma_fence_signal(&fence->finished); + + if (!ret) + DMA_FENCE_TRACE(&fence->finished, + "signaled from irq context\n"); + else + DMA_FENCE_TRACE(&fence->finished, + "was already signaled\n"); +} + +static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence) +{ + return "drm_sched"; +} + +static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + return (const char *)fence->sched->name; +} + +static bool drm_sched_fence_enable_signaling(struct dma_fence *f) +{ + return true; +} + +/** + * amd_sched_fence_free - free up the fence memory + * + * @rcu: RCU callback head + * + * Free up the fence memory after the RCU grace period. + */ +static void drm_sched_fence_free(struct rcu_head *rcu) +{ + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); + struct drm_sched_fence *fence = to_drm_sched_fence(f); + + dma_fence_put(fence->parent); + kmem_cache_free(sched_fence_slab, fence); +} + +/** + * amd_sched_fence_release_scheduled - callback that fence can be freed + * + * @fence: fence + * + * This function is called when the reference count becomes zero. + * It just RCU schedules freeing up the fence. + */ +static void drm_sched_fence_release_scheduled(struct dma_fence *f) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + + call_rcu(&fence->finished.rcu, drm_sched_fence_free); +} + +/** + * amd_sched_fence_release_finished - drop extra reference + * + * @f: fence + * + * Drop the extra reference from the scheduled fence to the base fence. 
+ */ +static void drm_sched_fence_release_finished(struct dma_fence *f) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + + dma_fence_put(&fence->scheduled); +} + +const struct dma_fence_ops drm_sched_fence_ops_scheduled = { + .get_driver_name = drm_sched_fence_get_driver_name, + .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, + .release = drm_sched_fence_release_scheduled, +}; + +const struct dma_fence_ops drm_sched_fence_ops_finished = { + .get_driver_name = drm_sched_fence_get_driver_name, + .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, + .release = drm_sched_fence_release_finished, +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) +{ + if (f->ops == &drm_sched_fence_ops_scheduled) + return container_of(f, struct drm_sched_fence, scheduled); + + if (f->ops == &drm_sched_fence_ops_finished) + return container_of(f, struct drm_sched_fence, finished); + + return NULL; +} +EXPORT_SYMBOL(to_drm_sched_fence); + +struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, + void *owner) +{ + struct drm_sched_fence *fence = NULL; + unsigned seq; + + fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->owner = owner; + fence->sched = entity->sched; + spin_lock_init(&fence->lock); + + seq = atomic_inc_return(&entity->fence_seq); + dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, seq); + dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished, + &fence->lock, entity->fence_context + 1, seq); + + return fence; +} diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000000..d29da4cbb042 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,176 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include +#include + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to corresponding hardware ring based on scheduling + * policy. +*/ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + struct drm_gpu_scheduler *sched; + + spinlock_t queue_lock; + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; /* points to ctx's guilty */ +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct drm_sched_rq { + spinlock_t lock; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +struct drm_sched_fence { + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; + struct drm_gpu_scheduler *sched; + spinlock_t lock; + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side +*/ +struct drm_sched_backend_ops { + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + void (*timedout_job)(struct drm_sched_job *sched_job); + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty); +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct 
drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +int drm_sched_fence_slab_init(void); +void drm_sched_fence_slab_fini(void); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h new file mode 100644 index 000000000000..0789e8d0a0e1 --- /dev/null +++ b/include/drm/gpu_scheduler_trace.h @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include +#include +#include + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_scheduler +#define TRACE_INCLUDE_FILE gpu_scheduler_trace + +TRACE_EVENT(drm_sched_job, + TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), + TP_ARGS(sched_job, entity), + TP_STRUCT__entry( + __field(struct drm_sched_entity *, entity) + __field(struct dma_fence *, fence) + __field(const char *, name) + __field(uint64_t, id) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = entity; + __entry->id = sched_job->id; + __entry->fence = &sched_job->s_fence->finished; + __entry->name = sched_job->sched->name; + __entry->job_count = spsc_queue_count(&entity->job_queue); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) +); + +TRACE_EVENT(drm_sched_process_job, + TP_PROTO(struct drm_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct dma_fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->finished; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000000..125f096c88cb --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include +#include + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ -- cgit v1.2.3 From dcebf026e6f69fb79e7f88d10681faf4f8a985ba Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 12 Dec 2017 14:09:30 -0500 Subject: drm/amdgpu: Add gpu_recovery parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add new parameter to control GPU recovery procedure. v2: Add auto logic where reset is disabled for bare metal and enabled for SR-IOV. Allow forced reset from debugfs. 
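
The decision this parameter drives reduces to a small predicate. The sketch below is illustrative only: recovery_allowed() and the plain is_sriov_vf flag are stand-ins for the in-kernel check around amdgpu_sriov_vf(adev) shown in the amdgpu_device.c hunk further down, with 1 = enable, 0 = disable and -1 = auto.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the gating added to amdgpu_gpu_recover():
 * forced (debugfs) resets always proceed, otherwise the module
 * parameter decides, and "auto" only recovers SR-IOV virtual functions.
 */
static bool recovery_allowed(int gpu_recovery, bool is_sriov_vf, bool force)
{
	if (force)
		return true;		/* debugfs: reset regardless of the parameter */
	if (gpu_recovery == 0)
		return false;		/* explicitly disabled */
	if (gpu_recovery == -1)
		return is_sriov_vf;	/* auto: bare metal skips recovery */
	return true;			/* explicitly enabled */
}

int main(void)
{
	printf("auto, bare metal : %d\n", recovery_allowed(-1, false, false)); /* 0 */
	printf("auto, SR-IOV VF  : %d\n", recovery_allowed(-1, true, false));  /* 1 */
	printf("disabled, forced : %d\n", recovery_allowed(0, false, true));   /* 1 */
	return 0;
}
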
Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 8 files changed, 19 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c31c5496dc5e..ffbe99d839a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se; extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; +extern int amdgpu_gpu_recovery; #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; @@ -1910,7 +1911,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 046b9d5bc14d..3f63f5ca4fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3009,11 +3009,12 @@ error: * * @adev: amdgpu device pointer * @job: which job trigger hang + * @force forces reset regardless of amdgpu_gpu_recovery * * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. 
*/ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) { struct drm_atomic_state *state = NULL; uint64_t reset_flags = 0; @@ -3024,6 +3025,12 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) return 0; } + if (!force && (amdgpu_gpu_recovery == 0 || + (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) { + DRM_INFO("GPU recovery disabled.\n"); + return 0; + } + dev_info(adev->dev, "GPU reset begin!\n"); mutex_lock(&adev->lock_reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0b039bdcf84e..b734cd668ff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0; int amdgpu_job_hang_limit = 0; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; +int amdgpu_gpu_recovery = -1; /* auto */ MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444); MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)"); module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444); +MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto"); +module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444); + #ifdef CONFIG_DRM_AMDGPU_SI #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7cb71a8e21df..d3ce12149542 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -705,7 +705,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; seq_printf(m, "gpu recover\n"); - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, true); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index c340774082ea..c43643e8c8c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, false); } /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 013c0a8cfb60..be8a437fad54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - amdgpu_gpu_recover(job->adev, job); + amdgpu_gpu_recover(job->adev, job, false); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 71f56900d6fe..7ade56d59c27 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -253,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL); + 
amdgpu_gpu_recover(adev, NULL, false); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index df52824c0cd4..e05823d86cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, false); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From 5f152b5e69a5392181b0a84bd55fe17a417364ac Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:40:49 -0500 Subject: drm/amdgpu: rename amdgpu_gpu_recover MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device to the name for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 7 files changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d4abb7f04a86..04e5498929c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1887,7 +1887,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0080776c4936..cfeceab29224 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2611,7 +2611,7 @@ error: } /** - * amdgpu_gpu_recover - reset the asic and recover scheduler + * amdgpu_device_gpu_recover - reset the asic and recover scheduler * * @adev: amdgpu device pointer * @job: which job trigger hang @@ -2620,7 +2620,8 @@ error: * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. 
*/ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job *job, bool force) { struct drm_atomic_state *state = NULL; uint64_t reset_flags = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index da1510f65ee0..008e1984b7e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -693,7 +693,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; seq_printf(m, "gpu recover\n"); - amdgpu_gpu_recover(adev, NULL, true); + amdgpu_device_gpu_recover(adev, NULL, true); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index c43643e8c8c8..56bcd59c3399 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index be8a437fad54..56d9ee5013a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - amdgpu_gpu_recover(job->adev, job, false); + amdgpu_device_gpu_recover(job->adev, job, false); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 43e74ec93147..271452d3999a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -253,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index af2d47e9abdc..9fc1c37344ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From 620f774f4687d86c420152309eefb0ef0fcc7e51 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 18 Dec 2017 16:53:03 +0100 Subject: drm/amdgpu: separate VMID and PASID handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move both into the new files amdgpu_ids.[ch]. No functional change. 
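
The PASID allocator that moves into amdgpu_ids.c (full implementation in the hunk below) tries the widest requested range first and only narrows on -ENOSPC, so small PASIDs stay available for callers that can address fewer bits. A rough user-space model of that loop follows; ida_get() is a toy stand-in for ida_simple_get(), and the width is capped at 15 bits only to keep the bitmap small (the real code caps at 31).

#include <stdbool.h>
#include <stdio.h>

#define TOY_EINVAL 22
#define TOY_ENOSPC 28
#define ID_SPACE   (1u << 16)

static bool used[ID_SPACE];

/* Toy stand-in for ida_simple_get(): lowest free id in [lo, hi). */
static int ida_get(unsigned int lo, unsigned int hi)
{
	unsigned int id;

	for (id = lo; id < hi && id < ID_SPACE; id++) {
		if (!used[id]) {
			used[id] = true;
			return (int)id;
		}
	}
	return -TOY_ENOSPC;
}

/* Mirrors the loop in amdgpu_pasid_alloc(): try [2^(bits-1), 2^bits)
 * first, step the width down whenever that range is exhausted.
 */
static int pasid_alloc_model(unsigned int bits)
{
	int pasid = -TOY_EINVAL;

	for (bits = bits > 15 ? 15 : bits; bits > 0; bits--) {
		pasid = ida_get(1u << (bits - 1), 1u << bits);
		if (pasid != -TOY_ENOSPC)
			break;
	}
	return pasid;
}

int main(void)
{
	printf("first 16-bit PASID: %d\n", pasid_alloc_model(16)); /* 16384 */
	printf("first  4-bit PASID: %d\n", pasid_alloc_model(4));  /* 8 */
	return 0;
}
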
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 459 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 91 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 422 +------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 44 +-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 13 files changed, 579 insertions(+), 465 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index d8da12c114b1..d6e5b7273853 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -52,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o + amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \ + amdgpu_ids.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 1e3e9be7d77e..1ae149456c9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -169,8 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, - .alloc_pasid = amdgpu_vm_alloc_pasid, - .free_pasid = amdgpu_vm_free_pasid, + .alloc_pasid = amdgpu_pasid_alloc, + .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 056929b8ccd0..e9b436bc8dcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -128,8 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, - .alloc_pasid = amdgpu_vm_alloc_pasid, - .free_pasid = amdgpu_vm_free_pasid, + .alloc_pasid = amdgpu_pasid_alloc, + .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0cf86eb357d6..03a69942cce5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -230,8 +230,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (r) { 
dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) - amdgpu_vm_reset_id(adev, ring->funcs->vmhub, - job->vm_id); + amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id); amdgpu_ring_undo(ring); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c new file mode 100644 index 000000000000..71f8a76d4c10 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -0,0 +1,459 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu_ids.h" + +#include +#include +#include + +#include "amdgpu.h" +#include "amdgpu_trace.h" + +/* + * PASID manager + * + * PASIDs are global address space identifiers that can be shared + * between the GPU, an IOMMU and the driver. VMs on different devices + * may use the same PASID if they share the same address + * space. Therefore PASIDs are allocated using a global IDA. VMs are + * looked up from the PASID per amdgpu_device. + */ +static DEFINE_IDA(amdgpu_pasid_ida); + +/** + * amdgpu_pasid_alloc - Allocate a PASID + * @bits: Maximum width of the PASID in bits, must be at least 1 + * + * Allocates a PASID of the given width while keeping smaller PASIDs + * available if possible. + * + * Returns a positive integer on success. Returns %-EINVAL if bits==0. + * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on + * memory allocation failure. + */ +int amdgpu_pasid_alloc(unsigned int bits) +{ + int pasid = -EINVAL; + + for (bits = min(bits, 31U); bits > 0; bits--) { + pasid = ida_simple_get(&amdgpu_pasid_ida, + 1U << (bits - 1), 1U << bits, + GFP_KERNEL); + if (pasid != -ENOSPC) + break; + } + + return pasid; +} + +/** + * amdgpu_pasid_free - Free a PASID + * @pasid: PASID to free + */ +void amdgpu_pasid_free(unsigned int pasid) +{ + ida_simple_remove(&amdgpu_pasid_ida, pasid); +} + +/* + * VMID manager + * + * VMIDs are a per VMHUB identifier for page tables handling. + */ + +/** + * amdgpu_vmid_had_gpu_reset - check if reset occured since last use + * + * @adev: amdgpu_device pointer + * @id: VMID structure + * + * Check if GPU reset occured since last use of the VMID. 
+ */ +bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vmid *id) +{ + return id->current_gpu_reset_count != + atomic_read(&adev->gpu_reset_counter); +} + +/* idr_mgr->lock must be held */ +static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, + struct amdgpu_ring *ring, + struct amdgpu_sync *sync, + struct dma_fence *fence, + struct amdgpu_job *job) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + uint64_t fence_context = adev->fence_context + ring->idx; + struct amdgpu_vmid *id = vm->reserved_vmid[vmhub]; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct dma_fence *updates = sync->last_vm_update; + int r = 0; + struct dma_fence *flushed, *tmp; + bool needs_flush = vm->use_cpu_for_update; + + flushed = id->flushed_updates; + if ((amdgpu_vmid_had_gpu_reset(adev, id)) || + (atomic64_read(&id->owner) != vm->client_id) || + (job->vm_pd_addr != id->pd_gpu_addr) || + (updates && (!flushed || updates->context != flushed->context || + dma_fence_is_later(updates, flushed))) || + (!id->last_flush || (id->last_flush->context != fence_context && + !dma_fence_is_signaled(id->last_flush)))) { + needs_flush = true; + /* to prevent one context starved by another context */ + id->pd_gpu_addr = 0; + tmp = amdgpu_sync_peek_fence(&id->active, ring); + if (tmp) { + r = amdgpu_sync_fence(adev, sync, tmp, false); + return r; + } + } + + /* Good we can use this VMID. Remember this submission as + * user of the VMID. + */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + if (r) + goto out; + + if (updates && (!flushed || updates->context != flushed->context || + dma_fence_is_later(updates, flushed))) { + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + } + id->pd_gpu_addr = job->vm_pd_addr; + atomic64_set(&id->owner, vm->client_id); + job->vm_needs_flush = needs_flush; + if (needs_flush) { + dma_fence_put(id->last_flush); + id->last_flush = NULL; + } + job->vm_id = id - id_mgr->ids; + trace_amdgpu_vm_grab_id(vm, ring, job); +out: + return r; +} + +/** + * amdgpu_vm_grab_id - allocate the next free VMID + * + * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies + * @fence: fence protecting ID from reuse + * + * Allocate an id for the vm, adding fences to the sync obj as necessary. 
+ */ +int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync, struct dma_fence *fence, + struct amdgpu_job *job) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + uint64_t fence_context = adev->fence_context + ring->idx; + struct dma_fence *updates = sync->last_vm_update; + struct amdgpu_vmid *id, *idle; + struct dma_fence **fences; + unsigned i; + int r = 0; + + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[vmhub]) { + r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job); + mutex_unlock(&id_mgr->lock); + return r; + } + fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); + if (!fences) { + mutex_unlock(&id_mgr->lock); + return -ENOMEM; + } + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry(idle, &id_mgr->ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); + if (!fences[i]) + break; + ++i; + } + + /* If we can't find a idle VMID to use, wait till one becomes available */ + if (&idle->list == &id_mgr->ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct dma_fence_array *array; + unsigned j; + + for (j = 0; j < i; ++j) + dma_fence_get(fences[j]); + + array = dma_fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + dma_fence_put(fences[j]); + kfree(fences); + r = -ENOMEM; + goto error; + } + + + r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); + dma_fence_put(&array->base); + if (r) + goto error; + + mutex_unlock(&id_mgr->lock); + return 0; + + } + kfree(fences); + + job->vm_needs_flush = vm->use_cpu_for_update; + /* Check if we can use a VMID already assigned to this VM */ + list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { + struct dma_fence *flushed; + bool needs_flush = vm->use_cpu_for_update; + + /* Check all the prerequisites to using this VMID */ + if (amdgpu_vmid_had_gpu_reset(adev, id)) + continue; + + if (atomic64_read(&id->owner) != vm->client_id) + continue; + + if (job->vm_pd_addr != id->pd_gpu_addr) + continue; + + if (!id->last_flush || + (id->last_flush->context != fence_context && + !dma_fence_is_signaled(id->last_flush))) + needs_flush = true; + + flushed = id->flushed_updates; + if (updates && (!flushed || dma_fence_is_later(updates, flushed))) + needs_flush = true; + + /* Concurrent flushes are only possible starting with Vega10 */ + if (adev->asic_type < CHIP_VEGA10 && needs_flush) + continue; + + /* Good we can use this VMID. Remember this submission as + * user of the VMID. + */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + if (r) + goto error; + + if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + } + + if (needs_flush) + goto needs_flush; + else + goto no_flush_needed; + + }; + + /* Still no ID to use? 
Then use the idle one found earlier */ + id = idle; + + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + if (r) + goto error; + + id->pd_gpu_addr = job->vm_pd_addr; + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + atomic64_set(&id->owner, vm->client_id); + +needs_flush: + job->vm_needs_flush = true; + dma_fence_put(id->last_flush); + id->last_flush = NULL; + +no_flush_needed: + list_move_tail(&id->list, &id_mgr->ids_lru); + + job->vm_id = id - id_mgr->ids; + trace_amdgpu_vm_grab_id(vm, ring, job); + +error: + mutex_unlock(&id_mgr->lock); + return r; +} + +int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub) +{ + struct amdgpu_vmid_mgr *id_mgr; + struct amdgpu_vmid *idle; + int r = 0; + + id_mgr = &adev->vm_manager.id_mgr[vmhub]; + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[vmhub]) + goto unlock; + if (atomic_inc_return(&id_mgr->reserved_vmid_num) > + AMDGPU_VM_MAX_RESERVED_VMID) { + DRM_ERROR("Over limitation of reserved vmid\n"); + atomic_dec(&id_mgr->reserved_vmid_num); + r = -EINVAL; + goto unlock; + } + /* Select the first entry VMID */ + idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list); + list_del_init(&idle->list); + vm->reserved_vmid[vmhub] = idle; + mutex_unlock(&id_mgr->lock); + + return 0; +unlock: + mutex_unlock(&id_mgr->lock); + return r; +} + +void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub) +{ + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[vmhub]) { + list_add(&vm->reserved_vmid[vmhub]->list, + &id_mgr->ids_lru); + vm->reserved_vmid[vmhub] = NULL; + atomic_dec(&id_mgr->reserved_vmid_num); + } + mutex_unlock(&id_mgr->lock); +} + +/** + * amdgpu_vmid_reset - reset VMID to zero + * + * @adev: amdgpu device structure + * @vm_id: vmid number to use + * + * Reset saved GDW, GWS and OA to force switch on next flush. 
+ */ +void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, + unsigned vmid) +{ + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct amdgpu_vmid *id = &id_mgr->ids[vmid]; + + atomic64_set(&id->owner, 0); + id->gds_base = 0; + id->gds_size = 0; + id->gws_base = 0; + id->gws_size = 0; + id->oa_base = 0; + id->oa_size = 0; +} + +/** + * amdgpu_vmid_reset_all - reset VMID to zero + * + * @adev: amdgpu device structure + * + * Reset VMID to force flush on next use + */ +void amdgpu_vmid_reset_all(struct amdgpu_device *adev) +{ + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + struct amdgpu_vmid_mgr *id_mgr = + &adev->vm_manager.id_mgr[i]; + + for (j = 1; j < id_mgr->num_ids; ++j) + amdgpu_vmid_reset(adev, i, j); + } +} + +/** + * amdgpu_vmid_mgr_init - init the VMID manager + * + * @adev: amdgpu_device pointer + * + * Initialize the VM manager structures + */ +void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) +{ + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + struct amdgpu_vmid_mgr *id_mgr = + &adev->vm_manager.id_mgr[i]; + + mutex_init(&id_mgr->lock); + INIT_LIST_HEAD(&id_mgr->ids_lru); + atomic_set(&id_mgr->reserved_vmid_num, 0); + + /* skip over VMID 0, since it is the system VM */ + for (j = 1; j < id_mgr->num_ids; ++j) { + amdgpu_vmid_reset(adev, i, j); + amdgpu_sync_create(&id_mgr->ids[i].active); + list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); + } + } + + adev->vm_manager.fence_context = + dma_fence_context_alloc(AMDGPU_MAX_RINGS); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + adev->vm_manager.seqno[i] = 0; +} + +/** + * amdgpu_vmid_mgr_fini - cleanup VM manager + * + * @adev: amdgpu_device pointer + * + * Cleanup the VM manager and free resources. + */ +void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev) +{ + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + struct amdgpu_vmid_mgr *id_mgr = + &adev->vm_manager.id_mgr[i]; + + mutex_destroy(&id_mgr->lock); + for (j = 0; j < AMDGPU_NUM_VMID; ++j) { + struct amdgpu_vmid *id = &id_mgr->ids[j]; + + amdgpu_sync_free(&id->active); + dma_fence_put(id->flushed_updates); + dma_fence_put(id->last_flush); + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h new file mode 100644 index 000000000000..ad931fa570b3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -0,0 +1,91 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __AMDGPU_IDS_H__ +#define __AMDGPU_IDS_H__ + +#include +#include +#include +#include + +#include "amdgpu_sync.h" + +/* maximum number of VMIDs */ +#define AMDGPU_NUM_VMID 16 + +struct amdgpu_device; +struct amdgpu_vm; +struct amdgpu_ring; +struct amdgpu_sync; +struct amdgpu_job; + +struct amdgpu_vmid { + struct list_head list; + struct amdgpu_sync active; + struct dma_fence *last_flush; + atomic64_t owner; + + uint64_t pd_gpu_addr; + /* last flushed PD/PT update */ + struct dma_fence *flushed_updates; + + uint32_t current_gpu_reset_count; + + uint32_t gds_base; + uint32_t gds_size; + uint32_t gws_base; + uint32_t gws_size; + uint32_t oa_base; + uint32_t oa_size; +}; + +struct amdgpu_vmid_mgr { + struct mutex lock; + unsigned num_ids; + struct list_head ids_lru; + struct amdgpu_vmid ids[AMDGPU_NUM_VMID]; + atomic_t reserved_vmid_num; +}; + +int amdgpu_pasid_alloc(unsigned int bits); +void amdgpu_pasid_free(unsigned int pasid); + +bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vmid *id); +int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub); +void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub); +int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync, struct dma_fence *fence, + struct amdgpu_job *job); +void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, + unsigned vmid); +void amdgpu_vmid_reset_all(struct amdgpu_device *adev); + +void amdgpu_vmid_mgr_init(struct amdgpu_device *adev); +void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 56d9ee5013a9..cdc9e0f5336a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -161,9 +161,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, while (fence == NULL && vm && !job->vm_id) { struct amdgpu_ring *ring = job->ring; - r = amdgpu_vm_grab_id(vm, ring, &job->sync, - &job->base.s_fence->finished, - job); + r = amdgpu_vmid_grab(vm, ring, &job->sync, + &job->base.s_fence->finished, + job); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 398abbcbf029..01ee8e2258c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -33,52 +33,6 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -/* - * PASID manager - * - * PASIDs are global address space identifiers that can be shared - * between the GPU, an IOMMU and the driver. VMs on different devices - * may use the same PASID if they share the same address - * space. Therefore PASIDs are allocated using a global IDA. VMs are - * looked up from the PASID per amdgpu_device. - */ -static DEFINE_IDA(amdgpu_vm_pasid_ida); - -/** - * amdgpu_vm_alloc_pasid - Allocate a PASID - * @bits: Maximum width of the PASID in bits, must be at least 1 - * - * Allocates a PASID of the given width while keeping smaller PASIDs - * available if possible. - * - * Returns a positive integer on success. Returns %-EINVAL if bits==0. 
- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on - * memory allocation failure. - */ -int amdgpu_vm_alloc_pasid(unsigned int bits) -{ - int pasid = -EINVAL; - - for (bits = min(bits, 31U); bits > 0; bits--) { - pasid = ida_simple_get(&amdgpu_vm_pasid_ida, - 1U << (bits - 1), 1U << bits, - GFP_KERNEL); - if (pasid != -ENOSPC) - break; - } - - return pasid; -} - -/** - * amdgpu_vm_free_pasid - Free a PASID - * @pasid: PASID to free - */ -void amdgpu_vm_free_pasid(unsigned int pasid) -{ - ida_simple_remove(&amdgpu_vm_pasid_ida, pasid); -} - /* * GPUVM * GPUVM is similar to the legacy gart on older asics, however @@ -447,286 +401,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, adev->vm_manager.root_level); } -/** - * amdgpu_vm_had_gpu_reset - check if reset occured since last use - * - * @adev: amdgpu_device pointer - * @id: VMID structure - * - * Check if GPU reset occured since last use of the VMID. - */ -static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev, - struct amdgpu_vm_id *id) -{ - return id->current_gpu_reset_count != - atomic_read(&adev->gpu_reset_counter); -} - -static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub) -{ - return !!vm->reserved_vmid[vmhub]; -} - -/* idr_mgr->lock must be held */ -static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, - struct amdgpu_ring *ring, - struct amdgpu_sync *sync, - struct dma_fence *fence, - struct amdgpu_job *job) -{ - struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; - uint64_t fence_context = adev->fence_context + ring->idx; - struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub]; - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct dma_fence *updates = sync->last_vm_update; - int r = 0; - struct dma_fence *flushed, *tmp; - bool needs_flush = vm->use_cpu_for_update; - - flushed = id->flushed_updates; - if ((amdgpu_vm_had_gpu_reset(adev, id)) || - (atomic64_read(&id->owner) != vm->client_id) || - (job->vm_pd_addr != id->pd_gpu_addr) || - (updates && (!flushed || updates->context != flushed->context || - dma_fence_is_later(updates, flushed))) || - (!id->last_flush || (id->last_flush->context != fence_context && - !dma_fence_is_signaled(id->last_flush)))) { - needs_flush = true; - /* to prevent one context starved by another context */ - id->pd_gpu_addr = 0; - tmp = amdgpu_sync_peek_fence(&id->active, ring); - if (tmp) { - r = amdgpu_sync_fence(adev, sync, tmp, false); - return r; - } - } - - /* Good we can use this VMID. Remember this submission as - * user of the VMID. - */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); - if (r) - goto out; - - if (updates && (!flushed || updates->context != flushed->context || - dma_fence_is_later(updates, flushed))) { - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); - } - id->pd_gpu_addr = job->vm_pd_addr; - atomic64_set(&id->owner, vm->client_id); - job->vm_needs_flush = needs_flush; - if (needs_flush) { - dma_fence_put(id->last_flush); - id->last_flush = NULL; - } - job->vm_id = id - id_mgr->ids; - trace_amdgpu_vm_grab_id(vm, ring, job); -out: - return r; -} - -/** - * amdgpu_vm_grab_id - allocate the next free VMID - * - * @vm: vm to allocate id for - * @ring: ring we want to submit job to - * @sync: sync object where we add dependencies - * @fence: fence protecting ID from reuse - * - * Allocate an id for the vm, adding fences to the sync obj as necessary. 
- */ -int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job) -{ - struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - uint64_t fence_context = adev->fence_context + ring->idx; - struct dma_fence *updates = sync->last_vm_update; - struct amdgpu_vm_id *id, *idle; - struct dma_fence **fences; - unsigned i; - int r = 0; - - mutex_lock(&id_mgr->lock); - if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) { - r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job); - mutex_unlock(&id_mgr->lock); - return r; - } - fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); - if (!fences) { - mutex_unlock(&id_mgr->lock); - return -ENOMEM; - } - /* Check if we have an idle VMID */ - i = 0; - list_for_each_entry(idle, &id_mgr->ids_lru, list) { - fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); - if (!fences[i]) - break; - ++i; - } - - /* If we can't find a idle VMID to use, wait till one becomes available */ - if (&idle->list == &id_mgr->ids_lru) { - u64 fence_context = adev->vm_manager.fence_context + ring->idx; - unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; - struct dma_fence_array *array; - unsigned j; - - for (j = 0; j < i; ++j) - dma_fence_get(fences[j]); - - array = dma_fence_array_create(i, fences, fence_context, - seqno, true); - if (!array) { - for (j = 0; j < i; ++j) - dma_fence_put(fences[j]); - kfree(fences); - r = -ENOMEM; - goto error; - } - - - r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); - dma_fence_put(&array->base); - if (r) - goto error; - - mutex_unlock(&id_mgr->lock); - return 0; - - } - kfree(fences); - - job->vm_needs_flush = vm->use_cpu_for_update; - /* Check if we can use a VMID already assigned to this VM */ - list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { - struct dma_fence *flushed; - bool needs_flush = vm->use_cpu_for_update; - - /* Check all the prerequisites to using this VMID */ - if (amdgpu_vm_had_gpu_reset(adev, id)) - continue; - - if (atomic64_read(&id->owner) != vm->client_id) - continue; - - if (job->vm_pd_addr != id->pd_gpu_addr) - continue; - - if (!id->last_flush || - (id->last_flush->context != fence_context && - !dma_fence_is_signaled(id->last_flush))) - needs_flush = true; - - flushed = id->flushed_updates; - if (updates && (!flushed || dma_fence_is_later(updates, flushed))) - needs_flush = true; - - /* Concurrent flushes are only possible starting with Vega10 */ - if (adev->asic_type < CHIP_VEGA10 && needs_flush) - continue; - - /* Good we can use this VMID. Remember this submission as - * user of the VMID. - */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); - if (r) - goto error; - - if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); - } - - if (needs_flush) - goto needs_flush; - else - goto no_flush_needed; - - }; - - /* Still no ID to use? 
Then use the idle one found earlier */ - id = idle; - - /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); - if (r) - goto error; - - id->pd_gpu_addr = job->vm_pd_addr; - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); - atomic64_set(&id->owner, vm->client_id); - -needs_flush: - job->vm_needs_flush = true; - dma_fence_put(id->last_flush); - id->last_flush = NULL; - -no_flush_needed: - list_move_tail(&id->list, &id_mgr->ids_lru); - - job->vm_id = id - id_mgr->ids; - trace_amdgpu_vm_grab_id(vm, ring, job); - -error: - mutex_unlock(&id_mgr->lock); - return r; -} - -static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub) -{ - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - - mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub]) { - list_add(&vm->reserved_vmid[vmhub]->list, - &id_mgr->ids_lru); - vm->reserved_vmid[vmhub] = NULL; - atomic_dec(&id_mgr->reserved_vmid_num); - } - mutex_unlock(&id_mgr->lock); -} - -static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub) -{ - struct amdgpu_vm_id_manager *id_mgr; - struct amdgpu_vm_id *idle; - int r = 0; - - id_mgr = &adev->vm_manager.id_mgr[vmhub]; - mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub]) - goto unlock; - if (atomic_inc_return(&id_mgr->reserved_vmid_num) > - AMDGPU_VM_MAX_RESERVED_VMID) { - DRM_ERROR("Over limitation of reserved vmid\n"); - atomic_dec(&id_mgr->reserved_vmid_num); - r = -EINVAL; - goto unlock; - } - /* Select the first entry VMID */ - idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list); - list_del_init(&idle->list); - vm->reserved_vmid[vmhub] = idle; - mutex_unlock(&id_mgr->lock); - - return 0; -unlock: - mutex_unlock(&id_mgr->lock); - return r; -} - /** * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug * @@ -767,8 +441,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct amdgpu_vm_id *id; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct amdgpu_vmid *id; bool gds_switch_needed; bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; @@ -783,7 +457,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, id->oa_base != job->oa_base || id->oa_size != job->oa_size); - if (amdgpu_vm_had_gpu_reset(adev, id)) + if (amdgpu_vmid_had_gpu_reset(adev, id)) return true; return vm_flush_needed || gds_switch_needed; @@ -807,8 +481,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id]; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( id->gds_base != job->gds_base || id->gds_size != job->gds_size || @@ -820,7 +494,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ unsigned patch_offset = 0; int r; - if (amdgpu_vm_had_gpu_reset(adev, id)) { + if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; } @@ 
-875,49 +549,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ return 0; } -/** - * amdgpu_vm_reset_id - reset VMID to zero - * - * @adev: amdgpu device structure - * @vm_id: vmid number to use - * - * Reset saved GDW, GWS and OA to force switch on next flush. - */ -void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, - unsigned vmid) -{ - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; - - atomic64_set(&id->owner, 0); - id->gds_base = 0; - id->gds_size = 0; - id->gws_base = 0; - id->gws_size = 0; - id->oa_base = 0; - id->oa_size = 0; -} - -/** - * amdgpu_vm_reset_all_id - reset VMID to zero - * - * @adev: amdgpu device structure - * - * Reset VMID to force flush on next use - */ -void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev) -{ - unsigned i, j; - - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { - struct amdgpu_vm_id_manager *id_mgr = - &adev->vm_manager.id_mgr[i]; - - for (j = 1; j < id_mgr->num_ids; ++j) - amdgpu_vm_reset_id(adev, i, j); - } -} - /** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * @@ -2819,7 +2450,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&root); dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) - amdgpu_vm_free_reserved_vmid(adev, vm, i); + amdgpu_vmid_free_reserved(adev, vm, i); } /** @@ -2861,23 +2492,9 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, */ void amdgpu_vm_manager_init(struct amdgpu_device *adev) { - unsigned i, j; - - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { - struct amdgpu_vm_id_manager *id_mgr = - &adev->vm_manager.id_mgr[i]; + unsigned i; - mutex_init(&id_mgr->lock); - INIT_LIST_HEAD(&id_mgr->ids_lru); - atomic_set(&id_mgr->reserved_vmid_num, 0); - - /* skip over VMID 0, since it is the system VM */ - for (j = 1; j < id_mgr->num_ids; ++j) { - amdgpu_vm_reset_id(adev, i, j); - amdgpu_sync_create(&id_mgr->ids[i].active); - list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); - } - } + amdgpu_vmid_mgr_init(adev); adev->vm_manager.fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); @@ -2918,24 +2535,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) { - unsigned i, j; - WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); idr_destroy(&adev->vm_manager.pasid_idr); - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { - struct amdgpu_vm_id_manager *id_mgr = - &adev->vm_manager.id_mgr[i]; - - mutex_destroy(&id_mgr->lock); - for (j = 0; j < AMDGPU_NUM_VM; ++j) { - struct amdgpu_vm_id *id = &id_mgr->ids[j]; - - amdgpu_sync_free(&id->active); - dma_fence_put(id->flushed_updates); - dma_fence_put(id->last_flush); - } - } + amdgpu_vmid_mgr_fini(adev); } int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) @@ -2948,13 +2551,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* current, we only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm, - AMDGPU_GFXHUB); + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); if (r) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB); + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index edd2ea52dc00..78296d1a5b2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -31,6 +31,7 @@ #include "amdgpu_sync.h" #include "amdgpu_ring.h" +#include "amdgpu_ids.h" struct amdgpu_bo_va; struct amdgpu_job; @@ -40,9 +41,6 @@ struct amdgpu_bo_list_entry; * GPUVM handling */ -/* maximum number of VMIDs */ -#define AMDGPU_NUM_VM 16 - /* Maximum number of PTEs the hardware can write with one command */ #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF @@ -197,7 +195,7 @@ struct amdgpu_vm { u64 client_id; unsigned int pasid; /* dedicated to vm */ - struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; + struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; @@ -212,37 +210,9 @@ struct amdgpu_vm { unsigned int fault_credit; }; -struct amdgpu_vm_id { - struct list_head list; - struct amdgpu_sync active; - struct dma_fence *last_flush; - atomic64_t owner; - - uint64_t pd_gpu_addr; - /* last flushed PD/PT update */ - struct dma_fence *flushed_updates; - - uint32_t current_gpu_reset_count; - - uint32_t gds_base; - uint32_t gds_size; - uint32_t gws_base; - uint32_t gws_size; - uint32_t oa_base; - uint32_t oa_size; -}; - -struct amdgpu_vm_id_manager { - struct mutex lock; - unsigned num_ids; - struct list_head ids_lru; - struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; - atomic_t reserved_vmid_num; -}; - struct amdgpu_vm_manager { /* Handling of VMIDs */ - struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS]; + struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; /* Handling of VM fences */ u64 fence_context; @@ -280,8 +250,6 @@ struct amdgpu_vm_manager { spinlock_t pasid_lock; }; -int amdgpu_vm_alloc_pasid(unsigned int bits); -void amdgpu_vm_free_pasid(unsigned int pasid); void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -299,13 +267,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); -int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); -void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, - unsigned vmid); -void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e1a73c43f32d..8e28270d1ea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -956,7 +956,7 @@ static int gmc_v6_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 356a9a71b8cf..86e9d682c59e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1107,7 +1107,7 @@ static int gmc_v7_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index fce45578f5fd..9a813d834f1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1212,7 +1212,7 @@ static int gmc_v8_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b776df4c999f..909274e3ebe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1056,7 +1056,7 @@ static int gmc_v9_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } -- cgit v1.2.3 From c4f46f22c448ff571eb8fdbe4ab71a25805228d1 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 18 Dec 2017 17:08:25 +0100 Subject: drm/amdgpu: rename vm_id to vmid MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.c sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.h Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 28 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 14 ++++++------ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2 +- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 14 ++++++------ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 14 ++++++------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 18 ++++++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 18 ++++++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18 ++++++++-------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++------ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 16 ++++++-------- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 16 ++++++-------- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18 +++++++--------- drivers/gpu/drm/amd/amdgpu/si_dma.c | 16 +++++++------- drivers/gpu/drm/amd/amdgpu/si_ih.c | 2 +- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 26 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 36 +++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 10 ++++----- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 18 ++++++++-------- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 36 +++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4 ++-- 33 files changed, 188 insertions(+), 194 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bbe06e04dcb6..642bea2c9b3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -351,7 +351,7 @@ struct amdgpu_gart_funcs { /* get the pde for a given mc addr */ void (*get_vm_pde)(struct amdgpu_device *adev, int level, u64 *dst, u64 *flags); - uint32_t (*get_invalidate_req)(unsigned int vm_id); + uint32_t 
(*get_invalidate_req)(unsigned int vmid); }; /* provided by the ih block */ @@ -1124,7 +1124,7 @@ struct amdgpu_job { void *owner; uint64_t fence_ctx; /* the fence_context this job uses */ bool vm_needs_flush; - unsigned vm_id; + unsigned vmid; uint64_t vm_pd_addr; uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; @@ -1849,7 +1849,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c)) +#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 03a69942cce5..a162d87ca0c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return -EINVAL; } - if (vm && !job->vm_id) { + if (vm && !job->vmid) { dev_err(adev->dev, "VM IB without ID\n"); return -EINVAL; } @@ -211,7 +211,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ continue; - amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, + amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0, need_ctx_switch); need_ctx_switch = false; } @@ -229,8 +229,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, r = amdgpu_fence_emit(ring, f); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); - if (job && job->vm_id) - amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id); + if (job && job->vmid) + amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid); amdgpu_ring_undo(ring); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 71f8a76d4c10..d24884b419cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -150,7 +150,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, dma_fence_put(id->last_flush); id->last_flush = NULL; } - job->vm_id = id - id_mgr->ids; + job->vmid = id - id_mgr->ids; trace_amdgpu_vm_grab_id(vm, ring, job); out: return r; @@ -301,7 +301,7 @@ needs_flush: no_flush_needed: list_move_tail(&id->list, &id_mgr->ids_lru); - job->vm_id = id - id_mgr->ids; + job->vmid = id - id_mgr->ids; trace_amdgpu_vm_grab_id(vm, ring, job); error: @@ -360,7 +360,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, * amdgpu_vmid_reset - reset VMID to zero * * @adev: amdgpu device structure - * @vm_id: vmid number to use + * @vmid: vmid number to use * * Reset saved GDW, GWS and OA to force switch on next flush. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index ada89358e220..29cf10927a92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -105,8 +105,8 @@ struct amdgpu_iv_entry { unsigned client_id; unsigned src_id; unsigned ring_id; - unsigned vm_id; - unsigned vm_id_src; + unsigned vmid; + unsigned vmid_src; uint64_t timestamp; unsigned timestamp_src; unsigned pas_id; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index cdc9e0f5336a..2bd56760c744 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -158,7 +158,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, } } - while (fence == NULL && vm && !job->vm_id) { + while (fence == NULL && vm && !job->vmid) { struct amdgpu_ring *ring = job->ring; r = amdgpu_vmid_grab(vm, ring, &job->sync, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 010f69084af5..102dad3edf6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -121,11 +121,11 @@ struct amdgpu_ring_funcs { /* command emit functions */ void (*emit_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch); + unsigned vmid, bool ctx_switch); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); void (*emit_pipeline_sync)(struct amdgpu_ring *ring); - void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, + void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); void (*emit_hdp_flush)(struct amdgpu_ring *ring); void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 06525f2c36c3..cace7a93fc94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -82,8 +82,8 @@ TRACE_EVENT(amdgpu_iv, __field(unsigned, client_id) __field(unsigned, src_id) __field(unsigned, ring_id) - __field(unsigned, vm_id) - __field(unsigned, vm_id_src) + __field(unsigned, vmid) + __field(unsigned, vmid_src) __field(uint64_t, timestamp) __field(unsigned, timestamp_src) __field(unsigned, pas_id) @@ -93,8 +93,8 @@ TRACE_EVENT(amdgpu_iv, __entry->client_id = iv->client_id; __entry->src_id = iv->src_id; __entry->ring_id = iv->ring_id; - __entry->vm_id = iv->vm_id; - __entry->vm_id_src = iv->vm_id_src; + __entry->vmid = iv->vmid; + __entry->vmid_src = iv->vmid_src; __entry->timestamp = iv->timestamp; __entry->timestamp_src = iv->timestamp_src; __entry->pas_id = iv->pas_id; @@ -103,9 +103,9 @@ TRACE_EVENT(amdgpu_iv, __entry->src_data[2] = iv->src_data[2]; __entry->src_data[3] = iv->src_data[3]; ), - TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n", + TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n", __entry->client_id, __entry->src_id, - __entry->ring_id, __entry->vm_id, + __entry->ring_id, __entry->vmid, __entry->timestamp, __entry->pas_id, __entry->src_data[0], __entry->src_data[1], __entry->src_data[2], __entry->src_data[3]) @@ -219,7 +219,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_STRUCT__entry( __field(struct amdgpu_vm *, vm) __field(u32, ring) - __field(u32, vm_id) + __field(u32, vmid) __field(u32, vm_hub) __field(u64, pd_addr) 
__field(u32, needs_flush) @@ -228,13 +228,13 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_fast_assign( __entry->vm = vm; __entry->ring = ring->idx; - __entry->vm_id = job->vm_id; + __entry->vmid = job->vmid; __entry->vm_hub = ring->funcs->vmhub, __entry->pd_addr = job->vm_pd_addr; __entry->needs_flush = job->vm_needs_flush; ), TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", - __entry->vm, __entry->ring, __entry->vm_id, + __entry->vm, __entry->ring, __entry->vmid, __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) ); @@ -357,24 +357,24 @@ TRACE_EVENT(amdgpu_vm_copy_ptes, ); TRACE_EVENT(amdgpu_vm_flush, - TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id, + TP_PROTO(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr), - TP_ARGS(ring, vm_id, pd_addr), + TP_ARGS(ring, vmid, pd_addr), TP_STRUCT__entry( __field(u32, ring) - __field(u32, vm_id) + __field(u32, vmid) __field(u32, vm_hub) __field(u64, pd_addr) ), TP_fast_assign( __entry->ring = ring->idx; - __entry->vm_id = vm_id; + __entry->vmid = vmid; __entry->vm_hub = ring->funcs->vmhub; __entry->pd_addr = pd_addr; ), TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx", - __entry->ring, __entry->vm_id, + __entry->ring, __entry->vmid, __entry->vm_hub,__entry->pd_addr) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 9857d482c942..55a726a322e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -991,7 +991,7 @@ out: * */ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 162cae94e3b1..0fd378ae92c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -63,7 +63,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch); + unsigned vmid, bool ctx_switch); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 01ee8e2258c8..946bc21c6d7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -446,9 +446,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, bool gds_switch_needed; bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; - if (job->vm_id == 0) + if (job->vmid == 0) return false; - id = &id_mgr->ids[job->vm_id]; + id = &id_mgr->ids[job->vmid]; gds_switch_needed = ring->funcs->emit_gds_switch && ( id->gds_base != job->gds_base || id->gds_size != job->gds_size || @@ -472,7 +472,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) * amdgpu_vm_flush - hardware flush the vm * * @ring: ring to use for flush - * @vm_id: vmid number to use + * @vmid: vmid number to use * @pd_addr: address of the page directory * * Emit a VM flush when it is necessary. 
@@ -482,7 +482,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id]; + struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( id->gds_base != job->gds_base || id->gds_size != job->gds_size || @@ -511,8 +511,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ if (ring->funcs->emit_vm_flush && vm_flush_needed) { struct dma_fence *fence; - trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr); - amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); + amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); r = amdgpu_fence_emit(ring, &fence); if (r) @@ -532,7 +532,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->gws_size = job->gws_size; id->oa_base = job->oa_base; id->oa_size = job->oa_size; - amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base, + amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, job->gds_size, job->gws_base, job->gws_size, job->oa_base, job->oa_size); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index a870b354e3f7..d5a05c19708f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -280,7 +280,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index e406c93d01d6..6e8278e689b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 extra_bits = vm_id & 0xf; + u32 extra_bits = vmid & 0xf; /* IB packet must end on a 8 DW boundary */ cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); @@ -880,23 +880,23 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (CIK). 
*/ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); /* flush TLB */ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index fa61d649bb44..f576e9cbbc61 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -259,7 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index edef17d93527..9870d83b68c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1874,7 +1874,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -1889,7 +1889,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) } static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); @@ -2362,10 +2362,10 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id )); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid )); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8))); } amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, pd_addr >> 12); @@ -2376,7 +2376,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* 
wait for the invalidate to complete */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 83d94c23aa78..4b5109bfd5f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2252,7 +2252,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, */ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -2267,7 +2267,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2281,9 +2281,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, @@ -3237,19 +3237,19 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using the CP (CIK). */ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); - if (vm_id < 8) { + if (vmid < 8) { amdgpu_ring_write(ring, - (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { amdgpu_ring_write(ring, - (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, pd_addr >> 12); @@ -3260,7 +3260,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for the invalidate to complete */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 46550b588982..ff9f1a82630f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6245,7 +6245,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -6254,7 +6254,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); @@ -6275,9 +6275,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - 
unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, @@ -6328,7 +6328,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) } static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); @@ -6336,12 +6336,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0)) | WR_CONFIRM); - if (vm_id < 8) { + if (vmid < 8) { amdgpu_ring_write(ring, - (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { amdgpu_ring_write(ring, - (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, pd_addr >> 12); @@ -6353,7 +6353,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for the invalidate to complete */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9f7be230734c..55670dbacace 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3594,7 +3594,7 @@ static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -3603,7 +3603,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); @@ -3625,9 +3625,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ @@ -3683,11 +3683,11 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) } static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -3695,11 +3695,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; gfx_v9_0_write_data_to_reg(ring, usepfp, true, - 
hub->ctx0_ptb_addr_lo32 + (2 * vm_id), + hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); gfx_v9_0_write_data_to_reg(ring, usepfp, true, - hub->ctx0_ptb_addr_hi32 + (2 * vm_id), + hub->ctx0_ptb_addr_hi32 + (2 * vmid), upper_32_bits(pd_addr)); gfx_v9_0_write_data_to_reg(ring, usepfp, true, @@ -3707,7 +3707,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for the invalidate to complete */ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack + - eng, 0, 1 << vm_id, 1 << vm_id, 0x20); + eng, 0, 1 << vmid, 1 << vmid, 0x20); /* compute doesn't have PFP */ if (usepfp) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 909274e3ebe7..eb8b1bb66389 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -248,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src]; + struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; uint32_t status = 0; u64 addr; @@ -262,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (printk_ratelimit()) { dev_err(adev->dev, - "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n", - entry->vm_id_src ? "mmhub" : "gfxhub", - entry->src_id, entry->ring_id, entry->vm_id, + "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n", + entry->vmid_src ? "mmhub" : "gfxhub", + entry->src_id, entry->ring_id, entry->vmid, entry->pas_id); dev_err(adev->dev, " at page 0x%016llx from %d\n", addr, entry->client_id); @@ -288,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; } -static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id) +static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) { u32 req = 0; - /* invalidate using legacy mode on vm_id*/ + /* invalidate using legacy mode on vmid*/ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vm_id); + PER_VMID_INVALIDATE_REQ, 1 << vmid); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index bd592cb39f37..c4e4be3dd31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -259,7 +259,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 401552bae7f5..d4787ad4d346 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -861,14 +859,14 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). */ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); @@ -876,7 +874,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 0735d4d0e56a..521978c40537 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -417,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -1127,14 +1125,14 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). 
*/ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); @@ -1142,7 +1140,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 73477c5ed9b4..e92fb372bc99 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -330,15 +330,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -1135,10 +1133,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VEGA10). 
*/ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1147,12 +1145,12 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2); + amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2); + amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); /* flush TLB */ @@ -1167,8 +1165,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); /* reference */ - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* reference */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 9adca5d8b045..9a29c1399091 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -61,14 +61,14 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. * Pad as necessary with NOPs. */ while ((lower_32_bits(ring->wptr) & 7) != 5) amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); - amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0)); + amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0)); amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); @@ -473,25 +473,25 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). 
*/ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - if (vm_id < 8) - amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); else - amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8))); amdgpu_ring_write(ring, pd_addr >> 12); /* bits 0-7 are the VM contexts0-7 */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST)); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for invalidate to complete */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0xff << 16); /* retry */ - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, 0); /* value */ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index d2c6b80309c8..60dad63098a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; adev->irq.ih.rptr += 16; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index aa4e320e31f8..5995ffc183de 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -270,7 +270,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index b13ae34be1c2..8ab10c220910 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -541,7 +541,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index a4b0f1d842b7..c1fe30cdba32 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -556,7 +556,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 86123448a8ff..59271055a30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); @@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { uint32_t reg; - if (vm_id < 8) - reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + if (vmid < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid; else - reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8; amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, reg << 2); @@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 0x8); @@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); amdgpu_ring_write(ring, 0); 
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 0xC); } @@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring) } static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, pd_addr >> 12); amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); } static bool uvd_v6_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 4ec4447d33c0..6b95f4f344b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1218,13 +1218,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); @@ -1246,10 +1246,10 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); @@ -1291,10 +1291,10 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring, } static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; uint32_t data0, data1, mask; @@ -1302,15 +1302,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr |= flags; - data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2; data1 = upper_32_bits(pd_addr); uvd_v7_0_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); uvd_v7_0_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); @@ -1322,8 +1322,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ data0 = 
(hub->vm_inv_eng0_ack + eng) << 2; - data1 = 1 << vm_id; - mask = 1 << vm_id; + data1 = 1 << vmid; + mask = 1 << vmid; uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); } @@ -1343,10 +1343,10 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) } static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1354,15 +1354,15 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -1374,8 +1374,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } #if 0 diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index cf81065e3c5a..a5355eb689f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -834,24 +834,24 @@ out: } static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, pd_addr >> 12); amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, VCE_CMD_END); } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 308949d6edde..7cf2eef68cf2 100755 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -938,10 +938,10 @@ static int vce_v4_0_set_powergating_state(void *handle, #endif static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool 
ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); @@ -965,10 +965,10 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring) } static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -976,15 +976,15 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, VCE_CMD_REG_WAIT); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -996,8 +996,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index deb3fba790a5..b99e15c43e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -833,13 +833,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) */ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); @@ -888,10 +888,10 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring, } static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; uint32_t data0, data1, mask; @@ -899,15 +899,15 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr |= flags; - data0 = (hub->ctx0_ptb_addr_hi32 
+ vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2; data1 = upper_32_bits(pd_addr); vcn_v1_0_dec_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); vcn_v1_0_dec_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); @@ -919,8 +919,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ data0 = (hub->vm_inv_eng0_ack + eng) << 2; - data1 = 1 << vm_id; - mask = 1 << vm_id; + data1 = 1 << vmid; + mask = 1 << vmid; vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); } @@ -1011,20 +1011,20 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) * Write enc ring commands to execute the indirect buffer */ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, VCN_ENC_CMD_IB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1033,17 +1033,17 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -1055,8 +1055,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index e1d7dae0989b..b69ceafb7888 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -327,8 +327,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev, entry->client_id = dw[0] & 0xff; entry->src_id = (dw[0] >> 8) & 0xff; entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vm_id = (dw[0] >> 24) & 0xf; - entry->vm_id_src = 
(dw[0] >> 31); + entry->vmid = (dw[0] >> 24) & 0xf; + entry->vmid_src = (dw[0] >> 31); entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); entry->timestamp_src = dw[2] >> 31; entry->pas_id = dw[3] & 0xffff; -- cgit v1.2.3
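
The final hunk above renames the interrupt-vector fields vm_id/vm_id_src to vmid/vmid_src in vega10_ih_decode_iv(). As a standalone illustration of that dword layout only: the sketch below is user-space C, not kernel code; the struct, function and sample dwords are invented for this example and merely mirror the bit positions shown in the hunk.

/*
 * Standalone sketch: decode the first four dwords of a Vega10 IH ring
 * entry the same way vega10_ih_decode_iv() does after the rename.
 * Only the fields touched by the patch are mirrored here; the sample
 * dwords are arbitrary and not taken from real hardware.
 */
#include <stdint.h>
#include <stdio.h>

struct iv_entry_sketch {
	unsigned client_id;
	unsigned src_id;
	unsigned ring_id;
	unsigned vmid;          /* renamed from vm_id */
	unsigned vmid_src;      /* renamed from vm_id_src */
	uint64_t timestamp;
	unsigned timestamp_src;
	unsigned pas_id;
};

static void decode_iv_sketch(const uint32_t dw[4], struct iv_entry_sketch *e)
{
	e->client_id     = dw[0] & 0xff;
	e->src_id        = (dw[0] >> 8) & 0xff;
	e->ring_id       = (dw[0] >> 16) & 0xff;
	e->vmid          = (dw[0] >> 24) & 0xf;
	e->vmid_src      = dw[0] >> 31;
	e->timestamp     = dw[1] | ((uint64_t)(dw[2] & 0xffff) << 32);
	e->timestamp_src = dw[2] >> 31;
	e->pas_id        = dw[3] & 0xffff;
}

int main(void)
{
	/* Arbitrary example dwords for illustration only. */
	const uint32_t dw[4] = { 0x85030112, 0x00001000, 0x80000002, 0x0000002a };
	struct iv_entry_sketch e;

	decode_iv_sketch(dw, &e);
	printf("vmid=%u vmid_src=%u src_id=%u pas_id=%u\n",
	       e.vmid, e.vmid_src, e.src_id, e.pas_id);
	return 0;
}
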