| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-01 21:44:08 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-01 21:44:08 -0400 |
| commit | 731c7d3a205ba89b475b2aa71b5f13dd6ae3de56 (patch) | |
| tree | d2b9c3e0a98b94dfc3e4e60e35622c0143ef4ed4 /drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |
| parent | 77a87824ed676ca8ff8482e4157d3adb284fd381 (diff) | |
| parent | 753e7c8cbd8c503b962294303c7b5e9ea8513443 (diff) | |
Merge tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux
Merge drm updates from Dave Airlie:
"This is the main drm pull request for 4.8.
I'm down with a cold at the moment, so hopefully this isn't in too bad
a state. I finished pulling stuff mostly last week (the nouveau fixes
just went in today), so only this message should be influenced by illness.
Apologies to anyone whose major feature I missed :-)
Core:
Lockless GEM BO freeing
Non-blocking atomic work
Documentation changes (rst/sphinx)
Prep for new fencing changes
Simple display helpers
Master/auth changes
Register/unregister rework
Loads of trivial patches/fixes.
New stuff:
ARM Mali display driver (not the 3D chip)
sii902x RGB->HDMI bridge
Panel:
Support for new panels
Improved backlight support
Bridge:
Convert ADV7511 to bridge driver
ADV7533 support
TC358767 (DSI/DPI to eDP) encoder chip support
i915:
BXT support enabled by default
GVT-g infrastructure
GuC command submission and fixes
BXT workarounds
SKL/BKL workarounds
Demidlayering device registration
Thundering herd fixes
Missing pci ids
Atomic updates
amdgpu/radeon:
ATPX improvements for better dGPU power control on PX systems
New power features for CZ/BR/ST
Pipelined BO moves and evictions in TTM
GPU scheduler improvements
GPU reset improvements
Overclocking on dGPUs with amdgpu
Polaris powermanagement enabled
nouveau:
GK20A/GM20B volt and clock improvements.
Initial support for GP100/GP104 GPUs; GP104 will not yet support
acceleration because NVIDIA has not yet released firmware for it.
exynos:
Exynos5433 SoC with IOMMU support.
vc4:
Shader validation for branching
imx-drm:
Atomic mode setting conversion
Reworked DMFC FIFO allocation
External bridge support
analogix-dp:
RK3399 eDP support
Lots of fixes.
rockchip:
Lots of small fixes.
msm:
DT bindings cleanups
Shrinker and madvise support
ASoC HDMI codec support
tegra:
Host1x driver cleanups
SOR reworking for DP support
Runtime PM support
omapdrm:
PLL enhancements
Header refactoring
Gamma table support
arcgpu:
Simulator support
virtio-gpu:
Atomic modesetting fixes.
rcar-du:
Misc fixes.
mediatek:
MT8173 HDMI support
sti:
ASoC HDMI codec support
Minor fixes
fsl-dcu:
Suspend/resume support
Bridge support
amdkfd:
Minor fixes.
etnaviv:
Enable GPU clock gating
hisilicon:
Vblank and other fixes"
* tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux: (1575 commits)
drm/nouveau/gr/nv3x: fix instobj write offsets in gr setup
drm/nouveau/acpi: fix lockup with PCIe runtime PM
drm/nouveau/acpi: check for function 0x1B before using it
drm/nouveau/acpi: return supported DSM functions
drm/nouveau/acpi: ensure matching ACPI handle and supported functions
drm/nouveau/fbcon: fix font width not divisible by 8
drm/amd/powerplay: remove enable_clock_power_gatings_tasks from initialize and resume events
drm/amd/powerplay: move clockgating to after ungating power in pp for uvd/vce
drm/amdgpu: add query device id and revision id into system info entry at CGS
drm/amdgpu: add new definition in bif header
drm/amd/powerplay: rename smum header guards
drm/amdgpu: enable UVD context buffer for older HW
drm/amdgpu: fix default UVD context size
drm/amdgpu: fix incorrect type of info_id
drm/amdgpu: make amdgpu_cgs_call_acpi_method as static
drm/amdgpu: comment out unused defaults_staturn_pro static const structure to fix the build
drm/amdgpu: enable UVD VM only on polaris
drm/amdgpu: increase timeout of IB test
drm/amdgpu: add destroy session when generate VCE destroy msg.
drm/amd: fix deadlock of job_list_lock V2
...
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 190 |
1 file changed, 131 insertions(+), 59 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee779..ef312bb75fda 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 		return r;
 
 	atomic_set(&entity->fence_seq, 0);
-	entity->fence_context = fence_context_alloc(1);
+	entity->fence_context = fence_context_alloc(2);
 
 	return 0;
 }
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 
 	s_fence = to_amd_sched_fence(fence);
 	if (s_fence && s_fence->sched == sched) {
-		/* Fence is from the same scheduler */
-		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
-			/* Ignore it when it is already scheduled */
-			fence_put(entity->dependency);
-			return false;
-		}
 
-		/* Wait for fence to be scheduled */
-		entity->cb.func = amd_sched_entity_clear_dep;
-		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
-		return true;
+		/*
+		 * Fence is from the same scheduler, only need to wait for
+		 * it to be scheduled
+		 */
+		fence = fence_get(&s_fence->scheduled);
+		fence_put(entity->dependency);
+		entity->dependency = fence;
+		if (!fence_add_callback(fence, &entity->cb,
+					amd_sched_entity_clear_dep))
+			return true;
+
+		/* Ignore it when it is already scheduled */
+		fence_put(fence);
+		return false;
 	}
 
 	if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 	return added;
 }
 
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-	struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
-	schedule_work(&job->work_free_job);
-}
-
 /* job_finish is called after hw fence signaled, and
  * the job had already been deleted from ring_mirror_list */
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
 {
-	struct amd_sched_job *next;
+	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+						   finish_work);
 	struct amd_gpu_scheduler *sched = s_job->sched;
 
+	/* remove job from ring_mirror_list */
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		if (cancel_delayed_work(&s_job->work_tdr))
-			amd_sched_job_put(s_job);
+		struct amd_sched_job *next;
+
+		spin_unlock(&sched->job_list_lock);
+		cancel_delayed_work_sync(&s_job->work_tdr);
+		spin_lock(&sched->job_list_lock);
 
 		/* queue TDR for next job */
 		next = list_first_entry_or_null(&sched->ring_mirror_list,
 						struct amd_sched_job, node);
 
-		if (next) {
-			INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
-			amd_sched_job_get(next);
+		if (next)
 			schedule_delayed_work(&next->work_tdr, sched->timeout);
-		}
 	}
+	spin_unlock(&sched->job_list_lock);
+	sched->ops->free_job(s_job);
 }
 
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+						 finish_cb);
+	schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
 {
 	struct amd_gpu_scheduler *sched = s_job->sched;
 
+	spin_lock(&sched->job_list_lock);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-		list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
-	{
-		INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
-		amd_sched_job_get(s_job);
+	    list_first_entry_or_null(&sched->ring_mirror_list,
+				     struct amd_sched_job, node) == s_job)
+		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+	spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+						 work_tdr.work);
+
+	job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *s_job;
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+			fence_put(s_job->s_fence->parent);
+			s_job->s_fence->parent = NULL;
+		}
+	}
+	atomic_set(&sched->hw_rq_count, 0);
+	spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *s_job, *tmp;
+	int r;
+
+	spin_lock(&sched->job_list_lock);
+	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+					 struct amd_sched_job, node);
+	if (s_job)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct amd_sched_fence *s_fence = s_job->s_fence;
+		struct fence *fence;
+
+		spin_unlock(&sched->job_list_lock);
+		fence = sched->ops->run_job(s_job);
+		atomic_inc(&sched->hw_rq_count);
+		if (fence) {
+			s_fence->parent = fence_get(fence);
+			r = fence_add_callback(fence, &s_fence->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &s_fence->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
+			fence_put(fence);
+		} else {
+			DRM_ERROR("Failed to run job!\n");
+			amd_sched_process_job(NULL, &s_fence->cb);
+		}
+		spin_lock(&sched->job_list_lock);
 	}
+	spin_unlock(&sched->job_list_lock);
 }
 
 /**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
-	sched_job->use_sched = 1;
-	fence_add_callback(&sched_job->s_fence->base,
-			   &sched_job->cb_free_job, amd_sched_free_job);
 	trace_amd_sched_job(sched_job);
+	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+			   amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
 
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-			struct amd_gpu_scheduler *sched,
-			struct amd_sched_entity *entity,
-			void (*timeout_cb)(struct work_struct *work),
-			void (*free_cb)(struct kref *refcount),
-			void *owner, struct fence **fence)
+		       struct amd_gpu_scheduler *sched,
+		       struct amd_sched_entity *entity,
+		       void *owner)
 {
-	INIT_LIST_HEAD(&job->node);
-	kref_init(&job->refcount);
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
 
-	job->s_fence->s_job = job;
-	job->timeout_callback = timeout_cb;
-	job->free_callback = free_cb;
+	INIT_WORK(&job->finish_work, amd_sched_job_finish);
+	INIT_LIST_HEAD(&job->node);
+	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
 
-	if (fence)
-		*fence = &job->s_fence->base;
 	return 0;
 }
 
@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
 	struct amd_gpu_scheduler *sched = s_fence->sched;
-	unsigned long flags;
 
 	atomic_dec(&sched->hw_rq_count);
-
-	/* remove job from ring_mirror_list */
-	spin_lock_irqsave(&sched->job_list_lock, flags);
-	list_del_init(&s_fence->s_job->node);
-	sched->ops->finish_job(s_fence->s_job);
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-	amd_sched_fence_signal(s_fence);
+	amd_sched_fence_finished(s_fence);
 
 	trace_amd_sched_process_job(s_fence);
-	fence_put(&s_fence->base);
+	fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+	if (kthread_should_park()) {
+		kthread_parkme();
+		return true;
+	}
+
+	return false;
+}
+
 static int amd_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
-		struct amd_sched_entity *entity;
+		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
 		struct fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			(entity = amd_sched_select_entity(sched)) ||
-			kthread_should_stop());
+					 (!amd_sched_blocked(sched) &&
+					 (entity = amd_sched_select_entity(sched))) ||
+					 kthread_should_stop());
 
 		if (!entity)
 			continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
 		s_fence = sched_job->s_fence;
 
 		atomic_inc(&sched->hw_rq_count);
-		amd_sched_job_pre_schedule(sched, sched_job);
+		amd_sched_job_begin(sched_job);
+
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
+			s_fence->parent = fence_get(fence);
 			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
-				DRM_ERROR("fence add callback failed (%d)\n", r);
+				DRM_ERROR("fence add callback failed (%d)\n",
+					  r);
 			fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
```