author    Christian König <christian.koenig@amd.com>    2015-08-12 11:46:04 +0200
committer Alex Deucher <alexander.deucher@amd.com>      2015-08-17 16:51:22 -0400
commit    432a4ff8b7224908a8bbc34b598f48af3f42b827 (patch)
tree      06753b59a3710a465f6a1a8b6562dec2ff62cdb7 /drivers/gpu/drm
parent    c3b95d4f9e460704e184ded7af60b9c4898f6181 (diff)
drm/amdgpu: cleanup scheduler rq handling v2
Rework the run queue implementation, especially remove the odd list handling.

v2: clean up the code only, no algorithm change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
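The policy the patch leaves behind is plain round-robin: resume scanning after current_entity, wrap around once, and pick the first entity whose job_queue kfifo is non-empty; the old dummy-head entity and check_entity_status() callback go away, with a NULL current_entity meaning "nothing scheduled yet". A minimal userspace C sketch of that selection loop, for illustration only — the circular entity ring, job_count field, and rq_select() below are stand-ins for the kernel's list_head/kfifo machinery, and the run-queue mutex is omitted:

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures: a hand-rolled circular
 * ring instead of list_head, job_count instead of the kfifo job_queue, and
 * no locking. */
struct entity {
        struct entity *next;
        int job_count;          /* > 0 means kfifo_is_empty() would be false */
        const char *name;
};

struct run_queue {
        struct entity *head;    /* entry point into the ring, NULL if empty */
        struct entity *current; /* last scheduled entity, NULL initially */
};

/* Round-robin pick: resume after rq->current, walk the ring at most once and
 * return the first entity with queued jobs; NULL if nothing is runnable. */
static struct entity *rq_select(struct run_queue *rq)
{
        struct entity *start = rq->current ? rq->current->next : rq->head;
        struct entity *e = start;

        if (!e)
                return NULL;
        do {
                if (e->job_count > 0) {
                        rq->current = e;
                        return e;
                }
                e = e->next;
        } while (e != start);
        return NULL;
}

int main(void)
{
        struct entity a = { 0 }, b = { 0 }, c = { 0 };
        struct run_queue rq = { &a, NULL };

        a.name = "a"; b.name = "b"; c.name = "c";
        b.job_count = 2;
        c.job_count = 1;
        a.next = &b; b.next = &c; c.next = &a;  /* close the ring */

        for (int i = 0; i < 3; i++) {
                struct entity *e = rq_select(&rq);
                printf("pick %d: %s\n", i, e ? e->name : "(none)");
        }
        return 0;
}

The kernel version in the diff below expresses the same wrap-around as two passes: list_for_each_entry_continue() starting after current_entity, then list_for_each_entry() from the head that stops once it reaches current_entity again.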
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c        |   2
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  | 103
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  |  25
3 files changed, 54 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 1833f05c7e0b..08bc7722ddb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -41,7 +41,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
-			struct amd_run_queue *rq;
+			struct amd_sched_rq *rq;
 			if (kernel)
 				rq = &adev->rings[i]->scheduler->kernel_rq;
 			else
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 3d45ff29eaa8..265d3e2f63cc 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -28,31 +28,29 @@
 #include "gpu_scheduler.h"
 
 /* Initialize a given run queue struct */
-static void init_rq(struct amd_run_queue *rq)
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
-	INIT_LIST_HEAD(&rq->head.list);
-	rq->head.belongto_rq = rq;
+	INIT_LIST_HEAD(&rq->entities);
 	mutex_init(&rq->lock);
-	atomic_set(&rq->nr_entity, 0);
-	rq->current_entity = &rq->head;
+	rq->current_entity = NULL;
 }
 
-/* Note: caller must hold the lock or in a atomic context */
-static void rq_remove_entity(struct amd_run_queue *rq,
-			     struct amd_sched_entity *entity)
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
 {
-	if (rq->current_entity == entity)
-		rq->current_entity = list_entry(entity->list.prev,
-						typeof(*entity), list);
-	list_del_init(&entity->list);
-	atomic_dec(&rq->nr_entity);
+	mutex_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	mutex_unlock(&rq->lock);
 }
 
-static void rq_add_entity(struct amd_run_queue *rq,
-			  struct amd_sched_entity *entity)
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
 {
-	list_add_tail(&entity->list, &rq->head.list);
-	atomic_inc(&rq->nr_entity);
+	mutex_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	mutex_unlock(&rq->lock);
 }
 
 /**
@@ -60,38 +58,32 @@ static void rq_add_entity(struct amd_run_queue *rq,
  * It could return the same entity as current one if current is the only
  * available one in the queue. Return NULL if nothing available.
  */
-static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
-	struct amd_sched_entity *p = rq->current_entity;
-	int i = atomic_read(&rq->nr_entity) + 1; /*real count + dummy head*/
-
-	while (i) {
-		p = list_entry(p->list.next, typeof(*p), list);
-		if (!rq->check_entity_status(p)) {
-			rq->current_entity = p;
-			break;
+	struct amd_sched_entity *entity = rq->current_entity;
+
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			if (!kfifo_is_empty(&entity->job_queue)) {
+				rq->current_entity = entity;
+				return rq->current_entity;
+			}
 		}
-		i--;
 	}
-	return i ? p : NULL;
-}
 
-static bool context_entity_is_waiting(struct amd_sched_entity *entity)
-{
-	/* TODO: sync obj for multi-ring synchronization */
-	return false;
-}
+	list_for_each_entry(entity, &rq->entities, list) {
 
-static int gpu_entity_check_status(struct amd_sched_entity *entity)
-{
-	if (entity == &entity->belongto_rq->head)
-		return -1;
+		if (!kfifo_is_empty(&entity->job_queue)) {
+			rq->current_entity = entity;
+			return rq->current_entity;
+		}
 
-	if (kfifo_is_empty(&entity->job_queue) ||
-	    context_entity_is_waiting(entity))
-		return -1;
+		if (entity == rq->current_entity)
+			break;
+	}
 
-	return 0;
+	return NULL;
 }
 
 /**
@@ -124,10 +116,10 @@ static struct amd_sched_entity *
 kernel_rq_select_context(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *sched_entity;
-	struct amd_run_queue *rq = &sched->kernel_rq;
+	struct amd_sched_rq *rq = &sched->kernel_rq;
 
 	mutex_lock(&rq->lock);
-	sched_entity = rq_select_entity(rq);
+	sched_entity = amd_sched_rq_select_entity(rq);
 	mutex_unlock(&rq->lock);
 	return sched_entity;
 }
@@ -140,7 +132,7 @@ select_context(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *wake_entity = NULL;
 	struct amd_sched_entity *tmp;
-	struct amd_run_queue *rq;
+	struct amd_sched_rq *rq;
 
 	if (!is_scheduler_ready(sched))
 		return NULL;
@@ -152,7 +144,7 @@ select_context(struct amd_gpu_scheduler *sched)
 	rq = &sched->sched_rq;
 	mutex_lock(&rq->lock);
-	tmp = rq_select_entity(rq);
+	tmp = amd_sched_rq_select_entity(rq);
 	mutex_unlock(&rq->lock);
 
 exit:
 	if (sched->current_entity && (sched->current_entity != tmp))
@@ -176,7 +168,7 @@ exit:
  */
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
-			  struct amd_run_queue *rq,
+			  struct amd_sched_rq *rq,
 			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
@@ -206,9 +198,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
 	/* Add the entity to the run queue */
-	mutex_lock(&rq->lock);
-	rq_add_entity(rq, entity);
-	mutex_unlock(&rq->lock);
+	amd_sched_rq_add_entity(rq, entity);
 
 	return 0;
 }
@@ -254,7 +244,7 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity)
 {
 	int r = 0;
-	struct amd_run_queue *rq = entity->belongto_rq;
+	struct amd_sched_rq *rq = entity->belongto_rq;
 
 	if (!is_context_entity_initialized(sched, entity))
 		return 0;
@@ -276,9 +266,7 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			  entity);
 	}
 
-	mutex_lock(&rq->lock);
-	rq_remove_entity(rq, entity);
-	mutex_unlock(&rq->lock);
+	amd_sched_rq_remove_entity(rq, entity);
 	kfifo_free(&entity->job_queue);
 	return r;
 }
@@ -429,11 +417,8 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
 	mutex_init(&sched->sched_lock);
 	spin_lock_init(&sched->queue_lock);
-	init_rq(&sched->sched_rq);
-	sched->sched_rq.check_entity_status = gpu_entity_check_status;
-
-	init_rq(&sched->kernel_rq);
-	sched->kernel_rq.check_entity_status = gpu_entity_check_status;
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
 
 	init_waitqueue_head(&sched->wait_queue);
 	INIT_LIST_HEAD(&sched->active_hw_rq);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 47823b4a71e0..ceb5918bfbeb 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,7 +30,7 @@
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
 
 struct amd_gpu_scheduler;
-struct amd_run_queue;
+struct amd_sched_rq;
 
 /**
  * A scheduler entity is a wrapper around a job queue or a group
@@ -40,7 +40,7 @@ struct amd_run_queue;
  */
 struct amd_sched_entity {
 	struct list_head		list;
-	struct amd_run_queue		*belongto_rq;
+	struct amd_sched_rq		*belongto_rq;
 	spinlock_t			lock;
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t			last_queued_v_seq;
@@ -62,17 +62,10 @@ struct amd_sched_entity {
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
-struct amd_run_queue {
-	struct mutex		lock;
-	atomic_t		nr_entity;
-	struct amd_sched_entity	head;
-	struct amd_sched_entity	*current_entity;
-	/**
-	 * Return 0 means this entity can be scheduled
-	 * Return -1 means this entity cannot be scheduled for reasons,
-	 * i.e, it is the head, or these is no job, etc
-	 */
-	int (*check_entity_status)(struct amd_sched_entity *entity);
+struct amd_sched_rq {
+	struct mutex		lock;
+	struct list_head	entities;
+	struct amd_sched_entity	*current_entity;
 };
 
 struct amd_sched_fence {
@@ -124,8 +117,8 @@ struct amd_sched_backend_ops {
 struct amd_gpu_scheduler {
 	void				*device;
 	struct task_struct		*thread;
-	struct amd_run_queue		sched_rq;
-	struct amd_run_queue		kernel_rq;
+	struct amd_sched_rq		sched_rq;
+	struct amd_sched_rq		kernel_rq;
 	struct list_head		active_hw_rq;
 	atomic64_t			hw_rq_count;
 	struct amd_sched_backend_ops	*ops;
@@ -154,7 +147,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
-			  struct amd_run_queue *rq,
+			  struct amd_sched_rq *rq,
 			  uint32_t jobs);
 int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity);
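One design point of the rework, visible in the amd_sched_entity_init() and amd_sched_entity_fini() hunks above: the run-queue mutex moves inside the add/remove helpers, so those callers no longer bracket the calls with mutex_lock()/mutex_unlock() themselves. A small pthread-based sketch of that encapsulation, with hypothetical names rather than the kernel API:

#include <pthread.h>
#include <stddef.h>

struct sched_entity {
        struct sched_entity *next;
};

struct sched_rq {
        pthread_mutex_t lock;
        struct sched_entity *head;
        struct sched_entity *current;
};

/* The lock lives inside the helper, so callers cannot forget it and never
 * hold it across unrelated work. */
static void sched_rq_add_entity(struct sched_rq *rq, struct sched_entity *e)
{
        pthread_mutex_lock(&rq->lock);
        e->next = rq->head;
        rq->head = e;
        pthread_mutex_unlock(&rq->lock);
}

static void sched_rq_remove_entity(struct sched_rq *rq, struct sched_entity *e)
{
        struct sched_entity **pp;

        pthread_mutex_lock(&rq->lock);
        for (pp = &rq->head; *pp; pp = &(*pp)->next) {
                if (*pp == e) {
                        *pp = e->next;
                        break;
                }
        }
        if (rq->current == e)   /* mirrors the patch's NULL bookkeeping */
                rq->current = NULL;
        pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
        struct sched_rq rq = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        struct sched_entity a = { NULL }, b = { NULL };

        sched_rq_add_entity(&rq, &a);
        sched_rq_add_entity(&rq, &b);
        rq.current = &a;
        sched_rq_remove_entity(&rq, &a);        /* rq.current drops to NULL */
        return 0;
}

Together with the NULL-based current_entity, this is what lets the patch drop the nr_entity counter and the dummy head entity: an empty list and a NULL current_entity now cover the states the old code tracked by hand.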