author	Chunming Zhou <David1.Zhou@amd.com>	2016-01-15 11:25:00 +0800
committer	Alex Deucher <alexander.deucher@amd.com>	2016-02-10 14:16:50 -0500
commit	cadf97b196a1e5b2db2606d53f77714e3e9cf4bb (patch)
tree	1954976bc68547599f4ea9c29381a962c5c5d681	/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent	be86c606b50a53b60f3591ba94dd687524f2ee21 (diff)
drm/amdgpu: clean up non-scheduler code path (v2)
Non-scheduler code is no longer supported.

v2: agd: rebased on upstream

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	48
1 file changed, 22 insertions, 26 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17d1fb12128a..f1f4b453ece1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -45,29 +45,27 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
 			amdgpu_sched_jobs * i;
 	}
-	if (amdgpu_enable_scheduler) {
-		/* create context entity for each ring */
-		for (i = 0; i < adev->num_rings; i++) {
-			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY) {
-				kfree(ctx->fences);
-				return -EINVAL;
-			}
-			rq = &adev->rings[i]->sched.sched_rq[pri];
-			r = amd_sched_entity_init(&adev->rings[i]->sched,
-						  &ctx->rings[i].entity,
-						  rq, amdgpu_sched_jobs);
-			if (r)
-				break;
-		}
-
-		if (i < adev->num_rings) {
-			for (j = 0; j < i; j++)
-				amd_sched_entity_fini(&adev->rings[j]->sched,
-						      &ctx->rings[j].entity);
+	/* create context entity for each ring */
+	for (i = 0; i < adev->num_rings; i++) {
+		struct amd_sched_rq *rq;
+		if (pri >= AMD_SCHED_MAX_PRIORITY) {
 			kfree(ctx->fences);
-			return r;
+			return -EINVAL;
 		}
+		rq = &adev->rings[i]->sched.sched_rq[pri];
+		r = amd_sched_entity_init(&adev->rings[i]->sched,
+					  &ctx->rings[i].entity,
+					  rq, amdgpu_sched_jobs);
+		if (r)
+			break;
+	}
+
+	if (i < adev->num_rings) {
+		for (j = 0; j < i; j++)
+			amd_sched_entity_fini(&adev->rings[j]->sched,
+					      &ctx->rings[j].entity);
+		kfree(ctx->fences);
+		return r;
 	}
 	return 0;
 }
@@ -85,11 +83,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 
-	if (amdgpu_enable_scheduler) {
-		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(&adev->rings[i]->sched,
-					      &ctx->rings[i].entity);
-	}
+	for (i = 0; i < adev->num_rings; i++)
+		amd_sched_entity_fini(&adev->rings[i]->sched,
+				      &ctx->rings[i].entity);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
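For readers skimming the diff, the error handling kept in amdgpu_ctx_init() above follows a common partial-initialization unwind: one scheduler entity is created per ring until amd_sched_entity_init() fails, then entities 0..i-1 are torn down and the fences array is freed. With the amdgpu_enable_scheduler gate removed, this path is now unconditional. Below is a minimal standalone C sketch of that pattern; toy_ctx, entity_init() and entity_fini() are hypothetical stand-ins for the driver's structures and for amd_sched_entity_init()/amd_sched_entity_fini(), not amdgpu code.

/*
 * Standalone sketch (not amdgpu code) of the init/unwind pattern that the
 * hunks above leave in place once the scheduler path is unconditional.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_RINGS 4

struct toy_ctx {
	void **fences;		/* mirrors ctx->fences */
	int entities;		/* how many ring entities were set up */
};

static int entity_init(int ring)
{
	if (ring == 2)		/* pretend ring 2 fails to exercise the unwind */
		return -1;
	printf("init entity for ring %d\n", ring);
	return 0;
}

static void entity_fini(int ring)
{
	printf("fini entity for ring %d\n", ring);
}

static int toy_ctx_init(struct toy_ctx *ctx)
{
	int i, j, r = 0;

	ctx->fences = calloc(NUM_RINGS, sizeof(*ctx->fences));
	if (!ctx->fences)
		return -1;

	/* create one entity per ring; stop at the first failure */
	for (i = 0; i < NUM_RINGS; i++) {
		r = entity_init(i);
		if (r)
			break;
	}

	/* partial failure: undo entities 0..i-1, then drop the fences array */
	if (i < NUM_RINGS) {
		for (j = 0; j < i; j++)
			entity_fini(j);
		free(ctx->fences);
		ctx->fences = NULL;
		return r;
	}

	ctx->entities = i;
	return 0;
}

static void toy_ctx_fini(struct toy_ctx *ctx)
{
	int i;

	/* mirrors amdgpu_ctx_fini(): free fences, then tear down every entity */
	free(ctx->fences);
	for (i = 0; i < ctx->entities; i++)
		entity_fini(i);
}

int main(void)
{
	struct toy_ctx ctx = { 0 };
	int r = toy_ctx_init(&ctx);

	printf("toy_ctx_init returned %d\n", r);
	if (!r)
		toy_ctx_fini(&ctx);
	return 0;
}

The sketch deliberately keeps the same shape as the driver code: the loop index i doubles as the count of successfully created entities, so the cleanup loop and the i < NUM_RINGS check are all that is needed to unwind a partial failure.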