author	Chunming Zhou <david1.zhou@amd.com>	2015-08-18 09:16:40 +0200
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-20 23:00:35 +0200
commit	bb977d3711ed1de1601b463e7fd5a43d82a2b077 (patch)
tree	25ce4d1396258790d27ca111f246df39c49e1027 /drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
parent	drm/amdgpu: bump the DRM version for new allowed mem-mapped registers (diff)
drm/amdgpu: abstract amdgpu_job for scheduler
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
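
For orientation, a minimal sketch of the amdgpu_job structure this patch switches the scheduler backend to, reconstructed only from the fields the hunks below touch (base, adev, ibs, num_ibs, owner, job_lock, free_job); the authoritative definition lives in amdgpu.h and may carry additional members or different types:

/* Reconstructed sketch, not the exact definition from amdgpu.h. */
struct amdgpu_job {
	struct amd_sched_job	base;		/* embedded scheduler job: sched, s_entity, s_fence */
	struct amdgpu_device	*adev;
	struct amdgpu_ib	*ibs;		/* IB array handed to amdgpu_ib_schedule() */
	unsigned		num_ibs;
	struct mutex		job_lock;
	void			*owner;
	int (*free_job)(struct amdgpu_job *job);
};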
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c	88
1 file changed, 34 insertions, 54 deletions
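
As context for the hunks below, a hedged caller-side sketch of the reworked amdgpu_sched_ib_submit_kernel_helper(); the functions example_free_job/example_submit and the wait/error handling are illustrative only and not part of this patch:

/* Illustrative only: the free_job callback now takes the new amdgpu_job. */
static int example_free_job(struct amdgpu_job *job)
{
	/* drop any caller-side references attached to the job's IBs */
	return 0;
}

static int example_submit(struct amdgpu_device *adev, struct amdgpu_ring *ring,
			  struct amdgpu_ib *ib, void *owner)
{
	struct fence *f = NULL;
	int r;

	/* submit one IB; on success *f holds a reference to the job's fence */
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 example_free_job, owner, &f);
	if (r)
		return r;

	r = fence_wait(f, false);	/* optional: block until the job completes */
	fence_put(f);
	return r;
}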
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index a86e38158afa..5b1ae18f5e8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,81 +27,58 @@
#include <drm/drmP.h>
#include "amdgpu.h"
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_job *job)
-{
- int r = 0;
- struct amdgpu_cs_parser *sched_job;
- if (!job || !job->data) {
- DRM_ERROR("job is null\n");
- return -EINVAL;
- }
-
- sched_job = (struct amdgpu_cs_parser *)job->data;
- if (sched_job->prepare_job) {
- r = sched_job->prepare_job(sched_job);
- if (r) {
- DRM_ERROR("Prepare job error\n");
- schedule_work(&sched_job->job_work);
- }
- }
- return r;
-}
-
static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
struct amd_sched_job *job)
{
int r = 0;
- struct amdgpu_cs_parser *sched_job;
+ struct amdgpu_job *sched_job;
struct amdgpu_fence *fence;
- if (!job || !job->data) {
+ if (!job) {
DRM_ERROR("job is null\n");
return NULL;
}
- sched_job = (struct amdgpu_cs_parser *)job->data;
+ sched_job = (struct amdgpu_job *)job;
mutex_lock(&sched_job->job_lock);
r = amdgpu_ib_schedule(sched_job->adev,
sched_job->num_ibs,
sched_job->ibs,
- sched_job->filp);
+ sched_job->owner);
if (r)
goto err;
fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
- if (sched_job->run_job) {
- r = sched_job->run_job(sched_job);
- if (r)
- goto err;
- }
-
mutex_unlock(&sched_job->job_lock);
return &fence->base;
err:
DRM_ERROR("Run job error\n");
mutex_unlock(&sched_job->job_lock);
- schedule_work(&sched_job->job_work);
+ sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
return NULL;
}
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
struct amd_sched_job *job)
{
- struct amdgpu_cs_parser *sched_job;
+ struct amdgpu_job *sched_job;
- if (!job || !job->data) {
+ if (!job) {
DRM_ERROR("job is null\n");
return;
}
- sched_job = (struct amdgpu_cs_parser *)job->data;
- schedule_work(&sched_job->job_work);
+ sched_job = (struct amdgpu_job *)job;
+ mutex_lock(&sched_job->job_lock);
+ if (sched_job->free_job)
+ sched_job->free_job(sched_job);
+ mutex_unlock(&sched_job->job_lock);
+ /* after processing job, free memory */
+ fence_put(&sched_job->base.s_fence->base);
+ kfree(sched_job);
}
struct amd_sched_backend_ops amdgpu_sched_ops = {
- .prepare_job = amdgpu_sched_prepare_job,
.run_job = amdgpu_sched_run_job,
.process_job = amdgpu_sched_process_job
};
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ibs,
unsigned num_ibs,
- int (*free_job)(struct amdgpu_cs_parser *),
+ int (*free_job)(struct amdgpu_job *),
void *owner,
struct fence **f)
{
int r = 0;
if (amdgpu_enable_scheduler) {
- struct amdgpu_cs_parser *sched_job =
- amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
- ibs, num_ibs);
- if(!sched_job) {
+ struct amdgpu_job *job =
+ kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+ if (!job)
return -ENOMEM;
- }
- sched_job->free_job = free_job;
- mutex_lock(&sched_job->job_lock);
- r = amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx.rings[ring->idx].entity,
- sched_job, &sched_job->s_fence);
+ job->base.sched = ring->scheduler;
+ job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+ job->adev = adev;
+ job->ibs = ibs;
+ job->num_ibs = num_ibs;
+ job->owner = owner;
+ mutex_init(&job->job_lock);
+ job->free_job = free_job;
+ mutex_lock(&job->job_lock);
+ r = amd_sched_push_job((struct amd_sched_job *)job);
if (r) {
- mutex_unlock(&sched_job->job_lock);
- kfree(sched_job);
+ mutex_unlock(&job->job_lock);
+ kfree(job);
return r;
}
- ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
- *f = fence_get(&sched_job->s_fence->base);
- mutex_unlock(&sched_job->job_lock);
+ ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
+ *f = fence_get(&job->base.s_fence->base);
+ mutex_unlock(&job->job_lock);
} else {
r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
if (r)