diff options
author | Chunming Zhou <david1.zhou@amd.com> | 2015-08-06 10:19:12 +0300 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-08-17 23:51:06 +0300 |
commit | 953e8fd4e734857f6dabbaf325035bf10c4a9c7a (patch) | |
tree | 10336decabe4e5a1e5a4c9a15b3a4ec26d1e4c83 /drivers/gpu/drm/amd/scheduler | |
parent | 6f0e54a964932d3d5252ac1ff7ab153c984a5d51 (diff) | |
download | linux-953e8fd4e734857f6dabbaf325035bf10c4a9c7a.tar.xz |
drm/amdgpu: use amd_sched_job in its backend ops
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 30 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 11 |
2 files changed, 20 insertions, 21 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 438dc23f4bb3..33b4f55e48b1 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -291,8 +291,15 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched, */ int amd_sched_push_job(struct amd_gpu_scheduler *sched, struct amd_sched_entity *c_entity, - void *job) + void *data) { + struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job), + GFP_KERNEL); + if (!job) + return -ENOMEM; + job->sched = sched; + job->s_entity = c_entity; + job->data = data; while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *), &c_entity->queue_lock) != sizeof(void *)) { /** @@ -366,7 +373,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) atomic64_dec(&sched->hw_rq_count); spin_unlock_irqrestore(&sched->queue_lock, flags); - sched->ops->process_job(sched, sched_job->job); + sched->ops->process_job(sched, sched_job); kfree(sched_job); wake_up_interruptible(&sched->wait_queue); } @@ -374,7 +381,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) static int amd_sched_main(void *param) { int r; - void *job; + struct amd_sched_job *job; struct sched_param sparam = {.sched_priority = 1}; struct amd_sched_entity *c_entity = NULL; struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; @@ -382,7 +389,6 @@ static int amd_sched_main(void *param) sched_setscheduler(current, SCHED_FIFO, &sparam); while (!kthread_should_stop()) { - struct amd_sched_job *sched_job = NULL; struct fence *fence; wait_event_interruptible(sched->wait_queue, @@ -394,26 +400,18 @@ static int amd_sched_main(void *param) r = sched->ops->prepare_job(sched, c_entity, job); if (!r) { unsigned long flags; - sched_job = kzalloc(sizeof(struct amd_sched_job), - GFP_KERNEL); - if (!sched_job) { - WARN(true, "No memory to allocate\n"); - continue; - } - sched_job->job = job; - 
sched_job->sched = sched; spin_lock_irqsave(&sched->queue_lock, flags); - list_add_tail(&sched_job->list, &sched->active_hw_rq); + list_add_tail(&job->list, &sched->active_hw_rq); atomic64_inc(&sched->hw_rq_count); spin_unlock_irqrestore(&sched->queue_lock, flags); } mutex_lock(&sched->sched_lock); - fence = sched->ops->run_job(sched, c_entity, sched_job); + fence = sched->ops->run_job(sched, c_entity, job); if (fence) { - r = fence_add_callback(fence, &sched_job->cb, + r = fence_add_callback(fence, &job->cb, amd_sched_process_job); if (r == -ENOENT) - amd_sched_process_job(fence, &sched_job->cb); + amd_sched_process_job(fence, &job->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); fence_put(fence); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index e7cc40a6993b..f54615d6a500 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -76,7 +76,8 @@ struct amd_sched_job { struct list_head list; struct fence_cb cb; struct amd_gpu_scheduler *sched; - void *job; + struct amd_sched_entity *s_entity; + void *data; }; /** @@ -86,11 +87,12 @@ struct amd_sched_job { struct amd_sched_backend_ops { int (*prepare_job)(struct amd_gpu_scheduler *sched, struct amd_sched_entity *c_entity, - void *job); + struct amd_sched_job *job); struct fence *(*run_job)(struct amd_gpu_scheduler *sched, struct amd_sched_entity *c_entity, struct amd_sched_job *job); - void (*process_job)(struct amd_gpu_scheduler *sched, void *job); + void (*process_job)(struct amd_gpu_scheduler *sched, + struct amd_sched_job *job); }; /** @@ -120,12 +122,11 @@ struct amd_gpu_scheduler *amd_sched_create(void *device, uint32_t granularity, uint32_t preemption, uint32_t hw_submission); - int amd_sched_destroy(struct amd_gpu_scheduler *sched); int amd_sched_push_job(struct amd_gpu_scheduler *sched, struct amd_sched_entity *c_entity, - void *job); + void *data); int 
amd_sched_wait_emit(struct amd_sched_entity *c_entity, uint64_t seq, |