author      Christian König <christian.koenig@amd.com>    2016-05-19 10:54:15 +0300
committer   Alex Deucher <alexander.deucher@amd.com>      2016-07-07 21:50:54 +0300
commit      c5f74f7802775b9ccdb0a4fd90e0c7d0b03da9fa (patch)
tree        3db2ce910290e8005a5d562c2a107d32f9c0f363 /drivers/gpu/drm/amd/scheduler
parent      f42d20a94284149bc6815a66ddb0b449f9f2fe74 (diff)
download    linux-c5f74f7802775b9ccdb0a4fd90e0c7d0b03da9fa.tar.xz
drm/amdgpu: fix and cleanup job destruction
Remove the job reference counting and instead destroy the job from a
work item which blocks on any potentially running timeout handler.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
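
In short, the new destruction path is a two-stage handoff: the scheduler
fence's completion callback only queues a work item, and that work item
calls cancel_delayed_work_sync() so it cannot race with a timeout (TDR)
handler that is still running before it hands the job back to the driver
for freeing. A condensed sketch of that pattern, using the amd_sched_job
fields declared in gpu_scheduler.h below (ring_mirror_list locking and
re-arming of the next job's TDR are omitted here, see the full diff):

/* Sketch only: names follow the patch below, list handling elided. */

static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);

	/* Fence callbacks may run in atomic context: just defer the
	 * heavyweight cleanup to a work item. */
	schedule_work(&job->finish_work);
}

static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);

	/* Blocks until a concurrently running timeout handler returns,
	 * so work_tdr can never dereference a freed job. */
	cancel_delayed_work_sync(&s_job->work_tdr);

	/* The driver, not the scheduler, does the final free. */
	s_job->sched->ops->free_job(s_job);
}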
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  46
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  25
2 files changed, 24 insertions, 47 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index cb56d9065e43..2425172e612e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -319,19 +319,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 	return added;
 }
 
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
-						 cb_free_job);
-
-	schedule_work(&job->work_free_job);
-}
-
 /* job_finish is called after hw fence signaled, and
  * the job had already been deleted from ring_mirror_list
  */
-static void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
 {
-	struct amd_sched_job *next;
+	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+						   finish_work);
 	struct amd_gpu_scheduler *sched = s_job->sched;
 	unsigned long flags;
 
@@ -339,19 +333,26 @@ static void amd_sched_job_finish(struct amd_sched_job *s_job)
 	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_del_init(&s_job->node);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		if (cancel_delayed_work(&s_job->work_tdr))
-			amd_sched_job_put(s_job);
+		struct amd_sched_job *next;
+
+		cancel_delayed_work_sync(&s_job->work_tdr);
 
 		/* queue TDR for next job */
 		next = list_first_entry_or_null(&sched->ring_mirror_list,
 						struct amd_sched_job, node);
 
-		if (next) {
-			amd_sched_job_get(next);
+		if (next)
 			schedule_delayed_work(&next->work_tdr, sched->timeout);
-		}
 	}
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	sched->ops->free_job(s_job);
+}
+
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+						 finish_cb);
+	schedule_work(&job->finish_work);
 }
 
 static void amd_sched_job_begin(struct amd_sched_job *s_job)
@@ -364,10 +365,7 @@ static void amd_sched_job_begin(struct amd_sched_job *s_job)
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    list_first_entry_or_null(&sched->ring_mirror_list,
 				     struct amd_sched_job, node) == s_job)
-	{
-		amd_sched_job_get(s_job);
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
-	}
 
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
@@ -390,9 +388,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
-	fence_add_callback(&sched_job->s_fence->base,
-			   &sched_job->cb_free_job, amd_sched_free_job);
 	trace_amd_sched_job(sched_job);
+	fence_add_callback(&sched_job->s_fence->base, &sched_job->finish_cb,
+			   amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
@@ -401,20 +399,17 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 int amd_sched_job_init(struct amd_sched_job *job,
 		       struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *entity,
-		       void (*free_cb)(struct kref *refcount),
 		       void *owner, struct fence **fence)
 {
-	INIT_LIST_HEAD(&job->node);
-	kref_init(&job->refcount);
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
 
-	job->s_fence->s_job = job;
+	INIT_WORK(&job->finish_work, amd_sched_job_finish);
+	INIT_LIST_HEAD(&job->node);
 	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
-	job->free_callback = free_cb;
 
 	if (fence)
 		*fence = &job->s_fence->base;
@@ -468,9 +463,6 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	struct amd_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-
-	amd_sched_job_finish(s_fence->s_job);
-
 	amd_sched_fence_signal(s_fence);
 	trace_amd_sched_process_job(s_fence);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f0de46ce7afe..e63034ec7fa4 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -74,19 +74,16 @@ struct amd_sched_fence {
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void				*owner;
-	struct amd_sched_job		*s_job;
 };
 
 struct amd_sched_job {
-	struct kref			refcount;
 	struct amd_gpu_scheduler	*sched;
 	struct amd_sched_entity		*s_entity;
 	struct amd_sched_fence		*s_fence;
-	struct fence_cb			cb_free_job;
-	struct work_struct		work_free_job;
-	struct list_head		node;
-	struct delayed_work		work_tdr;
-	void (*free_callback)(struct kref *refcount);
+	struct fence_cb			finish_cb;
+	struct work_struct		finish_work;
+	struct list_head		node;
+	struct delayed_work		work_tdr;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -109,6 +106,7 @@ struct amd_sched_backend_ops {
 	struct fence *(*dependency)(struct amd_sched_job *sched_job);
 	struct fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
+	void (*free_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
@@ -154,18 +152,5 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
 		       struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *entity,
-		       void (*free_cb)(struct kref* refcount),
 		       void *owner, struct fence **fence);
-static inline void amd_sched_job_get(struct amd_sched_job *job)
-{
-	if (job)
-		kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job)
-{
-	if (job)
-		kref_put(&job->refcount, job->free_callback);
-}
-
 #endif
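
With the new free_job hook in amd_sched_backend_ops, the driver rather than
the scheduler supplies the final free; the amdgpu side of that wiring is
outside this diffstat. A minimal, hypothetical backend (my_job, my_free_job
and my_sched_ops are illustrative names, not taken from the tree) could look
like:

#include <linux/slab.h>

/* Hypothetical driver job embedding the scheduler job as its first member. */
struct my_job {
	struct amd_sched_job	base;
	/* driver-private command buffers, fences, ... */
};

/* Called from amd_sched_job_finish() once the TDR work can no longer run,
 * so everything the timeout handler might touch is safe to release here. */
static void my_free_job(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	kfree(job);
}

static struct amd_sched_backend_ops my_sched_ops = {
	/* .dependency, .run_job and .timedout_job omitted for brevity */
	.free_job = my_free_job,
};

Moving the final free behind cancel_delayed_work_sync() is what makes the
reference counting unnecessary: the job can no longer be freed while its
timeout handler is still executing.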