Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
 -rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 350
 1 file changed, 206 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7ce25281c74c..e2cda28a1af4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -64,14 +64,9 @@
* credit limit, the job won't be executed. Instead, the scheduler will wait
* until the credit count has decreased enough to not overflow its credit limit.
* This implies waiting for previously executed jobs.
- *
- * Optionally, drivers may register a callback (update_job_credits) provided by
- * struct drm_sched_backend_ops to update the job's credits dynamically. The
- * scheduler executes this callback every time the scheduler considers a job for
- * execution and subsequently checks whether the job fits the scheduler's credit
- * limit.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -84,18 +79,11 @@
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
+#include "sched_internal.h"
+
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map drm_sched_lockdep_map = {
- .name = "drm_sched_lockdep_map"
-};
-#endif
-
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -109,9 +97,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
u32 credits;
- drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
- atomic_read(&sched->credit_count),
- &credits));
+ WARN_ON(check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
return credits;
}
@@ -129,23 +117,18 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
{
struct drm_sched_job *s_job;
- s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ s_job = drm_sched_entity_queue_peek(entity);
if (!s_job)
return false;
- if (sched->ops->update_job_credits) {
- s_job->credits = sched->ops->update_job_credits(s_job);
-
- drm_WARN(sched, !s_job->credits,
- "Jobs with zero credits bypass job-flow control.\n");
- }
-
/* If a job exceeds the credit limit, truncate it to the credit limit
* itself to guarantee forward progress.
*/
- if (drm_WARN(sched, s_job->credits > sched->credit_limit,
- "Jobs may not exceed the credit limit, truncate.\n"))
+ if (s_job->credits > sched->credit_limit) {
+ dev_WARN(sched->dev,
+ "Jobs may not exceed the credit limit, truncate.\n");
s_job->credits = sched->credit_limit;
+ }
return drm_sched_available_credits(sched) >= s_job->credits;
}
@@ -280,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current
- * entity in terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
}
}
list_for_each_entry(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current entity in
- * terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
if (entity == rq->current_entity)
break;
@@ -320,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
spin_unlock(&rq->lock);
return NULL;
+
+found:
+ if (!drm_sched_can_queue(sched, entity)) {
+ /*
+ * If scheduler cannot take more jobs signal the caller to not
+ * consider lower priority queues.
+ */
+ entity = ERR_PTR(-ENOSPC);
+ } else {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ }
+
+ spin_unlock(&rq->lock);
+
+ return entity;
}
/**
@@ -391,11 +366,16 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished))
__drm_sched_run_free_queue(sched);
+}
+
+static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_run_free_queue(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -403,7 +383,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -413,7 +393,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -548,11 +528,37 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
spin_unlock(&sched->job_list_lock);
}
+/**
+ * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
+ * @sched: scheduler instance
+ * @job: job to be reinserted on the pending list
+ *
+ * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
+ * hung and is making progress, the scheduler must reinsert the job back into
+ * @sched->pending_list. Otherwise, the job and its resources won't be freed
+ * through the &struct drm_sched_backend_ops.free_job callback.
+ *
+ * This function must be used in "false timeout" cases only.
+ */
+static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job)
+{
+ spin_lock(&sched->job_list_lock);
+ list_add(&job->list, &sched->pending_list);
+
+ /* After reinserting the job, the scheduler enqueues the free-job work
+ * again if ready. Otherwise, a signaled job could be added to the
+ * pending list, but never freed.
+ */
+ drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -563,9 +569,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -580,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+
+ if (status == DRM_GPU_SCHED_STAT_NO_HANG)
+ drm_sched_job_reinsert_on_false_timeout(sched, job);
} else {
spin_unlock(&sched->job_list_lock);
}
@@ -602,6 +612,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
* This function is typically used for reset recovery (see the docu of
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler teardown, i.e., before calling drm_sched_fini().
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -611,10 +625,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -627,7 +641,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from later to earlier one and either deactive
* their HW callbacks or remove them from pending list if they already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
@@ -686,15 +701,19 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler startup. The scheduler itself is fully operational after
* drm_sched_init() succeeded.
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also concurrent GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
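
The drm_sched_stop()/drm_sched_start() pair documented here is the counterpart to the NO_HANG case above: when the timeout is genuine, the driver performs its reset recovery between the two calls. A hedged outline of the usual ordering inside timedout_job, where foo_gpu_reset() is an assumed driver-specific routine:

    static enum drm_gpu_sched_stat foo_timedout_job_reset(struct drm_sched_job *bad)
    {
            struct drm_gpu_scheduler *sched = bad->sched;

            /* Cancel the scheduler's work items and park the bad job. */
            drm_sched_stop(sched, bad);

            /* Driver-specific hardware reset (assumed helper). */
            foo_gpu_reset(sched);

            /* Requeue the pending jobs and restart timeout handling. */
            drm_sched_start(sched, 0);

            return DRM_GPU_SCHED_STAT_RESET;
    }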
@@ -776,6 +795,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the schedulers
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -796,14 +817,15 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
* or worse--a blank screen--leave a trail in the
* logs, so this can be debugged easier.
*/
- drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+ dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
return -ENOENT;
}
@@ -822,7 +844,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
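
With the extra parameter, drivers forward the submitting client's id so the new trace events can attribute jobs to processes. A short, hedged example of the updated call from a submit path; foo_job, its base member and the surrounding variables are illustrative only:

    struct foo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
    int ret;

    if (!job)
            return -ENOMEM;

    /* file_priv is the struct drm_file of the submitting client. */
    ret = drm_sched_job_init(&job->base, entity, 1 /* credits */,
                             NULL /* debug owner */, file_priv->client_id);
    if (ret) {
            kfree(job);
            return ret;
    }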
@@ -840,11 +862,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function was called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -858,7 +884,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
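
Because drm_sched_job_arm() is the point of no return, the submit path has a fixed shape: dependencies may be added and the job may still be aborted before arming, but once armed it must be pushed. A hedged outline continuing from a successful drm_sched_job_init(); in_fence and the job layout are placeholders:

    ret = drm_sched_job_add_dependency(&job->base, in_fence);
    if (ret) {
            /* Not yet armed, so aborting with a cleanup is still allowed. */
            drm_sched_job_cleanup(&job->base);
            return ret;
    }

    drm_sched_job_arm(&job->base);

    /* Point of no return: the job must now be pushed, not cleaned up directly. */
    drm_sched_entity_push_job(&job->base);
    return 0;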
@@ -998,6 +1023,29 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
+ * drm_sched_job_has_dependency - check whether fence is the job's dependency
+ * @job: scheduler job to check
+ * @fence: fence to look for
+ *
+ * Returns:
+ * True if @fence is found within the job's dependencies, or otherwise false.
+ */
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *f;
+ unsigned long index;
+
+ xa_for_each(&job->dependencies, index, f) {
+ if (f == fence)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_sched_job_has_dependency);
+
+/**
* drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
*
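
One expected use of the new drm_sched_job_has_dependency() helper is deduplicating user-supplied fences before registering them. A hedged fragment, with the in_fences array and counters being illustrative only:

    for (i = 0; i < num_in_fences; i++) {
            struct dma_fence *fence = in_fences[i];

            /* Skip fences this job already waits on. */
            if (drm_sched_job_has_dependency(&job->base, fence))
                    continue;

            /* drm_sched_job_add_dependency() consumes the reference it is given. */
            ret = drm_sched_job_add_dependency(&job->base, dma_fence_get(fence));
            if (ret)
                    return ret;
    }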
@@ -1006,9 +1054,12 @@ EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
* Drivers should call this from their error unwind code if @job is aborted
* before drm_sched_job_arm() is called.
*
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence number etc. Once that function has been called, you *must*
+ * submit it with drm_sched_entity_push_job() and cannot simply abort it by
+ * calling drm_sched_job_cleanup().
+ *
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1016,10 +1067,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before committing to run it */
+ /* The job was aborted before it has been committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
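
For jobs that were pushed and ran, the cleanup call lands in the driver's free_job callback rather than in the submit path. A hedged sketch of such a callback; foo_job and its hw_fence member are assumptions:

    static void foo_free_job(struct drm_sched_job *sched_job)
    {
            struct foo_job *job = container_of(sched_job, struct foo_job, base);

            /* Releases the scheduler fences and the dependency xarray. */
            drm_sched_job_cleanup(sched_job);

            /* Driver-side teardown. */
            dma_fence_put(job->hw_fence);
            kfree(job);
    }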
@@ -1166,14 +1222,11 @@ static void drm_sched_free_job_work(struct work_struct *w)
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
- if (READ_ONCE(sched->pause_submit))
- return;
-
job = drm_sched_get_finished_job(sched);
if (job)
sched->ops->free_job(job);
- drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1192,9 +1245,6 @@ static void drm_sched_run_job_work(struct work_struct *w)
struct drm_sched_job *sched_job;
int r;
- if (READ_ONCE(sched->pause_submit))
- return;
-
/* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
@@ -1212,21 +1262,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
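
The comment added above pins down the reference-counting contract: run_job() returns a reference that already belongs to the scheduler, and the scheduler drops it once the completion callback has been installed (or the job has been completed directly). On the driver side that typically looks like the following hedged sketch, where foo_ring_submit() is an assumed helper returning a freshly referenced hardware fence:

    static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
    {
            struct foo_job *job = container_of(sched_job, struct foo_job, base);
            struct dma_fence *fence;

            fence = foo_ring_submit(job);
            if (IS_ERR(fence))
                    return fence;

            /* The driver keeps its own reference in the job... */
            job->hw_fence = fence;

            /* ...and returns an extra one that now belongs to the scheduler. */
            return dma_fence_get(fence);
    }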
@@ -1236,47 +1289,50 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_run_job_queue(sched);
}
+static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
+{
+#if (IS_ENABLED(CONFIG_LOCKDEP))
+ static struct lockdep_map map = {
+ .name = "drm_sched_lockdep_map"
+ };
+
+ /*
+ * Avoid leaking a lockdep map on each drm sched creation and
+ * destruction by using a single lockdep map for all drm sched
+ * allocated submit_wq.
+ */
+
+ return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
+#else
+ return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
* @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- * allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- * used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
*
* Return 0 on success, otherwise error code.
*/
-int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
{
int i;
- sched->ops = ops;
- sched->credit_limit = credit_limit;
- sched->name = name;
- sched->timeout = timeout;
- sched->timeout_wq = timeout_wq ? : system_wq;
- sched->hang_limit = hang_limit;
- sched->score = score ? score : &sched->_score;
- sched->dev = dev;
+ sched->ops = args->ops;
+ sched->credit_limit = args->credit_limit;
+ sched->name = args->name;
+ sched->timeout = args->timeout;
+ sched->hang_limit = args->hang_limit;
+ sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+ sched->score = args->score ? args->score : &sched->_score;
+ sched->dev = args->dev;
- if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+ if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
/* This is a gross violation--tell drivers what the problem is.
*/
- drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+ dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
__func__);
return -EINVAL;
} else if (sched->sched_rq) {
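
With the argument struct, drivers fill in struct drm_sched_init_args instead of passing ten positional parameters; fields left NULL or zero fall back to the defaults handled above (an internally allocated ordered workqueue, system_wq for timeouts, the internal score). A hedged example for a hypothetical driver, where foo_sched_ops, foo->sched and dev are placeholders:

    const struct drm_sched_init_args args = {
            .ops            = &foo_sched_ops,
            .submit_wq      = NULL,                 /* scheduler allocates an ordered wq */
            .num_rqs        = DRM_SCHED_PRIORITY_COUNT,
            .credit_limit   = 64,
            .hang_limit     = 0,
            .timeout        = msecs_to_jiffies(500),
            .timeout_wq     = NULL,                 /* defaults to system_wq */
            .score          = NULL,
            .name           = "foo_ring0",
            .dev            = dev,
    };
    int ret;

    ret = drm_sched_init(&foo->sched, &args);
    if (ret)
            return ret;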
@@ -1284,32 +1340,26 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
* fine-tune their DRM calling order, and return all
* is good.
*/
- drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+ dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
return 0;
}
- if (submit_wq) {
- sched->submit_wq = submit_wq;
+ if (args->submit_wq) {
+ sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
-#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
- WQ_MEM_RECLAIM,
- &drm_sched_lockdep_map);
-#else
- sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
-#endif
+ sched->submit_wq = drm_sched_alloc_wq(args->name);
if (!sched->submit_wq)
return -ENOMEM;
sched->own_submit_wq = true;
}
- sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+ sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
goto Out_check_own;
- sched->num_rqs = num_rqs;
+ sched->num_rqs = args->num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
@@ -1339,11 +1389,23 @@ Out_unroll:
Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
- drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+ dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
+static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job, *tmp;
+
+ /* All other accessors are stopped. No locking necessary. */
+ list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
+ sched->ops->cancel_job(job);
+ list_del(&job->list);
+ sched->ops->free_job(job);
+ }
+}
+
/**
* drm_sched_fini - Destroy a gpu scheduler
*
@@ -1351,18 +1413,11 @@ EXPORT_SYMBOL(drm_sched_init);
*
* Tears down and cleans up the scheduler.
*
- * This stops submission of new jobs to the hardware through
- * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
- * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
- * There is no solution for this currently. Thus, it is up to the driver to make
- * sure that
- * a) drm_sched_fini() is only called after for all submitted jobs
- * drm_sched_backend_ops.free_job() has been called or that
- * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
- * after drm_sched_fini() ran are freed manually.
- *
- * FIXME: Take care of the above problem and prevent this function from leaking
- * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
+ * This stops submission of new jobs to the hardware through &struct
+ * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
+ * is implemented, all jobs will be canceled through it and afterwards cleaned
+ * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
+ * implemented, memory could leak.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
@@ -1392,11 +1447,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ /* Avoid memory leaks if supported by the driver. */
+ if (sched->ops->cancel_job)
+ drm_sched_cancel_remaining_jobs(sched);
+
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+
+ if (!list_empty(&sched->pending_list))
+ dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
}
EXPORT_SYMBOL(drm_sched_fini);
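
Drivers only get this leak-free teardown if they implement the cancel_job callback; drm_sched_fini() then cancels every job still on the pending list and releases it through free_job, as implemented above. A hedged sketch of the corresponding ops table and callback, with all foo_* names being placeholders:

    static void foo_cancel_job(struct drm_sched_job *sched_job)
    {
            struct foo_job *job = container_of(sched_job, struct foo_job, base);

            /*
             * Stop the hardware from completing this job and signal its
             * fences with an error so that waiters are released
             * (assumed driver helper).
             */
            foo_ring_cancel(job);
    }

    static const struct drm_sched_backend_ops foo_sched_ops = {
            .run_job        = foo_run_job,
            .timedout_job   = foo_timedout_job,
            .free_job       = foo_free_job,
            .cancel_job     = foo_cancel_job,       /* enables cleanup in drm_sched_fini() */
    };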