Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
 drivers/gpu/drm/scheduler/sched_main.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 829579c41c6b..c63543132f9d 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -66,6 +66,7 @@
* This implies waiting for previously executed jobs.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -391,7 +392,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -401,7 +402,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -551,9 +552,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -599,10 +601,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -615,7 +617,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from later to earlier one and either deactivate
* their HW callbacks or remove them from pending list if they already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
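
The two comments above describe the contract around drm_sched_stop(): it may
only run once the scheduler's work items have been cancelled, and it is
typically invoked from a driver's &struct drm_sched_backend_ops.timedout_job
callback. As a hedged sketch (not part of this patch), a hypothetical driver's
timeout handler using that sequence could look like this; all "my_"-prefixed
names are assumptions:

    static enum drm_gpu_sched_stat
    my_timedout_job(struct drm_sched_job *sched_job)
    {
            struct drm_gpu_scheduler *sched = sched_job->sched;

            /* Cancel the scheduler's work items; pending jobs stay listed. */
            drm_sched_stop(sched, sched_job);

            /* Driver-specific engine reset would go here. */
            my_engine_reset(sched);

            /* Requeue the scheduler's work items and resume processing. */
            drm_sched_start(sched, 0);

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }
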
@@ -680,9 +683,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also, concurrent GPU recoveries can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
@@ -764,6 +767,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the scheduler's
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -784,7 +789,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -810,7 +816,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
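
With the new @drm_client_id parameter, callers forward the DRM client id of
the submitting process so that it ends up in the scheduler fence. A hedged
sketch of an updated call site, assuming a submit path that has a
struct drm_file at hand; the "my_"-prefixed types and helpers are
hypothetical:

    static int my_submit(struct my_device *mdev, struct drm_file *file,
                         struct my_job *job)
    {
            int ret;

            /* Pass &struct drm_file.client_id through to the scheduler fence. */
            ret = drm_sched_job_init(&job->base, &mdev->entity, 1, job,
                                     file->client_id);
            if (ret)
                    return ret;

            drm_sched_job_arm(&job->base);
            drm_sched_entity_push_job(&job->base);
            return 0;
    }
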
@@ -850,7 +856,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
@@ -1229,7 +1234,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
/*
* The run_job() callback must by definition return a fence whose
* refcount has been incremented for the scheduler already.
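
For a driver, that refcount rule means taking an extra reference on the
hardware fence before handing it to the scheduler. A hedged sketch of a
hypothetical &struct drm_sched_backend_ops.run_job implementation honouring
it; the "my_"-prefixed names are assumptions:

    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
            struct my_job *job = container_of(sched_job, struct my_job, base);
            struct dma_fence *fence;

            /* Submit to the hardware; the driver holds one reference. */
            fence = my_hw_submit(job);

            /* Return with an extra reference taken for the scheduler. */
            return dma_fence_get(fence);
    }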