 drivers/gpu/drm/scheduler/sched_entity.c | 10 +++++-----
 drivers/gpu/drm/scheduler/sched_main.c   |  6 ++----
 include/drm/gpu_scheduler.h              |  6 +++---
 3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 90fd9c30ae5a..d631521a9679 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -148,7 +148,7 @@ static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
struct drm_sched_rq *rq = NULL;
- unsigned int min_score = UINT_MAX, num_score;
+ unsigned int min_jobs = UINT_MAX, num_jobs;
int i;
for (i = 0; i < entity->num_sched_list; ++i) {
@@ -159,9 +159,9 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
continue;
}
- num_score = atomic_read(&sched->score);
- if (num_score < min_score) {
- min_score = num_score;
+ num_jobs = atomic_read(&sched->num_jobs);
+ if (num_jobs < min_jobs) {
+ min_jobs = num_jobs;
rq = &entity->sched_list[i]->sched_rq[entity->priority];
}
}
@@ -516,7 +516,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
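For context, the selection loop changed above picks the scheduler with the fewest queued jobs. Below is a minimal userspace model of that logic; C11 stdatomic stands in for the kernel's atomic_t, and the struct and function names are illustrative, not the kernel's:

#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>

/* Illustrative stand-in for the few drm_gpu_scheduler fields used here. */
struct sched_model {
	atomic_uint num_jobs;	/* jobs queued on this scheduler */
	int ready;		/* models sched->ready */
};

/* Models drm_sched_entity_get_free_sched() after the rename: walk the
 * entity's scheduler list and pick the ready scheduler with the fewest
 * queued jobs. */
struct sched_model *pick_least_loaded(struct sched_model **list, size_t n)
{
	unsigned int min_jobs = UINT_MAX, num_jobs;
	struct sched_model *best = NULL;
	size_t i;

	for (i = 0; i < n; ++i) {
		if (!list[i]->ready)
			continue;	/* skip schedulers that are not ready */
		num_jobs = atomic_load(&list[i]->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			best = list[i];
		}
	}
	return best;
}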
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bb53a80f047c..f4ac38b435f7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -649,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(&sched->num_jobs);
trace_drm_sched_process_job(s_fence);
@@ -824,7 +822,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
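With these hunks the counter has a simple, balanced lifecycle: one increment per job pushed in drm_sched_entity_push_job() and one decrement per job completed in drm_sched_process_job(); the per-runqueue entity add/remove paths no longer touch it, so it counts queued jobs rather than a mixed "score". A small userspace model of that accounting (stdatomic again stands in for atomic_t; the type is redeclared so the sketch stands alone):

#include <stdatomic.h>

struct sched_jobs_model {
	atomic_uint num_jobs;	/* queued, not-yet-completed jobs */
};

/* Mirrors atomic_inc(&entity->rq->sched->num_jobs) on job push. */
void model_push_job(struct sched_jobs_model *s)
{
	atomic_fetch_add(&s->num_jobs, 1);
}

/* Mirrors atomic_dec(&sched->num_jobs) when the job's fence signals. */
void model_job_done(struct sched_jobs_model *s)
{
	atomic_fetch_sub(&s->num_jobs, 1);
}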
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d8972836d248..ae39eacee250 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -262,7 +262,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job cross this limit then it is marked
*              guilty and will no longer be considered for scheduling.
- * @score: score to help loadbalancer pick a idle sched
+ * @num_jobs: the number of jobs queued in the scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -283,8 +283,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
- bool ready;
+ atomic_t num_jobs;
+ bool ready;
bool free_guilty;
};
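Taken together, the header change reduces to renaming the field and its kernel-doc. As a final sketch, the bookkeeping fields touched by this diff and their initialization, mirroring the atomic_set() in drm_sched_init(); the type below is a stand-in, not the kernel's struct drm_gpu_scheduler:

#include <stdatomic.h>
#include <stdbool.h>

struct gpu_scheduler_model {
	atomic_uint num_jobs;	/* jobs queued; read by the load balancer */
	bool ready;		/* underlying HW is ready to work */
	bool free_guilty;	/* hint to the timeout handler */
};

/* Mirrors the relevant part of drm_sched_init(): start with no queued jobs. */
void model_sched_init(struct gpu_scheduler_model *s)
{
	atomic_store(&s->num_jobs, 0);
	s->ready = false;
	s->free_guilty = false;
}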