summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/scheduler
diff options
context:
space:
mode:
authorNayan Deshmukh <nayan26deshmukh@gmail.com>2018-08-01 11:20:02 +0300
committerAlex Deucher <alexander.deucher@amd.com>2018-08-27 19:09:46 +0300
commitdf0ca30838eeddbd34d7573cdbfaf88c56ad3e65 (patch)
tree157e61d6e33943a831cea978ee76feba742ce8e5 /drivers/gpu/drm/scheduler
parent97ffa35b5dec4e68baa85e626b69ae4949a4ca2a (diff)
downloadlinux-df0ca30838eeddbd34d7573cdbfaf88c56ad3e65.tar.xz
drm/scheduler: move idle entities to scheduler with less load v2
This is the first attempt to move entities between schedulers to have dynamic load balancing. We just move entities with no jobs for now as moving the ones with jobs will lead to other complications like ensuring that the other scheduler does not remove a job from the current entity while we are moving. v2: remove unused variable and an unnecessary check Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c22
1 file changed, 18 insertions, 4 deletions
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3e13bdfa8710..61ea802ce492 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -520,6 +520,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (!sched_job)
return NULL;
+ sched_job->sched = sched;
+ sched_job->s_fence->sched = sched;
while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
if (drm_sched_entity_add_dependency_cb(entity))
return NULL;
@@ -550,11 +552,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = sched_job->sched;
- bool first = false;
+ struct drm_sched_rq *rq = entity->rq;
+ bool first, reschedule, idle;
- trace_drm_sched_job(sched_job, entity);
+ idle = entity->last_scheduled == NULL ||
+ dma_fence_is_signaled(entity->last_scheduled);
+ first = spsc_queue_count(&entity->job_queue) == 0;
+ reschedule = idle && first && (entity->num_rq_list > 1);
+ if (reschedule) {
+ rq = drm_sched_entity_get_free_sched(entity);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ spin_unlock(&entity->rq_lock);
+ }
+
+ trace_drm_sched_job(sched_job, entity);
atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -570,7 +584,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
}
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
- drm_sched_wakeup(sched);
+ drm_sched_wakeup(entity->rq->sched);
}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);