author     Jani Nikula <jani.nikula@intel.com>    2019-11-15 14:17:39 +0300
committer  Jani Nikula <jani.nikula@intel.com>    2019-11-15 14:17:39 +0300
commit     e205ceeb253723f4e4bdce619844ed678ae48276 (patch)
tree       b6f5e3b21f915320a86733504faf8206569b8057 /drivers/gpu/drm/scheduler/sched_main.c
parent     1cdc2330e8d396c9cbebfc75fba4c94d34f80782 (diff)
parent     17cc51390c141662748dbbc2fe98f3ed10f2e13e (diff)
Merge drm/drm-next into drm-intel-next-queued
Backmerge to get dfce90259d74 ("Backmerge i915 security patches from
commit 'ea0b163b13ff' into drm-next") and thus 100d46bd72ec ("Merge
Intel Gen8/Gen9 graphics fixes from Jon Bloomfield.").
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--   drivers/gpu/drm/scheduler/sched_main.c | 81
1 file changed, 56 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9a0ee74d82dc..3c57e84222ca 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -47,6 +47,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
+#include <linux/completion.h>
 #include <uapi/linux/sched/types.h>
 
 #include <drm/drm_print.h>
@@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 		list_for_each_entry_continue(entity, &rq->entities, list) {
 			if (drm_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
+				reinit_completion(&entity->entity_idle);
 				spin_unlock(&rq->lock);
 				return entity;
 			}
@@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 
 		if (drm_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
+			reinit_completion(&entity->entity_idle);
 			spin_unlock(&rq->lock);
 			return entity;
 		}
@@ -479,6 +482,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	struct drm_sched_job *s_job, *tmp;
 	uint64_t guilty_context;
 	bool found_guilty = false;
+	struct dma_fence *fence;
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +496,18 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
 		dma_fence_put(s_job->s_fence->parent);
-		s_job->s_fence->parent = sched->ops->run_job(s_job);
+		fence = sched->ops->run_job(s_job);
+
+		if (IS_ERR_OR_NULL(fence)) {
+			if (IS_ERR(fence))
+				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
+			s_job->s_fence->parent = NULL;
+		} else {
+			s_job->s_fence->parent = fence;
+		}
+
+
 	}
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -622,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 }
 
 /**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
  *
  * @sched: scheduler instance
  *
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready for it to be destroyed.
  */
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
+	struct drm_sched_job *job;
 	unsigned long flags;
 
-	/* Don't destroy jobs while the timeout worker is running */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !cancel_delayed_work(&sched->work_tdr))
-		return;
-
+	/*
+	 * Don't destroy jobs while the timeout worker is running OR thread
+	 * is being parked and hence assumed to not touch ring_mirror_list
+	 */
+	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !cancel_delayed_work(&sched->work_tdr)) ||
+	    __kthread_should_park(sched->thread))
+		return NULL;
 
-	while (!list_empty(&sched->ring_mirror_list)) {
-		struct drm_sched_job *job;
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 
-		job = list_first_entry(&sched->ring_mirror_list,
+	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
-		if (!dma_fence_is_signaled(&job->s_fence->finished))
-			break;
 
-		spin_lock_irqsave(&sched->job_list_lock, flags);
+	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from ring_mirror_list */
 		list_del_init(&job->node);
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-		sched->ops->free_job(job);
+	} else {
+		job = NULL;
+		/* queue timeout for next job */
+		drm_sched_start_timeout(sched);
 	}
 
-	/* queue timeout for next job */
-	spin_lock_irqsave(&sched->job_list_lock, flags);
-	drm_sched_start_timeout(sched);
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
+	return job;
 }
 
 /**
@@ -698,17 +715,27 @@ static int drm_sched_main(void *param)
 		struct drm_sched_fence *s_fence;
 		struct drm_sched_job *sched_job;
 		struct dma_fence *fence;
+		struct drm_sched_job *cleanup_job = NULL;
 
 		wait_event_interruptible(sched->wake_up_worker,
-					 (drm_sched_cleanup_jobs(sched),
+					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
 					 (!drm_sched_blocked(sched) &&
 					  (entity = drm_sched_select_entity(sched))) ||
-					 kthread_should_stop()));
+					 kthread_should_stop());
+
+		if (cleanup_job) {
+			sched->ops->free_job(cleanup_job);
+			/* queue timeout for next job */
+			drm_sched_start_timeout(sched);
+		}
 
 		if (!entity)
 			continue;
 
 		sched_job = drm_sched_entity_pop_job(entity);
+
+		complete(&entity->entity_idle);
+
 		if (!sched_job)
 			continue;
 
@@ -720,7 +747,7 @@ static int drm_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);
 
-		if (fence) {
+		if (!IS_ERR_OR_NULL(fence)) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_process_job);
@@ -730,8 +757,12 @@ static int drm_sched_main(void *param)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 			dma_fence_put(fence);
-		} else
+		} else {
+			if (IS_ERR(fence))
+				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
 			drm_sched_process_job(NULL, &sched_job->cb);
+		}
 
 		wake_up(&sched->job_scheduled);
 	}
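For context on the run_job error handling carried in by this backmerge, here is a minimal sketch of what a driver-side ->run_job() callback might look like under the behaviour shown above. It is an illustration only: my_job, to_my_job() and my_ring_submit() are hypothetical helpers, not part of this commit or of any real driver.

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver-side job wrapper (illustration only). */
struct my_job {
	struct drm_sched_job base;
};

static inline struct my_job *to_my_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct my_job, base);
}

/*
 * Hypothetical hardware submission helper, assumed to return 0 and a
 * hardware fence on success, or a negative errno on failure.
 */
int my_ring_submit(struct my_job *job, struct dma_fence **fence);

/*
 * Sketch of a ->run_job() callback under the contract in the diff above:
 * returning NULL still means "nothing to wait for", while returning an
 * ERR_PTR() now makes the scheduler call dma_fence_set_error() on
 * s_fence->finished before completing the job, both in drm_sched_main()
 * and in drm_sched_resubmit_jobs().
 */
static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = to_my_job(sched_job);
	struct dma_fence *fence;
	int ret;

	ret = my_ring_submit(job, &fence);
	if (ret)
		return ERR_PTR(ret);	/* propagated to the finished fence */

	return fence;	/* scheduler installs its callback, then drops this ref */
}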