Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |  24
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 191
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   |  18
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c     |  23
4 files changed, 199 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
 	    TP_ARGS(sched_job),
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
 	    TP_fast_assign(
 			   __entry->entity = sched_job->s_entity;
+			   __entry->sched_job = sched_job;
+			   __entry->fence = &sched_job->s_fence->base;
 			   __entry->name = sched_job->sched->name;
 			   __entry->job_count = kfifo_len(
 				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
-	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-		      __entry->entity, __entry->name, __entry->job_count,
-		      __entry->hw_job_count)
+	    TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+	    TP_PROTO(struct amd_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->base;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 3697eeeecf82..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,10 +30,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq		The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
@@ -174,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 }
 
 /**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
  * Destroy a context entity
  *
  * @sched       Pointer to scheduler instance
@@ -208,26 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 	amd_sched_wakeup(entity->sched);
 }
 
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct fence * fence = entity->dependency;
+	struct amd_sched_fence *s_fence;
+
+	if (fence->context == entity->fence_context) {
+		/* We can ignore fences from ourself */
+		fence_put(entity->dependency);
+		return false;
+	}
+
+	s_fence = to_amd_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched) {
+		/* Fence is from the same scheduler */
+		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+			/* Ignore it when it is already scheduled */
+			fence_put(entity->dependency);
+			return false;
+		}
+
+		/* Wait for fence to be scheduled */
+		entity->cb.func = amd_sched_entity_wakeup;
+		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+		return true;
+	}
+
+	if (!fence_add_callback(entity->dependency, &entity->cb,
+				amd_sched_entity_wakeup))
+		return true;
+
+	fence_put(entity->dependency);
+	return false;
+}
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job))) {
-
-		if (fence_add_callback(entity->dependency, &entity->cb,
-				       amd_sched_entity_wakeup))
-			fence_put(entity->dependency);
-		else
+	while ((entity->dependency = sched->ops->dependency(sched_job)))
+		if (amd_sched_entity_add_dependency_cb(entity))
 			return NULL;
-	}
 
 	return sched_job;
 }
@@ -241,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+	struct amd_gpu_scheduler *sched = sched_job->sched;
 	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
@@ -255,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(sched_job->sched);
+		amd_sched_wakeup(sched);
 
 	return added;
 }
@@ -267,22 +315,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 *
 * Returns 0 for success, negative error code otherwise.
 */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
-	struct amd_sched_fence *fence = amd_sched_fence_create(
-		entity, sched_job->owner);
-
-	if (!fence)
-		return -ENOMEM;
-
-	fence_get(&fence->base);
-	sched_job->s_fence = fence;
 
+	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-	trace_amd_sched_job(sched_job);
-
-	return 0;
 }
 
 /**
@@ -304,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -327,19 +366,50 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
 	struct amd_gpu_scheduler *sched = s_fence->sched;
+	unsigned long flags;
 
 	atomic_dec(&sched->hw_rq_count);
 	amd_sched_fence_signal(s_fence);
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+		cancel_delayed_work(&s_fence->dwork);
+		spin_lock_irqsave(&sched->fence_list_lock, flags);
+		list_del_init(&s_fence->list);
+		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+	}
+	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static void amd_sched_fence_work_func(struct work_struct *work)
+{
+	struct amd_sched_fence *s_fence =
+		container_of(work, struct amd_sched_fence, dwork.work);
+	struct amd_gpu_scheduler *sched = s_fence->sched;
+	struct amd_sched_fence *entity, *tmp;
+	unsigned long flags;
+
+	DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
+
+	/* Clean all pending fences */
+	spin_lock_irqsave(&sched->fence_list_lock, flags);
+	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
+		DRM_ERROR("  fence no %d\n", entity->base.seqno);
+		cancel_delayed_work(&entity->dwork);
+		list_del_init(&entity->list);
+		fence_put(&entity->base);
+	}
+	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+}
+
 static int amd_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
 	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
 	int r, count;
 
+	spin_lock_init(&sched->fence_list_lock);
+	INIT_LIST_HEAD(&sched->fence_list);
+
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
@@ -347,18 +417,32 @@ static int amd_sched_main(void *param)
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
 		struct fence *fence;
+		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
+
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
+
+		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
+			schedule_delayed_work(&s_fence->dwork, sched->timeout);
+			spin_lock_irqsave(&sched->fence_list_lock, flags);
+			list_add_tail(&s_fence->list, &sched->fence_list);
+			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+		}
+
 		atomic_inc(&sched->hw_rq_count);
 		fence = sched->ops->run_job(sched_job);
+		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
@@ -392,17 +476,25 @@ static int amd_sched_main(void *param)
 */
 int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   struct amd_sched_backend_ops *ops,
-		   unsigned hw_submission, const char *name)
+		   unsigned hw_submission, long timeout, const char *name)
 {
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
+	sched->timeout = timeout;
 	amd_sched_rq_init(&sched->sched_rq);
 	amd_sched_rq_init(&sched->kernel_rq);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab)
+			return -ENOMEM;
+	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -421,5 +513,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 */
 void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
-	kthread_stop(sched->thread);
+	if (sched->thread)
+		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 80b64dc22214..a0f0ae53aacd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,9 +27,14 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
+#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS
+
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -65,16 +70,18 @@ struct amd_sched_rq {
 struct amd_sched_fence {
 	struct fence                    base;
 	struct fence_cb                 cb;
+	struct list_head		scheduled_cb;
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
+	struct delayed_work		dwork;
+	struct list_head		list;
 };
 
 struct amd_sched_job {
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
-	void		                *owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -103,18 +110,21 @@ struct amd_sched_backend_ops {
 struct amd_gpu_scheduler {
 	struct amd_sched_backend_ops	*ops;
 	uint32_t			hw_submission_limit;
+	long				timeout;
 	const char			*name;
 	struct amd_sched_rq		sched_rq;
 	struct amd_sched_rq		kernel_rq;
 	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
 	atomic_t			hw_rq_count;
+	struct list_head		fence_list;
+	spinlock_t			fence_list_lock;
 	struct task_struct		*thread;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   struct amd_sched_backend_ops *ops,
-		   uint32_t hw_submission, const char *name);
+		   uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
@@ -123,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
-
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..87c78eecea64 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	struct amd_sched_fence *fence = NULL;
 	unsigned seq;
 
-	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
+
+	INIT_LIST_HEAD(&fence->scheduled_cb);
 	fence->owner = owner;
 	fence->sched = s_entity->sched;
 	spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+{
+	struct fence_cb *cur, *tmp;
+
+	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
+	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
+		list_del_init(&cur->node);
+		cur->func(&s_fence->base, cur);
+	}
+}
+
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 {
 	return "amd_sched";
@@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
 	return true;
 }
 
+static void amd_sched_fence_release(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amd_sched_fence_release,
 };
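
The slab bookkeeping in this series (sched_fence_slab / sched_fence_slab_ref) is shared by every scheduler instance: the first amd_sched_init() call creates the cache, the last amd_sched_fini() destroys it. Below is a minimal userspace sketch of that refcounted-singleton pattern, with malloc()/free() standing in for kmem_cache_create()/kmem_cache_destroy(); all names are invented for illustration, not kernel API.

/*
 * Userspace analogue of the sched_fence_slab refcounting above: the
 * first scheduler instance creates the shared cache, the last one to
 * go away destroys it.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static void *fence_cache;          /* stands in for sched_fence_slab */
static atomic_int fence_cache_ref; /* stands in for sched_fence_slab_ref */

static int sched_init_one(void)
{
	/* First caller (0 -> 1) creates the shared cache,
	 * mirroring atomic_inc_return(...) == 1 in the patch. */
	if (atomic_fetch_add(&fence_cache_ref, 1) == 0) {
		fence_cache = malloc(4096); /* kmem_cache_create() stand-in */
		if (!fence_cache)
			return -1;
		printf("cache created\n");
	}
	return 0;
}

static void sched_fini_one(void)
{
	/* Last caller (1 -> 0) tears the cache down,
	 * mirroring atomic_dec_and_test() in the patch. */
	if (atomic_fetch_sub(&fence_cache_ref, 1) == 1) {
		free(fence_cache); /* kmem_cache_destroy() stand-in */
		fence_cache = NULL;
		printf("cache destroyed\n");
	}
}

int main(void)
{
	sched_init_one(); /* e.g. gfx ring scheduler */
	sched_init_one(); /* e.g. compute ring scheduler */
	sched_fini_one();
	sched_fini_one(); /* cache destroyed here */
	return 0;
}

One detail worth noting in the patch itself: if kmem_cache_create() fails, amd_sched_init() returns -ENOMEM without dropping the reference it just took, so a failed init leaves the count elevated.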
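The other mechanism this diff introduces is the two-stage fence: amd_sched_fence_scheduled() fires once run_job() has pushed the job to the ring, separately from amd_sched_fence_signal() when the hardware completes it. A dependent job on the same scheduler only needs the first event, since the ring executes jobs in submission order, and that is what amd_sched_entity_add_dependency_cb() exploits via scheduled_cb and AMD_SCHED_FENCE_SCHEDULED_BIT. A runnable userspace sketch of the "fire callbacks on scheduled, not signaled" idea; types and names here are illustrative, not the kernel's fence API:

/*
 * Waiters register a callback on a fence; once the fence is marked
 * "scheduled" (still unsignaled), all registered callbacks fire.
 */
#include <stdbool.h>
#include <stdio.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *cb);
};

struct sketch_fence {
	bool scheduled;          /* stands in for AMD_SCHED_FENCE_SCHEDULED_BIT */
	struct cb *scheduled_cb; /* singly linked stand-in for the list_head */
};

/* Register a callback, or run it right away if already scheduled,
 * mirroring the "already scheduled" early-out in the patch. */
static void add_scheduled_cb(struct sketch_fence *f, struct cb *cb)
{
	if (f->scheduled) {
		cb->func(cb);
		return;
	}
	cb->next = f->scheduled_cb;
	f->scheduled_cb = cb;
}

/* Mirrors amd_sched_fence_scheduled(): set the flag, drain the list. */
static void fence_mark_scheduled(struct sketch_fence *f)
{
	f->scheduled = true;
	while (f->scheduled_cb) {
		struct cb *cur = f->scheduled_cb;
		f->scheduled_cb = cur->next;
		cur->func(cur);
	}
}

static void wake_entity(struct cb *cb)
{
	(void)cb;
	printf("entity woken: dependency reached the hardware\n");
}

int main(void)
{
	struct sketch_fence f = { 0 };
	struct cb waiter = { .func = wake_entity };

	add_scheduled_cb(&f, &waiter);
	fence_mark_scheduled(&f); /* run_job() happened; waiter may proceed */
	return 0;
}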