Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 52
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d2edb527dcb8..e19a37a83397 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -202,21 +202,26 @@ static void kick_submission(struct intel_engine_cs *engine,
if (prio <= engine->execlists.queue_priority_hint)
return;
+ rcu_read_lock();
+
/* Nothing currently active? We're overdue for a submission! */
inflight = execlists_active(&engine->execlists);
if (!inflight)
- return;
+ goto unlock;
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
- if (inflight->hw_context == rq->hw_context)
- return;
+ if (inflight->context == rq->context)
+ goto unlock;
engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
}
static void __i915_schedule(struct i915_sched_node *node,
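For readability, here is kick_submission() as it reads with the hunk above applied (reconstructed from the diff itself; the tail of the signature, the local declaration and the blank lines are inferred, not shown in the hunk). The point of the change: execlists_active() returns a request that may be retired and freed concurrently, so it is only safe to dereference inside an RCU read-side critical section.

static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq, int prio)
{
        const struct i915_request *inflight;

        if (prio <= engine->execlists.queue_priority_hint)
                return;

        /* Hold the RCU read lock so the inflight request cannot be
         * retired and freed while we inspect it. */
        rcu_read_lock();

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                goto unlock;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves.
         */
        if (inflight->context == rq->context)
                goto unlock;

        engine->execlists.queue_priority_hint = prio;
        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
        rcu_read_unlock();
}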
@@ -321,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio;
- if (list_empty(&node->link)) {
- /*
- * If the request is not in the priolist queue because
- * it is not yet runnable, then it doesn't contribute
- * to our preemption decisions. On the other hand,
- * if the request is on the HW, it too is not in the
- * queue; but in that case we may still need to reorder
- * the inflight requests.
- */
+ /*
+ * Once the request is ready, it will be placed into the
+ * priority lists and then onto the HW runlist. Before the
+ * request is ready, it does not contribute to our preemption
+ * decisions and we can safely ignore it, as it will, and
+ * any preemption required, be dealt with upon submission.
+ * See engine->submit_request()
+ */
+ if (list_empty(&node->link))
continue;
- }
- if (!intel_engine_is_virtual(engine) &&
- !i915_request_is_active(node_to_request(node))) {
+ if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
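The open-coded virtual-engine/active test is replaced by a single predicate. Its definition is not part of this diff; below is a minimal sketch of the shape such a helper takes, assuming a per-request fence flag that submission sets while the request sits on a priolist (the flag name is an assumption here, not taken from this patch):

/* Sketch only: true while the request is linked into a priority list,
 * i.e. eligible for requeuing onto a different priolist level. */
static inline bool
i915_request_in_priority_queue(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_PQ, &rq->fence.flags);
}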
@@ -382,9 +385,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
+
+ i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
node->attr.priority = I915_PRIORITY_INVALID;
node->semaphores = 0;
node->flags = 0;
+
+ GEM_BUG_ON(!list_empty(&node->signalers_list));
+ GEM_BUG_ON(!list_empty(&node->waiters_list));
+ GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
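Separating one-time construction from per-cycle reset lets a recycled node skip re-running INIT_LIST_HEAD() and instead assert that teardown really emptied the lists. A hypothetical lifecycle, for illustration only (the retire/reuse path that actually drives this is elsewhere; rq is an assumed i915_request with an embedded sched node):

i915_sched_node_init(&rq->sched);    /* first use: lists + scalar state */
/* ... request executes and retires ... */
i915_sched_node_fini(&rq->sched);    /* drains deps, re-empties the lists */
i915_sched_node_reinit(&rq->sched);  /* reuse: scalar state only; the
                                      * GEM_BUG_ON()s verify the lists
                                      * really were left empty */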
@@ -410,8 +423,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->waiter = node;
dep->flags = flags;
@@ -421,6 +432,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ /* All set, now publish. Beware the lockless walkers. */
+ list_add_rcu(&dep->signal_link, &node->signalers_list);
+ list_add_rcu(&dep->wait_link, &signal->waiters_list);
+
/*
* As we do not allow WAIT to preempt inflight requests,
* once we have executed a request, along with triggering
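Together these two hunks implement initialise-then-publish: dep is fully written (->signaler, ->waiter, ->flags) before list_add_rcu() makes it reachable, and the release semantics of list_add_rcu() (rcu_assign_pointer() on the list ->next pointer) order those stores before the link becomes visible. A sketch of the kind of lockless reader this enables, assuming reclamation of deps is also RCU-safe (that half is outside this diff):

/* Assumed walker, not code from this patch: count unsignaled
 * signalers without taking schedule_lock. */
static unsigned int count_pending_signalers(const struct i915_sched_node *node)
{
        const struct i915_dependency *dep;
        unsigned int count = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(dep, &node->signalers_list, signal_link) {
                /* safe: dep->signaler was written before publication */
                if (!node_signaled(dep->signaler))
                        count++;
        }
        rcu_read_unlock();

        return count;
}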
@@ -469,13 +484,13 @@ void i915_sched_node_fini(struct i915_sched_node *node)
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->signalers_list);
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -486,6 +501,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->waiters_list);
spin_unlock_irq(&schedule_lock);
}
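A note on the two INIT_LIST_HEAD() additions: each drain loop walks one list but unlinks only the other list's link (wait_link while walking signalers_list, signal_link while walking waiters_list) before freeing the entry, so the head of the walked list is left pointing at freed memory. Resetting it wholesale restores the empty state that i915_sched_node_reinit() now asserts. The idiom, annotated (restating the hunk above, not new code):

list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
        list_del(&dep->wait_link);              /* detach from the signaler */
        if (dep->flags & I915_DEPENDENCY_ALLOC)
                i915_dependency_free(dep);      /* ->signal_link dies with it */
}
INIT_LIST_HEAD(&node->signalers_list);          /* reset the stale head */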