| author | Matthew Brost <matthew.brost@intel.com> | 2021-06-18 04:06:34 +0300 |
|---|---|---|
| committer | Matt Roper <matthew.d.roper@intel.com> | 2021-06-19 01:13:33 +0300 |
| commit | 349a2bc5aae45f54bce1c6fd54d8d3ac2ae26611 (patch) | |
| tree | a93ad9e0bc0d8f5b2065a834569fabfd3a7743fd /drivers/gpu/drm/i915/i915_scheduler.c | |
| parent | c4fd7d8cc3caa614ab492e0efc8854328f72b719 (diff) | |
| download | linux-349a2bc5aae45f54bce1c6fd54d8d3ac2ae26611.tar.xz | |
drm/i915: Move active tracking to i915_sched_engine
Move active request tracking and its lock to i915_sched_engine. This
lock is also the submission lock, so i915_sched_engine is the correct
place for it.
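
For orientation, a minimal sketch of the resulting layout, assuming the
pre-patch fields lived in the anonymous "active" sub-struct of
intel_engine_cs (only the members this patch touches are shown; the real
definition carries more state):

#include <linux/list.h>
#include <linux/spinlock.h>

struct i915_sched_engine {
	/*
	 * Protects requests and hold below, plus the priority queue;
	 * doubles as the submission lock, hence it belongs here.
	 */
	spinlock_t lock;

	/* In-flight requests, formerly engine->active.requests. */
	struct list_head requests;

	/* Ready requests suspended from execution, formerly engine->active.hold. */
	struct list_head hold;

	/* ... priority queue, queue_priority_hint, vfuncs, ... */
};

i915_sched_engine_create() in the diff below initializes exactly these
members.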
v3:
(Jason Ekstrand)
Add kernel doc
v6:
Rebase
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-5-matthew.brost@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
| -rw-r--r-- | drivers/gpu/drm/i915/i915_scheduler.c | 30 |

1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2c31e07883ba..4bc6969f6a97 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -68,7 +68,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 	struct rb_node **parent, *rb;
 	bool first = true;
 
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&engine->sched_engine->lock);
 	assert_priolists(sched_engine);
 
 	if (unlikely(sched_engine->no_priolist))
@@ -147,9 +147,9 @@ sched_lock_engine(const struct i915_sched_node *node,
 	 * check that the rq still belongs to the newly locked engine.
 	 */
 	while (locked != (engine = READ_ONCE(rq->engine))) {
-		spin_unlock(&locked->active.lock);
+		spin_unlock(&locked->sched_engine->lock);
 		memset(cache, 0, sizeof(*cache));
-		spin_lock(&engine->active.lock);
+		spin_lock(&engine->sched_engine->lock);
 		locked = engine;
 	}
 
@@ -296,7 +296,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 	memset(&cache, 0, sizeof(cache));
 
 	engine = node_to_request(node)->engine;
-	spin_lock(&engine->active.lock);
+	spin_lock(&engine->sched_engine->lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	engine = sched_lock_engine(node, engine, &cache);
@@ -305,7 +305,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		node = dep->signaler;
 		engine = sched_lock_engine(node, engine, &cache);
-		lockdep_assert_held(&engine->active.lock);
+		lockdep_assert_held(&engine->sched_engine->lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
@@ -338,7 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 		kick_submission(engine, node_to_request(node), prio);
 	}
 
-	spin_unlock(&engine->active.lock);
+	spin_unlock(&engine->sched_engine->lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -511,7 +511,23 @@ i915_sched_engine_create(unsigned int subclass)
 	sched_engine->queue = RB_ROOT_CACHED;
 	sched_engine->queue_priority_hint = INT_MIN;
 
-	/* subclass is used in a follow up patch */
+	INIT_LIST_HEAD(&sched_engine->requests);
+	INIT_LIST_HEAD(&sched_engine->hold);
+
+	spin_lock_init(&sched_engine->lock);
+	lockdep_set_subclass(&sched_engine->lock, subclass);
+
+	/*
+	 * Due to an interesting quirk in lockdep's internal debug tracking,
+	 * after setting a subclass we must ensure the lock is used. Otherwise,
+	 * nr_unused_locks is incremented once too often.
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	local_irq_disable();
+	lock_map_acquire(&sched_engine->lock.dep_map);
+	lock_map_release(&sched_engine->lock.dep_map);
+	local_irq_enable();
+#endif
 
 	return sched_engine;
 }
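
The CONFIG_DEBUG_LOCK_ALLOC block added at the end of
i915_sched_engine_create() is a reusable lockdep idiom rather than
anything i915-specific. A standalone sketch of the same pattern; the
helper name is illustrative, not from the patch:

#include <linux/irqflags.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical helper: initialize a spinlock with a lockdep subclass. */
static void init_subclassed_lock(spinlock_t *lock, unsigned int subclass)
{
	spin_lock_init(lock);
	lockdep_set_subclass(lock, subclass);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * lockdep only considers a lock class "used" once the lock has
	 * been acquired; setting a subclass re-registers the class, so
	 * without a dummy acquire/release nr_unused_locks would be
	 * incremented once too often. IRQs are disabled to match how
	 * the lock is taken in real use.
	 */
	local_irq_disable();
	lock_map_acquire(&lock->dep_map);
	lock_map_release(&lock->dep_map);
	local_irq_enable();
#endif
}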