author		Chris Wilson <chris@chris-wilson.co.uk>	2020-02-20 10:50:20 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2020-02-20 16:27:23 +0300
commit		66940061a52fba730fdb49e133502e19be684202 (patch)
tree		95e7e36c40ff4e23a8fb5daf9c94e432cb212e92 /drivers/gpu/drm/i915/i915_scheduler.c
parent		df6b1f3da89f1ff87399316c836fc3981de96c38 (diff)
download	linux-66940061a52fba730fdb49e133502e19be684202.tar.xz
drm/i915/gt: Protect signaler walk with RCU
While we know that the waiters cannot disappear as we walk our list
(only that they might be added), the same cannot be said for our
signalers as they may be completed by the HW and retired as we process
this request. Ergo we need to use RCU to protect the list iteration and
remember to mark the removals with list_del_rcu().
v2: Mark the deps as safe-for-rcu
Fixes: 793c22617367 ("drm/i915/gt: Protect execlists_hold/unhold from new waiters")
Fixes: 32ff621fd744 ("drm/i915/gt: Allow temporary suspension of inflight requests")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200220075025.1539375-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_scheduler.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index e19a37a83397..59f70b674665 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -486,7 +486,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->wait_link);
+		list_del_rcu(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -497,7 +497,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->signal_link);
+		list_del_rcu(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -526,7 +526,8 @@ static struct i915_global_scheduler global = { {
 int __init i915_global_scheduler_init(void)
 {
 	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN);
+					      SLAB_HWCACHE_ALIGN |
+					      SLAB_TYPESAFE_BY_RCU);
 	if (!global.slab_dependencies)
 		return -ENOMEM;
 
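The hunks above only show the writer side of the change: the removals become list_del_rcu() and the dependency slab becomes RCU-type-safe. The list walk the commit message wants to protect sits in the execlists hold/unhold paths, outside this diffstat. As a minimal sketch of that reader-side pattern (demo_node, demo_dep and demo_walk_signalers are illustrative stand-ins for i915_sched_node and i915_dependency, not code from this patch):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_node;

struct demo_dep {			/* stand-in for struct i915_dependency */
	struct demo_node *signaler;
	struct list_head signal_link;	/* on the waiter's signalers_list */
	struct list_head wait_link;	/* on the signaler's waiters_list */
};

struct demo_node {			/* stand-in for struct i915_sched_node */
	struct list_head signalers_list;
	struct list_head waiters_list;
};

static void demo_walk_signalers(struct demo_node *node)
{
	struct demo_dep *dep;

	/*
	 * rcu_read_lock() pairs with the list_del_rcu() in the teardown
	 * path above: a dependency retired concurrently may drop off the
	 * list, but its memory is not returned to the system while we are
	 * inside the read-side critical section.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(dep, &node->signalers_list, signal_link) {
		/* inspect dep->signaler; revalidate before acting on it */
	}
	rcu_read_unlock();
}

SLAB_TYPESAFE_BY_RCU is what makes this safe without a per-object call_rcu(): a freed dependency may be recycled for a new dependency within a grace period, but its slab pages are not released, so the walker only has to cope with a reused object rather than a use-after-free of unmapped memory.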