author     Chris Wilson <chris@chris-wilson.co.uk>  2020-01-06 14:42:33 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>  2020-01-06 17:38:57 +0300
commit     8413502238168561acf1e2137eaea6af4004e506 (patch)
tree       18712f891e3c62867def3dbb412b43a8f62e65ac /drivers/gpu/drm/i915/i915_active.c
parent     3fbbbef4f56a94c1b71b342b8157990430375f97 (diff)
drm/i915/gt: Drop mutex serialisation between context pin/unpin
The last remaining reason for serialising the pin/unpin of the intel_context is to ensure that our preallocated wakerefs are not consumed too early (i.e. the unpin of the previous phase does not emit the idle barriers for this phase before we even submit). All of the other operations within the context pin/unpin are supposed to be atomic... Therefore, we can reduce the serialisation to being just on the i915_active.preallocated_barriers itself and drop the nested pin_mutex from intel_context_unpin().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200106114234.2529613-5-chris@chris-wilson.co.uk
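For readers unfamiliar with the idiom, the sketch below isolates the new serialisation point: rather than holding a mutex across pin/unpin, the next preallocation simply waits for the previous batch of barriers to be consumed, i.e. for the lock-free list to drain. This is a minimal userspace analogue in C11 atomics, not the kernel code; the identifiers (barrier_node, prealloc_barriers, wait_for_previous, take_all) are invented for illustration, with sched_yield() standing in for cond_resched() and atomic_exchange() for llist_del_all().

#include <sched.h>
#include <stdatomic.h>
#include <stddef.h>

struct barrier_node {
	struct barrier_node *next;	/* intrusive singly-linked list */
};

/* Stand-in for ref->preallocated_barriers (a struct llist_head upstream). */
static _Atomic(struct barrier_node *) prealloc_barriers;

/* Stand-in for: while (!llist_empty(&ref->preallocated_barriers)) cond_resched(); */
static void wait_for_previous(void)
{
	while (atomic_load(&prealloc_barriers))
		sched_yield();	/* yield instead of spinning at full tilt */
}

/* Consumer side: empty the whole list in one atomic exchange, as llist_del_all() does. */
static struct barrier_node *take_all(void)
{
	return atomic_exchange(&prealloc_barriers, NULL);
}

Emptiness of the list is what marks the previous phase as complete, which is exactly the property the dropped mutex used to guarantee.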
Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cfe09964622b..f3da5c06f331 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -605,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
 	intel_engine_mask_t tmp, mask = engine->mask;
+	struct llist_node *pos = NULL, *next;
 	struct intel_gt *gt = engine->gt;
-	struct llist_node *pos, *next;
 	int err;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
-	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+	/* Wait until the previous preallocation is completed */
+	while (!llist_empty(&ref->preallocated_barriers))
+		cond_resched();
 
 	/*
 	 * Preallocate a node for each physical engine supporting the target
@@ -653,16 +656,24 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
 		GEM_BUG_ON(barrier_to_engine(node) != engine);
-		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+		next = barrier_to_ll(node);
+		next->next = pos;
+		if (!pos)
+			pos = next;
 		intel_engine_pm_get(engine);
 	}
 
+	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+	llist_add_batch(next, pos, &ref->preallocated_barriers);
+
 	return 0;
 
 unwind:
-	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+	while (pos) {
 		struct active_node *node = barrier_from_ll(pos);
 
+		pos = pos->next;
+
 		atomic_dec(&ref->count);
 		intel_engine_pm_put(barrier_to_engine(node));
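The second hunk also changes how the preallocated nodes are published: instead of an llist_add() per node, the chain is linked up privately in pos/next and pushed with a single llist_add_batch() once every engine has succeeded, so other threads never observe a half-built batch. Below is a minimal userspace analogue of that publish step, again with invented names (barrier_node, prealloc_barriers, add_batch) and C11 atomics in place of the kernel llist API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct barrier_node {
	struct barrier_node *next;
	int id;
};

static _Atomic(struct barrier_node *) prealloc_barriers;

/*
 * Publish a privately built chain [first .. last] with one atomic update,
 * mirroring llist_add_batch(first, last, head): the nodes need no locking
 * because they are invisible to other threads until this store.
 */
static void add_batch(struct barrier_node *first, struct barrier_node *last)
{
	struct barrier_node *head = atomic_load(&prealloc_barriers);

	do {
		last->next = head;
	} while (!atomic_compare_exchange_weak(&prealloc_barriers, &head, first));
}

int main(void)
{
	static struct barrier_node nodes[3];
	struct barrier_node *first = NULL, *last = NULL;

	/* Build the chain locally, newest node at the front. */
	for (int i = 0; i < 3; i++) {
		nodes[i].id = i;
		nodes[i].next = first;
		first = &nodes[i];
		if (!last)
			last = &nodes[i];
	}

	add_batch(first, last);

	for (struct barrier_node *n = atomic_load(&prealloc_barriers); n; n = n->next)
		printf("barrier %d\n", n->id);

	return 0;
}

llist_add_batch() only needs the first and last node of the chain, which is why the loop in the patch tracks both endpoints before publishing.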