author		Chris Wilson <chris@chris-wilson.co.uk>	2020-02-03 12:41:47 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2020-02-03 14:25:39 +0300
commit		30ca04e16cbeea7401dabc51e53b92975144e436 (patch)
tree		637508a1efec53596a06c8fcc2bc94b0c45342f2 /drivers
parent		6a79c28936dfc44019d42e36aaf8acd9c38f66c2 (diff)
drm/i915: Hold reference to previous active fence as we queue
Take a reference to the previous exclusive fence on the i915_active, as we
wish to add an await to it in the caller (and so must prevent it from being
freed until we have completed that task).

Fixes: e3793468b466 ("drm/i915: Use the async worker to avoid reclaim tainting the ggtt->mutex")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200203094152.4150550-1-chris@chris-wilson.co.uk
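For context only (not part of the patch): a minimal, hypothetical sketch of the lifetime rule the fix enforces. Because the previous exclusive fence may be released by another thread as soon as it has been replaced, a reference has to be taken under the RCU read lock before the pointer is handed back, and the caller then owes a dma_fence_put(). The struct and function names below are made up for illustration; only dma_fence_get_rcu(), dma_fence_put() and the RCU primitives are real kernel APIs.

/*
 * Hypothetical sketch: hand back a fence that may be freed concurrently.
 * "struct demo_slot" and demo_get_prev() are illustrative names only.
 */
#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct demo_slot {
	struct dma_fence __rcu *excl;	/* may be replaced/freed by other threads */
};

static struct dma_fence *demo_get_prev(struct demo_slot *slot)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = rcu_dereference(slot->excl);
	if (prev)
		prev = dma_fence_get_rcu(prev);	/* NULL if the refcount already hit zero */
	rcu_read_unlock();

	return prev;	/* caller must dma_fence_put() when done */
}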
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_active.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c		4
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index da58e5d084f4..9ccb931a733e 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -398,9 +398,13 @@ i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
 	/* We expect the caller to manage the exclusive timeline ordering */
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
+	rcu_read_lock();
 	prev = __i915_active_fence_set(&ref->excl, f);
-	if (!prev)
+	if (prev)
+		prev = dma_fence_get_rcu(prev);
+	else
 		atomic_inc(&ref->count);
+	rcu_read_unlock();
 
 	return prev;
 }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e801e28de470..74dc3ba59ce5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -422,10 +422,12 @@ int i915_vma_bind(struct i915_vma *vma,
 		 * execution and not content or object's backing store lifetime.
 		 */
 		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
-		if (prev)
+		if (prev) {
 			__i915_sw_fence_await_dma_fence(&work->base.chain,
 							prev,
 							&work->cb);
+			dma_fence_put(prev);
+		}
 
 		work->base.dma.error = 0; /* enable the queue_work() */
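Taken together, the two hunks form a get/await/put pattern: i915_active_set_exclusive() now returns the previous exclusive fence with a reference held (acquired under the RCU read lock), and i915_vma_bind() drops that reference with dma_fence_put() once the await has been queued, closing the window in which the fence could have been freed while the caller was still using it.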